Package org.apache.hadoop.hbase.mapreduce

Examples of org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.doBulkLoad()


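doBulkLoad() takes a directory of HFiles (typically the output of an HFileOutputFormat2 job) and atomically moves them into the regions of an existing table, splitting any file that spans a region boundary. Before the extracted snippets, here is a minimal sketch of the typical call pattern; it is not taken from the examples below, the table name and the command-line path are placeholders, and it assumes the older HTable-based API that the snippets on this page use.

// Minimal sketch, assuming the HFiles under hfileDir were produced by an
// HFileOutputFormat2 job and that the target table (placeholder name "myTable")
// already exists with matching column families.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path hfileDir = new Path(args[0]);           // directory with one subdirectory per column family
    HTable table = new HTable(conf, "myTable");  // placeholder table name
    try {
      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
      loader.doBulkLoad(hfileDir, table);        // moves (and splits, if needed) the HFiles into the table's regions
    } finally {
      table.close();
    }
  }
}

The snippets below follow the same pattern, differing mainly in how the HFiles are prepared and how the table handle is obtained.
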
      HTable table = new HTable(conf, tableName);
      try {
        TEST_UTIL.waitTableAvailable(tableName, 30000);
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
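        // loadPath is a directory of HFiles (one subdirectory per column family) prepared earlier in the test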
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }

    job.waitForCompletion(true);

    log("[TS - M-R HFile generated..Now dumping to HBase] :: " + new Date() + "\n");

    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    loader.doBulkLoad(new Path(outFile), hDataTable);

    log("[TS - FINISH] :: " + new Date() + "\n");
    if (isDebug) bw.close();
  }

    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
      false);
    Configuration conf = TEST_UTIL.getConfiguration();
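    // Assign a sequence id to the bulk-loaded files so their cells win over existing
    // cells written at the same timestamp (the behavior this test exercises).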
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    bulkload.doBulkLoad(hfilePath, table);
    ResultScanner scanner = table.getScanner(scan);
    Result result = scanner.next();
    result = scanAfterBulkLoad(scanner, result, "version2");
    Put put0 = new Put(Bytes.toBytes("row1"));
    put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes

          Put put1 = new Put(Bytes.toBytes("row5"));
          put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
              Bytes.toBytes("version0")));
          table.put(put1);
          table.flushCommits();
          bulkload.doBulkLoad(hfilePath, table);
          latch.countDown();
        } catch (TableNotFoundException e) {
        } catch (IOException e) {
        }
      }

    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadNativeHFile/",
      "/temp/testBulkLoadNativeHFile/col/file", true);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    bulkload.doBulkLoad(hfilePath, table);
    ResultScanner scanner = table.getScanner(scan);
    Result result = scanner.next();
    // We had 'version0', 'version1' for 'row1,col:q' in the table.
    // Bulk load added 'version2'; the scanner should be able to see 'version2'.
    result = scanAfterBulkLoad(scanner, result, "version2");

      HTable table = new HTable(conf, tableName);
      try {
        Admin admin = TEST_UTIL.getHBaseAdmin();
        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }

            return 1;
        }

        LOG.info("Loading HFiles from {}", outputPath);
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(getConf());
        loader.doBulkLoad(outputPath, htable);
        htable.close();

        LOG.info("Incremental load complete");

        LOG.info("Removing output directory {}", outputPath);
