Package org.apache.hadoop.hbase.mapreduce

Examples of org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles$LoadQueueItem
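
LoadIncrementalHFiles is the bulk-load tool (also exposed on the command line as completebulkload) that moves HFiles produced by HFileOutputFormat into an existing HBase table. LoadQueueItem is its internal work item: one HFile queued for loading into a particular region, split and re-queued if it straddles a region boundary. The snippets below all drive this machinery indirectly through doBulkLoad. As a reference point, here is a minimal self-contained sketch against the same older HTable-based API the examples use; the table name and HFile directory are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Directory laid out as <dir>/<columnFamily>/<hfile>, as produced
        // by HFileOutputFormat; both names here are hypothetical.
        Path hfileDir = new Path("/tmp/hfiles");
        HTable table = new HTable(conf, "mytable");
        try {
          LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
          // Queues each HFile as a LoadQueueItem, splitting files that span
          // region boundaries, then moves them into the region stores.
          loader.doBulkLoad(hfileDir, table);
        } finally {
          table.close();
        }
      }
    }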


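Bulk loading in a test: open the table, wait until it is enabled, then hand the prepared HFile directory to doBulkLoad, closing the table when done.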
      HTable table = new HTable(conf, tableName);
      try {
        Admin admin = TEST_UTIL.getHBaseAdmin();
        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }


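A job driver that runs an import MapReduce job and, if it succeeds, bulk loads the HFiles it wrote before cleaning up the output directory.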
            LOG.error("Import job failed, check JobTracker for details");
            return 1;
        }

        LOG.info("Loading HFiles from {}", outputPath);
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(getConf());
        loader.doBulkLoad(outputPath, htable);
        htable.close();

        LOG.info("Incremental load complete");

        LOG.info("Removing output directory {}", outputPath);

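A test that writes an HFile, bulk loads it with hbase.mapreduce.bulkload.assign.sequenceNumbers enabled (so loaded cells get a sequence id and order correctly against existing cells with the same timestamp), and verifies that the scanner sees the loaded 'version2'.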
    // use bulkload
    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
      false);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    bulkload.doBulkLoad(hfilePath, table);
    ResultScanner scanner = table.getScanner(scan);
    Result result = scanner.next();
    result = scanAfterBulkLoad(scanner, result, "version2");
    Put put0 = new Put(Bytes.toBytes("row1"));
    put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
        .toBytes("version3")));

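The parallel-scan variant: a scanner is opened first, then a background thread puts a row, flushes, and bulk loads, counting down a latch so the test can wait for the load to finish.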
    // use bulkload
    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadWithParallelScan/",
        "/temp/testBulkLoadWithParallelScan/col/file", false);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    ResultScanner scanner = table.getScanner(scan);
    // Create a scanner and then do bulk load
    final CountDownLatch latch = new CountDownLatch(1);
    new Thread() {
      public void run() {
        try {
          Put put1 = new Put(Bytes.toBytes("row5"));
          put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
              Bytes.toBytes("version0")));
          table.put(put1);
          table.flushCommits();
          bulkload.doBulkLoad(hfilePath, table);
          latch.countDown();
        } catch (TableNotFoundException e) {
          // ignored in the test; a failure simply leaves the latch unreleased
        } catch (IOException e) {
          // ignored in the test; a failure simply leaves the latch unreleased
        }
      }
    }.start();

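The same verification with a natively written HFile (the final writeToHFile argument is true): after the load, the scanner should see the bulk-loaded 'version2'.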
    // use bulkload
    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadNativeHFile/",
      "/temp/testBulkLoadNativeHFile/col/file", true);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    bulkload.doBulkLoad(hfilePath, table);
    ResultScanner scanner = table.getScanner(scan);
    Result result = scanner.next();
    // We had 'version0' and 'version1' for 'row1,col:q' in the table.
    // Bulk load added 'version2'; the scanner should be able to see 'version2'.
    result = scanAfterBulkLoad(scanner, result, "version2");


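A coprocessor test: create an HFile under a column-family directory, bulk load it, and assert that the observer's preBulkLoadHFile and postBulkLoadHFile hooks both fired.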
      Path familyDir = new Path(dir, Bytes.toString(A));

      createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

      // Bulk load
      new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {true, true});

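A driver that runs a bulk-import tool and, since LoadIncrementalHFiles implements Tool, invokes it through ToolRunner with the HFile path and table name as arguments before deleting the staging directory.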
        int status = ToolRunner.run(conf, new BulkImportTool(), args);
        if (status != 0) {
            System.exit(status);
        }
        SchemaMetrics.configureGlobally(conf);
        status = ToolRunner.run(new LoadIncrementalHFiles(conf),
                new String[]{conf.get(HFILE_PATH), conf.get(LilyJythonMapper.TABLE_NAME)});
        FileSystem.get(conf).delete(new Path(new URI(conf.get(HFILE_PATH))), true);
        System.exit(status);
    }

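An OutputCommitter whose commitJob first delegates to the wrapped committer and then imports the job's HFile output into the configured table, wrapping any failure in an IOException.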
          try {
            baseOutputCommitter.commitJob(jobContext);
            Configuration conf = jobContext.getConfiguration();
            try {
              // import the generated HFiles into the target table
              new LoadIncrementalHFiles(conf)
                .doBulkLoad(HFileOutputFormat.getOutputPath(jobContext),
                  new HTable(conf,
                    conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY)));
            } catch (Exception e) {
              throw new IOException("BulkLoad failed.", e);

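A variant of the first example that constructs an HBaseAdmin directly from the test configuration rather than using the test utility's cached Admin instance.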
      HTable table = new HTable(conf, tableName);
      try {
        HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }
