Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration
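
The excerpts below are partial snippets from projects that build on Hadoop, such as Hadoop Streaming, Flume, and HBase. As a quick orientation before the excerpts, here is a minimal sketch of the core Configuration API: loading resources and reading typed values with defaults. The property keys and the extra resource path are illustrative, not taken from the excerpts below.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  public class ConfigurationBasics {
    public static void main(String[] args) {
      // new Configuration() loads the default resources from the classpath
      // (core-default.xml/core-site.xml in current Hadoop; older releases used
      // hadoop-default.xml/hadoop-site.xml, as the first excerpt's comment notes).
      Configuration conf = new Configuration();

      // Additional XML resources can be layered on top (hypothetical path).
      conf.addResource(new Path("/etc/myapp/myapp-site.xml"));

      // Plain string property and typed getters with defaults (illustrative keys).
      conf.set("myapp.output.dir", "/data/out");
      int retries = conf.getInt("myapp.retries", 3);
      boolean verbose = conf.getBoolean("myapp.verbose", false);
      System.out.println("retries=" + retries + ", verbose=" + verbose);
    }
  }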


    archiveURIs = StringUtils.stringToURI(archives);
  }
 
  protected void setJobConf() throws IOException {
    msg("hadoopAliasConf_ = " + hadoopAliasConf_);
    config_ = new Configuration();
    if (!cluster_.equals("default")) {
      config_.addFinalResource(new Path(getHadoopAliasConfFile()));
    } else {
      // use only defaults: hadoop-default.xml and hadoop-site.xml
    }
View Full Code Here
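
The addFinalResource call above comes from a very old Hadoop release; in current versions the usual way to layer an extra configuration file on top of the defaults is Configuration.addResource. A minimal sketch, with a hypothetical helper standing in for getHadoopAliasConfFile():

  // Sketch only: loadClusterConf is a hypothetical helper, not part of the excerpt above.
  static Configuration loadClusterConf(String aliasConfFile) {
    Configuration config = new Configuration();
    // addResource layers the named file on top of the default resources.
    config.addResource(new Path(aliasConfFile));
    return config;
  }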


  private static final File FILE = new File(DIR, "test.seq");

  @BeforeClass
  public static void testWriteSequenceFile() throws IOException {
    FILE.delete();
    Configuration c = new Configuration();
    URI uri = FILE.toURI();
    SequenceFile.Writer writer
      = new SequenceFile.Writer(FileSystem.get(uri, c), c,
                                new Path(uri.toString()),
                                LongWritable.class, Text.class);
View Full Code Here
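
The SequenceFile.Writer constructor used above is the old form; Hadoop 2.x deprecates it in favor of the SequenceFile.createWriter factory driven by Writer options. A minimal sketch of the option-based form, assuming a hypothetical output path:

  Configuration c = new Configuration();
  SequenceFile.Writer writer = SequenceFile.createWriter(c,
      SequenceFile.Writer.file(new Path("/tmp/test.seq")),     // hypothetical path
      SequenceFile.Writer.keyClass(LongWritable.class),
      SequenceFile.Writer.valueClass(Text.class));
  try {
    writer.append(new LongWritable(1L), new Text("one"));
  } finally {
    writer.close();
  }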

  }

  @Override
  public void open(String filePath, CompressionCodec codec,
      CompressionType cType, FlumeFormatter fmt) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      fsOut = hdfs.append(dstPath);
    } else {
      fsOut = hdfs.create(dstPath);
    }
View Full Code Here
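
A recurring idiom in these excerpts is resolving the FileSystem from a Path plus a Configuration, as open() does with dstPath.getFileSystem(conf). A minimal sketch of that idiom on its own, with a hypothetical HDFS path:

  Configuration conf = new Configuration();
  Path p = new Path("hdfs://namenode.example.com:8020/user/example/data"); // hypothetical URI
  FileSystem fs = p.getFileSystem(conf);   // FileSystem chosen from the path's scheme and conf
  if (!fs.exists(p)) {
    fs.mkdirs(p);
  }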

        }
      }
    }

    ZKPermissionWatcher zkw = this.authManager.getZKPermissionWatcher();
    Configuration conf = regionEnv.getConfiguration();
    for (byte[] tableName: tableSet) {
      try {
        ListMultimap<String,TablePermission> perms =
          AccessControlLists.getTablePermissions(conf, tableName);
        byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
View Full Code Here

    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // find where hbase will root itself, so we can copy filesystem there
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    if(org.apache.hadoop.util.VersionInfo.getVersion().startsWith("2.")) {
      LOG.info("Hadoop version is 2.x, pre-migrating snapshot dir");
      FileSystem localFS = FileSystem.getLocal(conf);
      if(!localFS.rename(new Path(untar.toString(), HConstants.OLD_SNAPSHOT_DIR_NAME),
          new Path(untar.toString(), HConstants.SNAPSHOT_DIR_NAME))) {
        throw new IllegalStateException("Failed to move snapshot dir to 2.x expectation");
      }
    }
    doFsCommand(shell,
      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
    doFsCommand(shell, new String [] {"-lsr", "/"});
    // See what's in minihdfs.
    Configuration toolConf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
    ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
    assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION));
    doFsCommand(shell, new String [] {"-lsr", "/"});
    TEST_UTIL.startMiniHBaseCluster(1, 1);
View Full Code Here

   * Sets the HBase table.
   *
   * @param htable  The {@link HTable} to scan.
   */
  public void setHTable(HTable htable) {
    Configuration conf = htable.getConfiguration();
    logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
    logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
    this.htable = htable;
  }
View Full Code Here

    return new TableSnapshotRegionRecordReader();
  }

  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    String snapshotName = getSnapshotName(conf);

    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

    Set<String> snapshotRegionNames
      = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
    if (snapshotRegionNames == null) {
      throw new IllegalArgumentException("Snapshot seems empty");
    }

    // load table descriptor
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs,
        snapshotDir);

    Scan scan = TableMapReduceUtil.convertStringToScan(conf
      .get(TableInputFormat.SCAN));
    Path tableDir = new Path(conf.get(TABLE_DIR_KEY));

    List<InputSplit> splits = new ArrayList<InputSplit>();
    for (String regionName : snapshotRegionNames) {
      // load region descriptor
      Path regionDir = new Path(snapshotDir, regionName);
View Full Code Here

   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
   * After the job is finished, restoreDir can be deleted.
   * @throws IOException if an error occurs
   */
  public static void setInput(Job job, String snapshotName, Path restoreDir) throws IOException {
    Configuration conf = job.getConfiguration();
    conf.set(SNAPSHOT_NAME_KEY, snapshotName);

    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);

    restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

    // TODO: restore from record readers to parallelize.
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

    conf.set(TABLE_DIR_KEY, restoreDir.toString());
  }
View Full Code Here
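
The setInput method above illustrates the standard way to pass parameters from the job-submission side to tasks: write keys into the job's Configuration, then read them back in getSplits or in the record reader's initialize. A minimal sketch of that round trip, using a hypothetical key and mapper:

  // Client side (driver): stash a parameter in the job configuration.
  Job job = Job.getInstance(new Configuration(), "snapshot-scan");      // job name is illustrative
  job.getConfiguration().set("example.snapshot.name", "my_snapshot");   // hypothetical key/value

  // Task side: a mapper reads the same value back through its context.
  public static class ExampleMapper extends Mapper<ImmutableBytesWritable, Result, Text, Text> {
    @Override
    protected void setup(Context context) {
      String snapshotName = context.getConfiguration().get("example.snapshot.name");
      // ... configure the task using snapshotName ...
    }
  }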

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
        InterruptedException {

      Configuration conf = context.getConfiguration();
      this.split = (TableSnapshotRegionSplit) split;
      String regionName = this.split.regionName;
      String snapshotName = getSnapshotName(conf);
      Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      FileSystem fs = rootDir.getFileSystem(conf);

      Path tmpRootDir = new Path(conf.get(TABLE_DIR_KEY)); // This is the user specified root
      // directory where snapshot was restored

      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);

      //load table descriptor
      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);

      //load region descriptor
      Path regionDir = new Path(snapshotDir, regionName);
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);

      // create scan
      String scanStr = conf.get(TableInputFormat.SCAN);
      if (scanStr == null) {
        throw new IllegalArgumentException("A Scan is not configured for this job");
      }
      scan = TableMapReduceUtil.convertStringToScan(scanStr);
      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); // region is immutable, this should be fine,
View Full Code Here

    @Override
    public void map(ImmutableBytesWritable row, final Result value,
                    Context context)
        throws IOException {
      if (replicatedScanner == null) {
        Configuration conf = context.getConfiguration();
        final Scan scan = new Scan();
        scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
        long startTime = conf.getLong(NAME + ".startTime", 0);
        long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
        String families = conf.get(NAME + ".families", null);
        if(families != null) {
          String[] fams = families.split(",");
          for(String fam : fams) {
            scan.addFamily(Bytes.toBytes(fam));
          }
        }
        scan.setTimeRange(startTime, endTime);
        HConnectionManager.execute(new HConnectable<Void>(conf) {
          @Override
          public Void connect(HConnection conn) throws IOException {
            String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
            Configuration peerConf = HBaseConfiguration.create(conf);
            ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);

            HTable replicatedTable = new HTable(peerConf, conf.get(NAME + ".tableName"));
            scan.setStartRow(value.getRow());
            replicatedScanner = replicatedTable.getScanner(scan);
View Full Code Here
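
The map method above builds the peer cluster's settings with HBaseConfiguration.create(conf), which merges the job configuration with the hbase-default.xml and hbase-site.xml resources. A minimal sketch of producing an HBase-aware Configuration and pointing it at a specific ZooKeeper quorum; the host names are hypothetical:

  // HBaseConfiguration.create() loads hbase-default.xml and hbase-site.xml
  // on top of the Hadoop defaults; create(conf) also merges in an existing conf.
  Configuration peerConf = HBaseConfiguration.create();
  peerConf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");   // hypothetical hosts
  peerConf.setInt("hbase.zookeeper.property.clientPort", 2181);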
