Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration

The excerpts below are taken from the Apache HBase code base and show common ways of reading typed values from, and writing them to, a Hadoop Configuration.


    // From a region-server snapshot handler. (The original leading comment is truncated here:
    // "... will hang and fail.")

    LOG.debug("Launching subprocedure for snapshot " + snapshot.getName() + " from table "
        + snapshot.getTable());
    ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher();
    Configuration conf = rss.getConfiguration();
    long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY,
        SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
    long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY,
        SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT);

    switch (snapshot.getType()) {
    case FLUSH:
      SnapshotSubprocedurePool taskManager =
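The excerpt above breaks off mid-statement, but its Configuration usage is complete: two long-valued settings are read from the region server's configuration with getLong(key, default). A minimal, self-contained sketch of the same pattern, using a hypothetical key and default in place of the SNAPSHOT_* constants:

import org.apache.hadoop.conf.Configuration;

public class TimeoutConfigExample {
  // Hypothetical key and default, standing in for SNAPSHOT_TIMEOUT_MILLIS_KEY/_DEFAULT above.
  static final String TIMEOUT_KEY = "example.snapshot.timeout.millis";
  static final long TIMEOUT_DEFAULT = 60000L;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // getLong returns the inline default when the key is absent from the loaded resources.
    long timeoutMillis = conf.getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
    System.out.println("snapshot timeout = " + timeoutMillis + " ms");
  }
}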


  ServerManager(final Server master, final MasterServices services,
      final boolean connect) throws ZooKeeperConnectionException {
    this.master = master;
    this.services = services;
    Configuration c = master.getConfiguration();
    maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
    warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
    this.deadservers = new DeadServer();
    this.connection = connect ? HConnectionManager.getConnection(c) : null;
  }
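ServerManager reads its clock-skew limits from plain string keys with inline defaults. The same keys can be overridden programmatically before they are read, which the following small sketch demonstrates (the override value of 60000 is arbitrary):

import org.apache.hadoop.conf.Configuration;

public class ClockSkewConfigExample {
  public static void main(String[] args) {
    Configuration c = new Configuration();

    // Same key and default as in the ServerManager constructor above.
    System.out.println(c.getLong("hbase.master.maxclockskew", 30000));  // 30000: nothing set yet

    // A programmatic override takes precedence over the inline default.
    c.setLong("hbase.master.maxclockskew", 60000);
    System.out.println(c.getLong("hbase.master.maxclockskew", 30000));  // 60000
  }
}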

    // From a snapshot-export style mapper: per-task settings are read back from the job's
    // Configuration in setup().
    private Path inputArchive;
    private Path inputRoot;

    @Override
    public void setup(Context context) {
      Configuration conf = context.getConfiguration();
      verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true);

      filesGroup = conf.get(CONF_FILES_GROUP);
      filesUser = conf.get(CONF_FILES_USER);
      filesMode = (short)conf.getInt(CONF_FILES_MODE, 0);
      outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT));
      inputRoot = new Path(conf.get(CONF_INPUT_ROOT));

      inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
      outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);

      try {
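The values read back in setup() above were placed into the job's Configuration by the driver that launched the export. A hedged sketch of that driver-to-mapper hand-off, with hypothetical key names instead of the tool's CONF_* constants and a Hadoop 2-style Job.getInstance(...) factory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

public class SetupConfigExample {
  // Hypothetical keys; the snippet above uses CONF_* constants defined by its own tool.
  static final String CONF_VERIFY   = "example.checksum.verify";
  static final String CONF_OUT_ROOT = "example.output.root";
  static final String CONF_BUFSIZE  = "example.buffer.size";

  public static class ExampleMapper
      extends Mapper<NullWritable, NullWritable, NullWritable, NullWritable> {
    private boolean verifyChecksum;
    private Path outputRoot;
    private int bufferSize;

    @Override
    protected void setup(Context context) {
      // Each map task sees the Configuration that was serialized with the job.
      Configuration conf = context.getConfiguration();
      verifyChecksum = conf.getBoolean(CONF_VERIFY, true);
      outputRoot = new Path(conf.get(CONF_OUT_ROOT, "/tmp/example-output"));
      bufferSize = conf.getInt(CONF_BUFSIZE, 64 * 1024);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "setup-config-example");
    // Whatever the driver sets here is what setup() reads on the task side.
    job.getConfiguration().setBoolean(CONF_VERIFY, false);
    job.getConfiguration().set(CONF_OUT_ROOT, "/tmp/example-output");
    job.getConfiguration().setInt(CONF_BUFSIZE, 128 * 1024);
    job.setMapperClass(ExampleMapper.class);
    // Input/output formats would still need to be configured before submitting the job.
  }
}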

  // Tail of an initTableMapperJob-style helper; the method signature is truncated in this excerpt.
  throws IOException {
    job.setInputFormatClass(inputFormatClass);
    if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
    if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
    job.setMapperClass(mapper);
    Configuration conf = job.getConfiguration();
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    conf.set(TableInputFormat.INPUT_TABLE, table);
    conf.set(TableInputFormat.SCAN, convertScanToString(scan));
    if (addDependencyJars) {
      addDependencyJars(job);
    }
    initCredentials(job);
  }
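HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)) is the idiom that pulls hbase-default.xml and hbase-site.xml into a job's existing Hadoop Configuration without losing anything already set on it. A short sketch of what that buys you (the example.job.setting key is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MergeConfigExample {
  public static void main(String[] args) {
    // A plain Hadoop Configuration, e.g. the one carried by a MapReduce Job.
    Configuration jobConf = new Configuration();
    jobConf.set("example.job.setting", "kept");  // hypothetical key set by the driver

    // create(jobConf) builds a Configuration holding the HBase resources plus everything
    // already in jobConf; merge() copies that combined view back into jobConf itself.
    HBaseConfiguration.merge(jobConf, HBaseConfiguration.create(jobConf));

    System.out.println(jobConf.get("example.job.setting"));     // still "kept"
    System.out.println(jobConf.get("hbase.zookeeper.quorum"));  // now populated from the HBase resources
  }
}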

        // init credentials for remote cluster
        String quorumAddress = job.getConfiguration().get(
            TableOutputFormat.QUORUM_ADDRESS);
        if (quorumAddress != null) {
          String[] parts = ZKUtil.transformClusterKey(quorumAddress);
          Configuration peerConf = HBaseConfiguration.create(job
              .getConfiguration());
          peerConf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]);
          peerConf.set("hbase.zookeeper.client.port", parts[1]);
          peerConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]);
          User.getCurrent().obtainAuthTokenForJob(peerConf, job);
        }
       
        User.getCurrent().obtainAuthTokenForJob(job.getConfiguration(), job);
      } catch (InterruptedException ie) {
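The remote-cluster branch above derives a second Configuration from the job's and then repoints its ZooKeeper settings. The copy-and-override step can be illustrated with Configuration's copy constructor; the quorum hosts below are made up and the keys are the standard HBase ZooKeeper client settings:

import org.apache.hadoop.conf.Configuration;

public class PeerConfigExample {
  public static void main(String[] args) {
    Configuration local = new Configuration();
    local.set("hbase.zookeeper.quorum", "zk-local");  // stand-in for the job's own cluster

    // The copy constructor clones every entry, so overrides on 'peer' never touch 'local'.
    Configuration peer = new Configuration(local);
    peer.set("hbase.zookeeper.quorum", "zk-remote-1,zk-remote-2");
    peer.set("hbase.zookeeper.property.clientPort", "2181");
    peer.set("zookeeper.znode.parent", "/hbase");

    System.out.println(local.get("hbase.zookeeper.quorum"));  // zk-local
    System.out.println(peer.get("hbase.zookeeper.quorum"));   // zk-remote-1,zk-remote-2
  }
}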

  public static void initTableReducerJob(String table,
    Class<? extends TableReducer> reducer, Job job,
    Class partitioner, String quorumAddress, String serverClass,
    String serverImpl, boolean addDependencyJars) throws IOException {

    Configuration conf = job.getConfiguration();   
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    job.setOutputFormatClass(TableOutputFormat.class);
    if (reducer != null) job.setReducerClass(reducer);
    conf.set(TableOutputFormat.OUTPUT_TABLE, table);
    // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
    if (quorumAddress != null) {
      // Calling this will validate the format
      ZKUtil.transformClusterKey(quorumAddress);
      conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress);
    }
    if (serverClass != null && serverImpl != null) {
      conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
      conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
    }
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Writable.class);
    if (partitioner == HRegionPartitioner.class) {
      job.setPartitionerClass(HRegionPartitioner.class);
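From the caller's side this method is usually reached through one of the shorter TableMapReduceUtil overloads, which fill in null for the quorum address and server class arguments. A hedged usage sketch, assuming the three-argument initTableReducerJob overload and using placeholder table, reducer, and job names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;

public class ReducerJobExample {
  // Placeholder reducer; a real one would emit Puts or Deletes from reduce().
  public static class ExampleReducer
      extends TableReducer<ImmutableBytesWritable, Writable, ImmutableBytesWritable> {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-table-reducer");
    // Wires up TableOutputFormat, the reducer class, and the output table name,
    // ending up in the longer overload shown above.
    TableMapReduceUtil.initTableReducerJob("exampleTable", ExampleReducer.class, job);
  }
}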

      System.err.println("Missing snapshot name!");
      printUsageAndExit();
      return 1;
    }

    Configuration conf = getConf();
    fs = FileSystem.get(conf);
    rootDir = FSUtils.getRootDir(conf);

    // Load snapshot information
    if (!loadSnapshotInfo(snapshotName)) {
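getConf() here is the accessor a command-line Tool inherits from Configured; the same Configuration then drives both FileSystem resolution and the HBase root directory lookup (hbase.rootdir is the key FSUtils.getRootDir reads). A minimal sketch of that resolution using the public Hadoop APIs:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemFromConfExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves to whatever fs.defaultFS names; the local filesystem when nothing is configured.
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path(conf.get("hbase.rootdir", fs.getWorkingDirectory().toString()));
    System.out.println("fs = " + fs.getUri() + ", rootDir = " + rootDir);
  }
}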

  /**
   * Collect the hfiles and logs statistics of the snapshot and
   * dump the file list if requested and the collected information.
   */
  private void printFiles(final boolean showFiles) throws IOException {
    final String table = snapshotDesc.getTable();
    final Configuration conf = getConf();

    if (showFiles) {
      System.out.println("Snapshot Files");
      System.out.println("----------------------------------------");
    }

  // From MiniZooKeeperCluster: callers may pass in a Configuration, or the no-arg constructor
  // falls back to a fresh default one.
  private int tickTime = 0;

  private Configuration configuration;

  public MiniZooKeeperCluster() {
    this(new Configuration());
  }

    // From an assignment-manager style constructor: monitor periods and timeouts come from the
    // master's Configuration, each with an inline default.
    this.serverManager = serverManager;
    this.catalogTracker = catalogTracker;
    this.executorService = service;
    this.regionsToReopen = Collections.synchronizedMap
                           (new HashMap<String, HRegionInfo> ());
    Configuration conf = master.getConfiguration();
    this.timeoutMonitor = new TimeoutMonitor(
      conf.getInt("hbase.master.assignment.timeoutmonitor.period", 10000),
      master, serverManager,
      conf.getInt("hbase.master.assignment.timeoutmonitor.timeout", 1800000));
    this.timerUpdater = new TimerUpdater(conf.getInt(
        "hbase.master.assignment.timerupdater.period", 10000), master);
    Threads.setDaemonThreadRunning(timerUpdater.getThread(),
        master.getServerName() + ".timerUpdater");
    this.zkTable = new ZKTable(this.master.getZooKeeper());
    this.maximumAssignmentAttempts =
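Like several of the excerpts above, this constructor is cut off mid-statement, but the Configuration calls it shows are complete. All of the values read in these excerpts ultimately come from XML resources layered onto a Configuration, with programmatic set* calls taking precedence over both the resources and the inline defaults. A closing sketch of that layering (my-overrides.xml is a hypothetical classpath resource and the override value is arbitrary):

import org.apache.hadoop.conf.Configuration;

public class ResourceLayeringExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();   // loads core-default.xml and core-site.xml
    conf.addResource("my-overrides.xml");       // hypothetical resource, applied on top when present

    // A value set in code wins over both the XML resources and the inline default.
    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 5000);
    System.out.println(conf.getInt("hbase.master.assignment.timeoutmonitor.period", 10000)); // 5000
  }
}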
