Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.HdfsConfiguration
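
HdfsConfiguration is a thin subclass of org.apache.hadoop.conf.Configuration that registers hdfs-default.xml and hdfs-site.xml as default resources and installs the deprecated-key mappings for HDFS, so HDFS settings resolve without manual addResource calls. A minimal sketch of the three constructors seen in the snippets below; the printed key and fallback value are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HdfsConfigurationBasics {
      public static void main(String[] args) {
        // No-arg form: picks up core-default.xml/core-site.xml (via
        // Configuration) plus hdfs-default.xml/hdfs-site.xml from the classpath.
        Configuration conf = new HdfsConfiguration();

        // loadDefaults=false: starts empty; resources must be added explicitly.
        Configuration empty = new HdfsConfiguration(false);

        // Copy constructor: wraps an existing Configuration, as the
        // MiniDFSCluster snippet further down does.
        Configuration wrapped = new HdfsConfiguration(new Configuration());

        // dfs.replication is defined in hdfs-default.xml; 3 is only a fallback.
        System.out.println("dfs.replication = " + conf.getInt("dfs.replication", 3));
        System.out.println("empty config value: " + empty.get("dfs.replication"));
        System.out.println("wrapped sees it too: " + wrapped.getInt("dfs.replication", 3));
      }
    }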


    // Publish the effective HDFS site settings through the Slider AM registry.
    amState.getPublishedSliderConfigurations().put(
        PublishedArtifacts.HDFS_SITE_CONFIG,
        new PublishedConfiguration(
            "HDFS site settings",
            ConfigHelper.loadFromResource("hdfs-site.xml"),
            new HdfsConfiguration(true)));


    try {
      RegistryView externalView = instanceData.externalView;
      RegisteredEndpoint webUI = null; // actual endpoint lookup truncated in the original listing


  /** Number of DataNode storage directories currently configured. */
  public static int getDataNodeStorageSize() {
    return getStorageDirs().size();
  }

  /** Reads dfs.datanode.data.dir from a fresh HdfsConfiguration as a list of URIs. */
  public static List<URI> getStorageDirs() {
    Configuration conf = new HdfsConfiguration();
    Collection<String> dirNames = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
    return Util.stringCollectionAsURIs(dirNames);
  }
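The helper reads DFS_DATANODE_DATA_DIR_KEY (dfs.datanode.data.dir) as a trimmed, comma-separated collection and maps it to URIs. A self-contained sketch of the same pattern; the paths are hypothetical and Util.stringCollectionAsURIs is approximated here with File.toURI():

    import java.io.File;
    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DataDirList {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Set explicitly so the sketch runs without a site file; in the
        // snippet above the value comes from hdfs-site.xml.
        conf.set("dfs.datanode.data.dir", " /data/1/dfs , /data/2/dfs ");

        // getTrimmedStringCollection splits on commas and strips whitespace.
        Collection<String> dirNames = conf.getTrimmedStringCollection("dfs.datanode.data.dir");
        for (String name : dirNames) {
          URI uri = new File(name).toURI(); // roughly what the snippet's Util helper does
          System.out.println(uri);
        }
      }
    }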

    if(startOpt == StartupOption.IMPORT) {
      // In case of IMPORT this will get rid of default directories
      // but will retain directories specified in hdfs-site.xml
      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      if (dirNames.isEmpty())
        // The remainder of this warning message is truncated in the original listing.
        LOG.warn("!!! WARNING !!!" +
          "\n\tThe NameNode currently runs without persistent storage." +
          "\n\tAny changes to the file system meta-data may be lost.");
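The IMPORT branch builds a second, defaults-only configuration (note that hdfs-site.xml is deliberately not added) and subtracts its values, leaving only the site-configured directories. A self-contained sketch of that isolate-the-site-overrides pattern, with an illustrative property name:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SiteOverrides {
      public static void main(String[] args) {
        // Full view: defaults plus site files.
        Configuration full = new HdfsConfiguration();

        // Defaults-only view: start empty, add everything except the site files.
        Configuration defaultsOnly = new HdfsConfiguration(false);
        defaultsOnly.addResource("core-default.xml");
        defaultsOnly.addResource("hdfs-default.xml");

        String key = "dfs.namenode.name.dir"; // illustrative property
        Collection<String> fromFull = full.getTrimmedStringCollection(key);
        Collection<String> fromDefaults = defaultsOnly.getTrimmedStringCollection(key);

        // Whatever survives the subtraction was contributed by a site file.
        fromFull.removeAll(fromDefaults);
        System.out.println("site-configured values for " + key + ": " + fromFull);
      }
    }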

  }

  void refreshNodes() throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    checkSuperuserPrivilege();
    getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
  }

  }
 
  @Test
  public void testRetryCacheConfig() {
    // By default retry configuration should be enabled
    Configuration conf = new HdfsConfiguration();
    Assert.assertNotNull(FSNamesystem.initRetryCache(conf));
   
    // If retry cache is disabled, it should not be created
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
    Assert.assertNull(FSNamesystem.initRetryCache(conf));
  }
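The test shows the general toggle pattern: build an HdfsConfiguration, flip a boolean key with setBoolean, and check that the component honors it. A small sketch along the same lines; the default of true matches the test's expectation above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RetryCacheToggle {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Enabled by default, hence the non-null cache in the test above.
        boolean enabled = conf.getBoolean(
            DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
        System.out.println("retry cache enabled by default: " + enabled);

        // Flipping the key is all the test needs to exercise both code paths.
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
      }
    }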

    zkServer = InMemoryZKServer.builder().build();
    zkServer.startAndWait();

    // Start the mini DFS cluster that backs the YARN mini cluster
    LOG.info("Starting Mini DFS on path {}", folder);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, folder.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    config = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

    if (YarnUtils.isHadoop20()) {
      // ... (Hadoop 2.0-specific setup truncated in the original listing)
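The copy constructor wraps an existing Configuration before handing it to MiniDFSCluster. A minimal test-scope sketch, assuming the hadoop-hdfs test artifact is on the classpath; the base directory is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsDemo {
      public static void main(String[] args) throws Exception {
        Configuration fsConf = new HdfsConfiguration(new Configuration());
        fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-demo"); // hypothetical path

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(fsConf)
            .numDataNodes(1)
            .build();
        try {
          FileSystem fs = cluster.getFileSystem();
          System.out.println("mini HDFS up at " + fs.getUri());
        } finally {
          cluster.shutdown(); // always tear the cluster down
        }
      }
    }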

  /** Reread include/exclude files. */
  private void refreshHostsReader(Configuration conf) throws IOException {
    // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames.
    // Update the file names and refresh internal includes and excludes list.
    if (conf == null) {
      conf = new HdfsConfiguration();
    }
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  }
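dfs.hosts and dfs.hosts.exclude point at local files listing the hosts a NameNode may accept or must decommission; the refresh above re-reads whichever values the passed-in (or freshly built) HdfsConfiguration resolves. A sketch of setting the keys; the file paths are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HostFilesConfig {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Both files contain one hostname per line; paths are hypothetical.
        conf.set("dfs.hosts", "/etc/hadoop/conf/dfs.hosts");
        conf.set("dfs.hosts.exclude", "/etc/hadoop/conf/dfs.hosts.exclude");

        // The empty string matches the conf.get(..., "") fallbacks in the
        // snippet above, meaning "no file configured".
        System.out.println("include file: " + conf.get("dfs.hosts", ""));
      }
    }

On a live cluster the re-read is triggered administratively, e.g. with hdfs dfsadmin -refreshNodes, which lands in the refreshNodes() method shown earlier.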

    if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
      boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                                               DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
          DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
      Configuration sslConf = new HdfsConfiguration(false);
      sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
          "ssl-server.xml"));
      this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
      if(LOG.isDebugEnabled()) {
        LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
      }
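HdfsConfiguration(false) appears here again: the SSL settings live in their own empty configuration so values from ssl-server.xml never mix with the main HDFS configuration. A sketch, assuming an ssl-server.xml is on the classpath; ssl.server.keystore.location follows Hadoop's standard ssl-server template:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SslConfDemo {
      public static void main(String[] args) {
        // Start empty so only the SSL resource contributes values.
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource("ssl-server.xml");

        // null here simply means no ssl-server.xml was found on the classpath.
        System.out.println("keystore: " + sslConf.get("ssl.server.keystore.location"));
      }
    }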

   * subsequently.
   */
  public static DataNode instantiateDataNode(String args [], Configuration conf,
      SecureResources resources) throws IOException {
    if (conf == null)
      conf = new HdfsConfiguration();
   
    if (args != null) {
      // parse generic hadoop options
      GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
      args = hParser.getRemainingArgs();
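GenericOptionsParser applies the standard Hadoop options (-D key=value, -conf <file>, -fs <uri>, and so on) to the given Configuration and returns the leftover arguments, exactly as the DataNode bootstrap does above. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.GenericOptionsParser;

    public class ParseArgsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // e.g. args = {"-D", "dfs.replication=2", "start"}
        GenericOptionsParser parser = new GenericOptionsParser(conf, args);
        String[] remaining = parser.getRemainingArgs(); // {"start"} in the example

        System.out.println("dfs.replication = " + conf.get("dfs.replication"));
        System.out.println("remaining args: " + String.join(" ", remaining));
      }
    }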

   * @throws IOException on errors
   */
  static void makeBlockPoolDataDir(Collection<File> dataDirs,
      Configuration conf) throws IOException {
    if (conf == null)
      conf = new HdfsConfiguration();

    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission permission = new FsPermission(conf.get(
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
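makeBlockPoolDataDir parses the configured permission string into an FsPermission before creating directories on the local filesystem. A sketch of the same parse-and-mkdirs pattern; the directory path is hypothetical and "700" is used only as a fallback:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DataDirPermissions {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        LocalFileSystem localFS = FileSystem.getLocal(conf);

        // FsPermission accepts the octal string form used by the config key.
        FsPermission permission = new FsPermission(
            conf.get("dfs.datanode.data.dir.perm", "700"));

        Path dir = new Path("/tmp/bp-demo"); // hypothetical directory
        // mkdirs with an explicit permission, as makeBlockPoolDataDir does
        // for each configured data directory.
        localFS.mkdirs(dir, permission);
        System.out.println("created " + dir + " with " + permission);
      }
    }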
