Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem
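
An HA failover exercise: the cluster is transitioned from NN0 to NN1, and the new active's FSNamesystem is queried to verify that no blocks are pending replication, corrupt, or missing before another block and a half is appended to the open stream.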


      LOG.info("Failing over to NN 1");
      cluster.transitionToStandby(0);
      cluster.transitionToActive(1);

      assertTrue(fs.exists(TEST_PATH));
      FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
      BlockManagerTestUtil.updateState(ns1.getBlockManager());
      assertEquals(0, ns1.getPendingReplicationBlocks());
      assertEquals(0, ns1.getCorruptReplicaBlocks());
      assertEquals(0, ns1.getMissingBlocksCount());

      // write another block and a half
      AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);

      stm.close();
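
Safe-mode behavior on the active NameNode: a test file is created (the truncated opening call is restored here as the standard DFSTestUtil.createFile helper), the active is restarted, and getSafemode() must report "Safe mode is ON."; NameNodeAdapter.enterSafeMode() is then called twice to confirm the operation is idempotent.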


    DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3,
        1L);
    restartActive();
    nn0.getRpcServer().transitionToActive(
        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

    FSNamesystem namesystem = nn0.getNamesystem();
    String status = namesystem.getSafemode();
    assertTrue("Bad safemode status: '" + status + "'", status
        .startsWith("Safe mode is ON."));
    NameNodeAdapter.enterSafeMode(nn0, false);
    assertTrue("Failed to enter into safemode in active", namesystem
        .isInSafeMode());
    // Entering safe mode a second time should be a no-op; the NN must
    // still report that it is in safe mode.
    NameNodeAdapter.enterSafeMode(nn0, false);
    assertTrue("Failed to enter into safemode in active", namesystem
        .isInSafeMode());
  }
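
The standby-side counterpart: the original blocks are deleted, the standby is restarted, and the same safemode-status and repeated enterSafeMode() assertions are applied to NN1. The file-creation call truncated at the top is reconstructed to mirror the previous example.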

    DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3,
        1L);
    banner("Deleting the original blocks");
    fs.delete(new Path("/test"), true);
    banner("Restarting standby");
    restartStandby();
    FSNamesystem namesystem = nn1.getNamesystem();
    String status = namesystem.getSafemode();
    assertTrue("Bad safemode status: '" + status + "'", status
        .startsWith("Safe mode is ON."));
    NameNodeAdapter.enterSafeMode(nn1, false);
    assertTrue("Failed to enter into safemode in standby", namesystem
        .isInSafeMode());
    NameNodeAdapter.enterSafeMode(nn1, false);
    assertTrue("Failed to enter into safemode in standby", namesystem
        .isInSafeMode());
  }
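
Setup from a checkpoint-cancellation test: an aborted in-progress edit log full of mkdirs operations, seeded past fsn.getLastInodeId(), is staged in a temp directory and renamed into the shared edits dir. The configuration call truncated at the end is completed on the assumption that it sets dfs.namenode.checkpoint.period to 0, consistent with the "tight loop" comment.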

    // Create an aborted in-progress edit log in the shared edits dir so
    // the standby has something nontrivial to checkpoint (only ~15MB)
    URI sharedUri = cluster.getSharedEditsDir(0, 1);
    File sharedDir = new File(sharedUri.getPath(), "current");
    File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
        "testCheckpointCancellation-tmp");
    FSNamesystem fsn = cluster.getNamesystem(0);
    FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
        fsn.getLastInodeId() + 1);
    String fname = NNStorage.getInProgressEditsFileName(3);
    new File(tmpDir, fname).renameTo(new File(sharedDir, fname));

    // Checkpoint as fast as we can, in a tight loop.
    cluster.getConfiguration(1).setInt(
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
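
Retry-cache preservation across failover: the entries in the active's FSNamesystem.getRetryCache() are snapshotted, the edit log is rolled and tailed by the standby, and after failover the new active must expose a cache of the same size. The loop body truncated at the end is assumed to assert that every old entry is still present.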

  public void testRetryCacheOnStandbyNN() throws Exception {
    // 1. run operations
    DFSTestUtil.runOperations(cluster, dfs, conf, BlockSize, 0);
   
    // check the retry cache on NN0, the current active
    FSNamesystem fsn0 = cluster.getNamesystem(0);
    LightWeightCache<CacheEntry, CacheEntry> cacheSet =
        (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
    assertEquals(20, cacheSet.size());
   
    Map<CacheEntry, CacheEntry> oldEntries =
        new HashMap<CacheEntry, CacheEntry>();
    Iterator<CacheEntry> iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
      oldEntries.put(entry, entry);
    }
   
    // 2. Failover the current standby to active.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
   
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
   
    // 3. check the retry cache on the new active NN
    FSNamesystem fsn1 = cluster.getNamesystem(1);
    cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
        .getRetryCache().getCacheSet();
    assertEquals(20, cacheSet.size());
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
      // every entry recorded before failover must still be present
      assertTrue(oldEntries.containsKey(entry));
    }
  }
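
A JMX round-trip check: each attribute of the Hadoop:service=NameNode,name=NameNodeInfo MBean (HostName, Version, Used, Total, Safemode, NonDfsUsedSpace, PercentRemaining, TotalBlocks, LiveNodes, DeadNodes, NameDirStatuses) is compared against the matching FSNamesystem getter. The truncated tail of the excerpt is completed with the usual per-name-dir assertion and cluster shutdown.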

    try {
      cluster = new MiniDFSCluster(conf, 1, true, null);
      cluster.waitActive();

      FSNamesystem fsn = cluster.getNameNode().getNamesystem();

      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
      // get attribute "HostName"
      String hostname = (String) mbs.getAttribute(mxbeanName, "HostName");
      Assert.assertEquals(fsn.getHostName(), hostname);
      // get attribute "Version"
      String version = (String) mbs.getAttribute(mxbeanName, "Version");
      Assert.assertEquals(fsn.getVersion(), version);
      Assert.assertTrue(version.equals(VersionInfo.getVersion()
              + ", r" + VersionInfo.getRevision()));
      // get attribute "Used"
      Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
      Assert.assertEquals(fsn.getUsed(), used.longValue());
      // get attribute "Total"
      Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
      Assert.assertEquals(fsn.getTotal(), total.longValue());
      // get attribute "safemode"
      String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
      Assert.assertEquals(fsn.getSafemode(), safemode);
      // get attribute nondfs
      Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
      Assert.assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
      // get attribute percentremaining
      Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
          "PercentRemaining"));
      Assert.assertEquals(fsn.getPercentRemaining(), percentremaining
          .floatValue());
      // get attribute Totalblocks
      Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
      Assert.assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
      // get attribute alivenodeinfo
      String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
          "LiveNodes"));
      Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
      // get attribute deadnodeinfo
      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
          "DeadNodes"));
      Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
      // get attribute NameDirStatuses
      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
          "NameDirStatuses"));
      Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
      Map<String, Map<String, String>> statusMap =
        (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      Collection<File> nameDirs = cluster.getNameDirs();
      for (File nameDir : nameDirs) {
        System.out.println("Checking for the presence of " + nameDir +
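
Capacity accounting: per-datanode figures from DFSNodesStatus() are checked for internal consistency (capacity = used + remaining + non-DFS used), then the aggregate FSNamesystem capacity getters are compared against the disk capacity reported by DF, doubled because the MiniDFSCluster datanode uses two data directories on the same disk.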

   
    try {
      cluster = new MiniDFSCluster(conf, 1, true, null);
      cluster.waitActive();
     
      FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
     
      // Ensure the data reported for each data node is right
      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      namesystem.DFSNodesStatus(live, dead);
     
      assertTrue(live.size() == 1);
     
      long used, remaining, configCapacity, nonDFSUsed;
      float percentUsed, percentRemaining;
     
      for (final DatanodeDescriptor datanode : live) {
        used = datanode.getDfsUsed();
        remaining = datanode.getRemaining();
        nonDFSUsed = datanode.getNonDfsUsed();
        configCapacity = datanode.getCapacity();
        percentUsed = datanode.getDfsUsedPercent();
        percentRemaining = datanode.getRemainingPercent();
       
        LOG.info("Datanode configCapacity " + configCapacity
            + " used " + used + " non DFS used " + nonDFSUsed
            + " remaining " + remaining + " perentUsed " + percentUsed
            + " percentRemaining " + percentRemaining);
       
        assertTrue(configCapacity == (used + remaining + nonDFSUsed));
        assertTrue(percentUsed == ((100.0f * (float)used)/(float)configCapacity));
        assertTrue(percentRemaining == ((100.0f * (float)remaining)/(float)configCapacity));
      }  
     
      DF df = new DF(new File(cluster.getDataDirectory()), conf);
    
      //
      // Currently two data directories are created by the data node
      // in the MiniDFSCluster. Each data directory reports capacity
      // equal to the disk capacity of the partition it lives on, so
      // the capacity reported by the data node is twice the actual
      // disk capacity.
      //
      // Multiply the disk capacity and reserved space by two to
      // account for this.
      //
      int numOfDataDirs = 2;
     
      long diskCapacity = numOfDataDirs * df.getCapacity();
      reserved *= numOfDataDirs;
     
      configCapacity = namesystem.getCapacityTotal();
      used = namesystem.getCapacityUsed();
      nonDFSUsed = namesystem.getCapacityUsedNonDFS();
      remaining = namesystem.getCapacityRemaining();
      percentUsed = namesystem.getCapacityUsedPercent();
      percentRemaining = namesystem.getCapacityRemainingPercent();
     
      LOG.info("Data node directory " + cluster.getDataDirectory());
          
      LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "
          + configCapacity + " reserved " + reserved + " used " + used
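
Decommissioning status: datanodes are decommissioned one per iteration, FSNamesystem.getDecommissioningNodes() is polled and each node's progress is checked, and finally refreshNodes() with an empty exclude file returns the nodes to service.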

    Path file2 = new Path("decommission1.dat");
    FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
    Thread.sleep(5000);

    FSNamesystem fsn = cluster.getNamesystem();
    for (int iteration = 0; iteration < numDatanodes; iteration++) {
      String downnode = decommissionNode(fsn, conf, client, localFileSys,
          iteration);
      decommissionedNodes.add(downnode);
      Thread.sleep(5000);
      ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
          .getDecommissioningNodes();
      if (iteration == 0) {
        assertEquals(1, decommissioningNodes.size());
        DatanodeDescriptor decommNode = decommissioningNodes.get(0);
        checkDecommissionStatus(decommNode, 4, 0, 2);
      } else {
        assertEquals(2, decommissioningNodes.size());
        DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
        DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
        checkDecommissionStatus(decommNode1, 4, 4, 2);
        checkDecommissionStatus(decommNode2, 4, 4, 2);
      }
    }
    // Call refreshNodes on FSNamesystem with empty exclude file.
    // This will remove the datanodes from decommissioning list and
    // make them available again.
    writeConfigFile(localFileSys, excludeFile, null);
    fsn.refreshNodes(conf);
    st1.close();
    cleanupFile(fileSys, file1);
    cleanupFile(fileSys, file2);
    cleanupFile(localFileSys, dir);
  }
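
metaSave() writes a dump of namesystem state under hadoop.log.dir. The test forces interesting content by stopping a datanode, waiting for the NameNode to notice it is dead, and raising a file's replication factor before triggering the dump.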

  /**
   * Tests metasave.
   */
  @Test
  public void testMetaSave() throws IOException, InterruptedException {

    final FSNamesystem namesystem = cluster.getNamesystem();

    for (int i = 0; i < 2; i++) {
      Path file = new Path("/filestatus" + i);
      createFile(fileSys, file);
    }

    cluster.stopDataNode(1);
    // wait for namenode to discover that a datanode is dead
    Thread.sleep(15000);
    namesystem.setReplication("/filestatus0", (short) 4);

    namesystem.metaSave("metasave.out.txt");

    // Verification
    String logFile = System.getProperty("hadoop.log.dir") + "/"
        + "metasave.out.txt";
    FileInputStream fstream = new FileInputStream(logFile);
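
A test helper that reaches into FSNamesystem to adjust the LeaseManager's soft and hard lease periods, then interrupts the lease monitor thread (lmthread) so the new limits take effect immediately.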

  /**
   * Set the softLimit and hardLimit of client lease periods
   */
  void setLeasePeriod(long soft, long hard) {
    final FSNamesystem namesystem = getNamesystem();
    namesystem.leaseManager.setLeasePeriod(soft, hard);
    namesystem.lmthread.interrupt();
  }
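
As a usage sketch only (hypothetical test code; the MiniDFSCluster setup and the file whose lease should expire are assumed, and the period values are illustrative), shortening the lease periods lets a lease-recovery test finish in seconds instead of waiting out the default hard limit:

  // Hypothetical values: 0.5s soft / 1s hard, chosen only so the
  // LeaseMonitor expires the lease quickly in a test.
  static final long SHORT_SOFT_PERIOD = 500;
  static final long SHORT_HARD_PERIOD = 1000;

  void forceLeaseExpiry() throws InterruptedException {
    setLeasePeriod(SHORT_SOFT_PERIOD, SHORT_HARD_PERIOD);
    // setLeasePeriod interrupted lmthread, so the monitor re-reads the
    // limits; sleep past the hard limit to let lease recovery kick in
    Thread.sleep(2 * SHORT_HARD_PERIOD);
  }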
