Examples of corruptReplicas()

NumberReplicas is the per-block replica counter that the NameNode's BlockManager.countNodes() returns (in older Hadoop releases it was an inner class of FSNamesystem, returned by FSNamesystem.countNodes()); its corruptReplicas() accessor reports how many replicas of the block are currently marked corrupt. The excerpts below show how HDFS tests and the NameNode itself use that count.
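Since each excerpt is only a fragment, the sketch below shows where the BlockManager, block, and NumberReplicas values in these examples typically come from. It assumes a MiniDFSCluster-based test and the Hadoop 2.x APIs the excerpts rely on; the path, file length, and replication factor are made up for illustration, and imports are omitted as in the excerpts.

  // Sketch only: create a small 3-way replicated file on a MiniDFSCluster,
  // then ask the BlockManager how the first block's replicas are distributed.
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/example");                        // hypothetical test file
    DFSTestUtil.createFile(fs, path, 1024L, (short) 3, 0L);  // 1 KB file, replication 3
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);

    // countNodes() tallies the block's replicas by state; corruptReplicas()
    // is the number of replicas currently marked corrupt (0 for a healthy file).
    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
    NumberReplicas num = blockManager.countNodes(block.getLocalBlock());
    assertEquals(0, num.corruptReplicas());
  } finally {
    cluster.shutdown();
  }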
Examples of org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.corruptReplicas()

 
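  /**
   * Asserts that the block has only live replicas: no excess, corrupt,
   * decommissioned, or stale-node copies, and no under-replicated blocks left.
   */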
  private void validateNumberReplicas(int expectedReplicas) throws IOException {
    NumberReplicas numberReplicas = blockManager.countNodes(block);
    assertThat(numberReplicas.liveReplicas(), is(expectedReplicas));
    assertThat(numberReplicas.excessReplicas(), is(0));
    assertThat(numberReplicas.corruptReplicas(), is(0));
    assertThat(numberReplicas.decommissionedReplicas(), is(0));
    assertThat(numberReplicas.replicasOnStaleNodes(), is(0));
   
    BlockManagerTestUtil.updateState(blockManager);
    assertThat(blockManager.getUnderReplicatedBlocksCount(), is(0L));

Examples of org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.corruptReplicas()

    // There should now be only 1 *location* for the block as the READ_ONLY_SHARED is corrupt
    waitForLocations(1);
   
    // However, the corrupt READ_ONLY_SHARED replica should *not* affect the overall corrupt replicas count
    NumberReplicas numberReplicas = blockManager.countNodes(block);
    assertThat(numberReplicas.corruptReplicas(), is(0));
  }

}

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.corruptReplicas()

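      // Once replication work is re-enabled, waitReplication() blocks until the
      // file is back at replication factor 3; the block should then report no
      // corrupt replicas.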
      nn.getNamesystem().restartReplicationWork();
     
      DFSTestUtil.waitReplication(fs, path, (short)3);
      NumberReplicas num = nn.getNamesystem().countNodes(
          firstBlock.getBlock());
      assertEquals(0, num.corruptReplicas());

    } finally {
      cluster.shutdown();
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.corruptReplicas()

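        // Print the block's per-state replica counts; a block with no usable
        // replicas is flagged as MISSING.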
        // l: == live:, d: == decommissioned c: == corrupt e: == excess
        out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
                  " (replicas:" +
                  " l: " + numReplicas.liveReplicas() +
                  " d: " + numReplicas.decommissionedReplicas() +
                  " c: " + numReplicas.corruptReplicas() +
                  " e: " + numReplicas.excessReplicas() + ") ");

        Collection<DatanodeDescriptor> corruptNodes =
                                      corruptReplicas.getNodes(block);
       

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.corruptReplicas()

      processOverReplicatedBlock(storedBlock, fileReplication, node, delNodeHint);
    }
    // If the file replication has reached desired value
    // we can remove any corrupt replicas the block may have
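    // Cross-check the count from the corruptReplicas map against the count
    // derived from the blockMap, and warn if they disagree.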
    int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
    int numCorruptNodes = num.corruptReplicas();
    if (numCorruptNodes != corruptReplicasCount) {
      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
          storedBlock + ". blockMap has " + numCorruptNodes +
          " but corrupt replicas map has " + corruptReplicasCount);
    }