Examples of FSVolume


Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

  private void setup(FSDataset dataSet) throws IOException {
    // setup replicas map
    ReplicasMap replicasMap = dataSet.volumeMap;
    FSVolume vol = dataSet.volumes.getNextVolume(0);
    ReplicaInfo replicaInfo = new FinalizedReplica(
        blocks[FINALIZED], vol, vol.getDir());
    replicasMap.add(replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(new ReplicaInPipeline(
        blocks[TEMPORARY].getBlockId(),
        blocks[TEMPORARY].getGenerationStamp(), vol,
        vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
   
    replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol,
        vol.createRbwFile(blocks[RBW]).getParentFile(), null);
    replicasMap.add(replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol,
        vol.createRbwFile(blocks[RWR]).getParentFile()));
    replicasMap.add(new ReplicaUnderRecovery(
        new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007)); // 2007 = recovery id
  }
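
The blocks array used throughout setup is indexed by small integer constants, one per replica state. Their declarations are not part of this snippet; a minimal sketch of what they plausibly look like, following the conventional ordering in this test:

  final private static int FINALIZED = 0; // finalized replica
  final private static int TEMPORARY = 1; // temporary replica (e.g. a replication target)
  final private static int RBW = 2;       // replica being written by a client
  final private static int RWR = 3;       // replica waiting to be recovered
  final private static int RUR = 4;       // replica under recovery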

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

 
  private void testAppend(FSDataset dataSet) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp()+1;
    FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED]).getVolume();
    long available = v.getCapacity()-v.getDfsUsed();
    long expectedLen = blocks[FINALIZED].getNumBytes();
    try {
      v.decDfsUsed(-available);
      blocks[FINALIZED].setNumBytes(expectedLen+100);
      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
      Assert.fail("Should not have space to append to replica " + blocks[FINALIZED]);
    } catch (DiskOutOfSpaceException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "Insufficient space for appending to "));
    }
    v.decDfsUsed(available);
    blocks[FINALIZED].setNumBytes(expectedLen);

    newGS = blocks[RBW].getGenerationStamp()+1;
    dataSet.append(blocks[FINALIZED], newGS,
        blocks[FINALIZED].getNumBytes()); // successful
    // ...
  }
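
The failure branch above manufactures a full disk: calling decDfsUsed with a negative delta adds the whole of available to the volume's dfsUsed, so capacity minus dfsUsed drops to roughly zero and the oversized append is rejected. A sketch of the arithmetic being exercised (illustrative names; the real check happens inside FSDataset.append):

  // After v.decDfsUsed(-available):
  long remaining = v.getCapacity() - v.getDfsUsed(); // ~0 bytes free
  // Appending expectedLen+100 bytes now throws
  // DiskOutOfSpaceException("Insufficient space for appending to ...")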

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

  private ExtendedBlock[] setup(String bpid, FSDataset dataSet) throws IOException {
    ExtendedBlock[] blocks = new ExtendedBlock[] {
        new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
        new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
        new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
    };
   
    ReplicasMap replicasMap = dataSet.volumeMap;
    FSVolume vol = dataSet.volumes.getNextVolume(0);
    ReplicaInfo replicaInfo = new FinalizedReplica(
        blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
    replicasMap.add(bpid, replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(bpid, new ReplicaInPipeline(
        blocks[TEMPORARY].getBlockId(),
        blocks[TEMPORARY].getGenerationStamp(), vol,
        vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
   
    replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
        vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);
    replicasMap.add(bpid, replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
        blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
            blocks[RWR].getLocalBlock()).getParentFile()));
    replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(
        blocks[RUR].getLocalBlock(), vol, vol.getDir()), 2007)); // 2007 = recovery id
   
    return blocks;
  }
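
This setup variant is federation-aware: each block is an ExtendedBlock carrying a block pool id, and the pool-local Block is recovered with getLocalBlock() before it is handed to per-pool structures. A small sketch of the round trip, assuming the four-argument constructor used above:

  ExtendedBlock eb = new ExtendedBlock(bpid, 1, 1, 2001); // pool id, block id, length, generation stamp
  Block local = eb.getLocalBlock(); // same id/length/genstamp with the pool id stripped
  assert bpid.equals(eb.getBlockPoolId());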

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

 
  private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp()+1;
    FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
        .getVolume();
    long available = v.getCapacity()-v.getDfsUsed();
    long expectedLen = blocks[FINALIZED].getNumBytes();
    try {
      v.decDfsUsed(bpid, -available);
      blocks[FINALIZED].setNumBytes(expectedLen+100);
      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
      Assert.fail("Should not have space to append to replica " + blocks[FINALIZED]);
    } catch (DiskOutOfSpaceException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "Insufficient space for appending to "));
    }
    v.decDfsUsed(bpid, available);
    blocks[FINALIZED].setNumBytes(expectedLen);

    newGS = blocks[RBW].getGenerationStamp()+1;
    dataSet.append(blocks[FINALIZED], newGS,
        blocks[FINALIZED].getNumBytes()); // successful
    // ...
  }
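
Relative to the earlier testAppend, the only change is per-pool space accounting: decDfsUsed now takes the bpid, while getCapacity and getDfsUsed still report volume-wide totals, so the same exhaustion trick applies. Illustrative sketch:

  v.decDfsUsed(bpid, -available); // charge this pool for all remaining space
  long remaining = v.getCapacity() - v.getDfsUsed(); // volume-wide, now ~0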

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

      out = fs.create(src);
      out.write(writeBuf);
      out.hflush();
      DataNode dn = cluster.getDataNodes().get(0);
      for (FSVolumeInterface v : dn.data.getVolumes()) {
        FSVolume volume = (FSVolume)v;
        File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
        File rbwDir = new File(currentDir, "rbw");
        for (File file : rbwDir.listFiles()) {
          if (isCorrupt && Block.isBlockFilename(file)) {
            new RandomAccessFile(file, "rw").setLength(fileLen-1); // corrupt: truncate by one byte
          }
        }
      }
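
Corruption is simulated by truncating the on-disk block file by a single byte. The same idea as a self-contained helper (the method name is hypothetical, not part of the test):

  // Truncate a block file by one byte to fake a partially written replica.
  static void corruptByTruncation(java.io.File blockFile) throws java.io.IOException {
    try (java.io.RandomAccessFile raf = new java.io.RandomAccessFile(blockFile, "rw")) {
      raf.setLength(raf.length() - 1);
    }
  }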

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

  private ExtendedBlock[] setup(String bpid, FSDataset dataSet) throws IOException {
    ExtendedBlock[] blocks = new ExtendedBlock[] {
        new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
        new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
        new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
    };
   
    ReplicasMap replicasMap = dataSet.volumeMap;
    FSVolume vol = dataSet.volumes.getNextVolume(0);
    ReplicaInfo replicaInfo = new FinalizedReplica(
        blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
    replicasMap.add(bpid, replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(bpid, new ReplicaInPipeline(
        blocks[TEMPORARY].getBlockId(),
        blocks[TEMPORARY].getGenerationStamp(), vol,
        vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
   
    replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
        vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);
    replicasMap.add(bpid, replicaInfo);
    replicaInfo.getBlockFile().createNewFile();
    replicaInfo.getMetaFile().createNewFile();
   
    replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
        blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
            blocks[RWR].getLocalBlock()).getParentFile()));
    replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(
        blocks[RUR].getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007)); // 2007 = recovery id
   
    return blocks;
  }
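
The only difference from the earlier federation-aware setup is the directory handed to FinalizedReplica: vol.getCurrentDir().getParentFile() rather than vol.getDir(). Assuming the usual datanode volume layout, the two are equivalent:

  File current = vol.getCurrentDir();        // <volume root>/current
  File volumeRoot = current.getParentFile(); // <volume root>, i.e. what vol.getDir() returns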

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

 
  private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp()+1;
    final FSVolume v = (FSVolume)dataSet.volumeMap.get(
        bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
    long available = v.getCapacity()-v.getDfsUsed();
    long expectedLen = blocks[FINALIZED].getNumBytes();
    try {
      v.decDfsUsed(bpid, -available);
      blocks[FINALIZED].setNumBytes(expectedLen+100);
      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
      Assert.fail("Should not have space to append to replica " + blocks[FINALIZED]);
    } catch (DiskOutOfSpaceException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "Insufficient space for appending to "));
    }
    v.decDfsUsed(bpid, available);
    blocks[FINALIZED].setNumBytes(expectedLen);

    newGS = blocks[RBW].getGenerationStamp()+1;
    dataSet.append(blocks[FINALIZED], newGS,
        blocks[FINALIZED].getNumBytes()); // successful
    // ...
  }

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume

   
    synchronized FSVolume getNextVolume(long blockSize) throws IOException {
      // volumes may have shrunk on a disk failure; keep the cursor in bounds
      if (curVolume >= volumes.size()) {
        curVolume = 0;
      }
      int startVolume = curVolume;
      long maxAvailable = 0;
      while (true) {
        FSVolume volume = volumes.get(curVolume);
        curVolume = (curVolume + 1) % volumes.size();
        long availableVolumeSize = volume.getAvailable();
        if (availableVolumeSize > blockSize) { return volume; }
        if (availableVolumeSize > maxAvailable) {
          maxAvailable = availableVolumeSize;
        }
        // wrapped around without finding room on any volume
        if (curVolume == startVolume) {
          throw new DiskOutOfSpaceException("Out of space: "
              + "The volume with the most available space (=" + maxAvailable
              + " B) is less than the block size (=" + blockSize + " B).");
        }
      }
    }
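
The policy above is plain round robin: the cursor advances past every probed volume, the first volume with more free space than blockSize is returned, and wrapping back to startVolume without a hit raises DiskOutOfSpaceException, reporting the largest free space seen. The setup snippets above call it with a zero block size:

  FSVolume vol = dataSet.volumes.getNextVolume(0); // blockSize 0: any volume with free space qualifies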
