Class org.apache.hadoop.hdfs.server.datanode.FSDataset

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset.ActiveFile
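
The excerpts below come from FSDataset and its volume map. Judging only from the members they touch (file, threads, wasRecoveredOnStartup, getBytesOnDisk(), getClone(), and a (File, List<Thread>) constructor), ActiveFile is a small record describing an in-progress block write. The following is a minimal sketch of that shape; field types, defaults, and constructor behavior are assumptions, not the actual Hadoop implementation.

  // Hedged sketch of the nested FSDataset.ActiveFile class, reconstructed from
  // the usages on this page (java.io.File and java.util.* imports assumed on the
  // enclosing class).
  static class ActiveFile implements Cloneable {
    final File file;                      // tmp file the block is being written to
    final List<Thread> threads;           // writer threads; recovery interrupts these
    final boolean wasRecoveredOnStartup;  // true if rebuilt during datanode startup
    private volatile long bytesOnDisk;    // bytes persisted so far, read at finalize time

    ActiveFile(File f, List<Thread> list) {
      this.file = f;
      this.threads = (list != null) ? list : new ArrayList<Thread>();
      this.wasRecoveredOnStartup = false; // assumed default for ordinary writes
    }

    long getBytesOnDisk() {
      return bytesOnDisk;
    }

    ActiveFile getClone() throws CloneNotSupportedException {
      return (ActiveFile) super.clone(); // a shallow copy is enough to swap map entries
    }
  }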


    lock.writeLock().lock();
    try {
      //
      // Is it already in the create process?
      //
      ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, b);
      if (activeFile != null) {
        f = activeFile.file;
        threads = activeFile.threads;

        if (!isRecovery) {
          throw new BlockAlreadyExistsException("Block " + b +
                                  " has already been started (though not completed), and thus cannot be created.");
        } else {
          for (Thread thread:threads) {
            thread.interrupt();
          }
        }
        volumeMap.removeOngoingCreates(namespaceId, b);
      }
      FSVolume v = null;
      if (!isRecovery) {
        v = volumes.getNextVolume(blockSize);
        // create temporary file to hold block in the designated volume
        f = createTmpFile(namespaceId, v, b, replicationRequest);
      } else if (f != null) {
        DataNode.LOG.info("Reopen already-open Block for append " + b);
        // create or reuse temporary file to hold block in the designated volume
        v = volumeMap.get(namespaceId, b).getVolume();
        volumeMap.add(namespaceId, b, new DatanodeBlockInfo(v, f,
            DatanodeBlockInfo.UNFINALIZED));
      } else {
        // reopening block for appending to it.
        DataNode.LOG.info("Reopen Block for append " + b);
        v = volumeMap.get(namespaceId, b).getVolume();
        f = createTmpFile(namespaceId, v, b, replicationRequest);
        File blkfile = getBlockFile(namespaceId, b);
        File oldmeta = getMetaFile(namespaceId, b);
        File newmeta = getMetaFile(f, b);

        // rename meta file to tmp directory
        DataNode.LOG.debug("Renaming " + oldmeta + " to " + newmeta);
        if (!oldmeta.renameTo(newmeta)) {
          throw new IOException("Block " + b + " reopen failed. " +
                                " Unable to move meta file  " + oldmeta +
                                " to tmp dir " + newmeta);
        }

        // rename block file to tmp directory
        DataNode.LOG.debug("Renaming " + blkfile + " to " + f);
        if (!blkfile.renameTo(f)) {
          if (!f.delete()) {
            throw new IOException("Block " + b + " reopen failed. " +
                                  " Unable to remove file " + f);
          }
          if (!blkfile.renameTo(f)) {
            throw new IOException("Block " + b + " reopen failed. " +
                                  " Unable to move block file " + blkfile +
                                  " to tmp dir " + f);
          }
        }
      }
      if (f == null) {
        DataNode.LOG.warn("Block " + b + " reopen failed " +
                          " Unable to locate tmp file.");
        throw new IOException("Block " + b + " reopen failed " +
                              " Unable to locate tmp file.");
      }
      // If this is a replication request, then this is not a permanent
      // block yet, it could get removed if the datanode restarts. If this
      // is a write or append request, then it is a valid block.
      if (replicationRequest) {
        volumeMap.add(namespaceId, b, new DatanodeBlockInfo(v));
      } else {
        volumeMap.add(namespaceId, b, new DatanodeBlockInfo(v, f, -1));
      }
      volumeMap.addOngoingCreates(namespaceId, b, new ActiveFile(f, threads));
     
    } finally {
      lock.writeLock().unlock();
    }


  public void finalizeBlockInternal(int namespaceId, Block b, boolean reFinalizeOk)
    throws IOException {
    lock.writeLock().lock();
    DatanodeBlockInfo replicaInfo = volumeMap.get(namespaceId, b);
    try {
      ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, b);
      if (activeFile == null) {
        if (reFinalizeOk) {
          return;
        } else {
          throw new IOException("Block " + b + " is already finalized.");
        }
      }
      File f = activeFile.file;
      if (f == null || !f.exists()) {
        throw new IOException("No temporary file " + f + " for block " + b);
      }
      FSVolume v = replicaInfo.getVolume();
      if (v == null) {
        throw new IOException("No volume for temporary file " + f +
                              " for block " + b);
      }
         
      File dest = null;
      dest = v.addBlock(namespaceId, b, f);
      volumeMap.add(namespaceId, b,
          new DatanodeBlockInfo(v, dest, activeFile.getBytesOnDisk()));
      volumeMap.removeOngoingCreates(namespaceId, b);
    } finally {
      lock.writeLock().unlock();
    }
  }
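
Taken together with the first excerpt, finalizeBlockInternal completes a simple lifecycle: a block under construction has an ongoing-creates entry pointing at its tmp file, and finalizing moves the data into a volume and drops that entry. The self-contained sketch below models only this bookkeeping; the class and method names (OngoingCreatesModel, startWrite, finalizeBlock) are hypothetical, and the write lock in the real code plays the role of the synchronized methods here.

  import java.io.File;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;

  // Hypothetical mini-model of the ongoing-creates bookkeeping shown above.
  class OngoingCreatesModel {
    private final Map<Long, File> ongoing = new HashMap<Long, File>();   // blockId -> tmp file
    private final Map<Long, File> finalized = new HashMap<Long, File>(); // blockId -> final file

    synchronized void startWrite(long blockId, File tmpFile) throws IOException {
      if (ongoing.containsKey(blockId)) {
        // mirrors the BlockAlreadyExistsException path taken for a non-recovery write
        throw new IOException("Block " + blockId + " is already being written");
      }
      ongoing.put(blockId, tmpFile);
    }

    synchronized void finalizeBlock(long blockId, File dest) throws IOException {
      File tmp = ongoing.remove(blockId);
      if (tmp == null) {
        // mirrors the "already finalized" check in finalizeBlockInternal
        throw new IOException("Block " + blockId + " is already finalized");
      }
      finalized.put(blockId, dest); // FSDataset does this via FSVolume.addBlock(...)
    }
  }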

    FSVolume v = blockInfo.getVolume();
    if (v == null) {
      DataNode.LOG.warn("No volume for block " + b);
      return false; // block is not finalized
    }
    ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, b);
    if (activeFile != null) {
      if (validate) {
        File f = activeFile.file;
        if (f == null || !f.exists()) {
          // we should never get into this position.

   */
  public void unfinalizeBlock(int namespaceId, Block b) throws IOException {
    lock.writeLock().lock();
    try {
      // remove the block from in-memory data structure
      ActiveFile activefile = volumeMap.removeOngoingCreates(namespaceId, b);
      if (activefile == null) {
        return;
      }
      volumeMap.remove(namespaceId, b);
     

      if (stored == null) {
        return null;
      }

      ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, stored);
      boolean isRecovery = (activeFile != null) && activeFile.wasRecoveredOnStartup;


      BlockRecoveryInfo info = new BlockRecoveryInfo(stored, isRecovery);
      if (DataNode.LOG.isDebugEnabled()) {

    List<Thread> threads = null;
    // We do not want to create a BBW, hence treat this as a replication
    // request.
    File dstBlockFile = createTmpFile(dstNamespaceId, dstVol, dstBlock, true);
    volumeMap.addOngoingCreates(dstNamespaceId, dstBlock,
        new ActiveFile(dstBlockFile, threads));
    return dstBlockFile;
  }

   *
   * @param block
   * @throws CloneNotSupportedException
   */
  void copyOngoingCreates(Block block) throws CloneNotSupportedException {
    ActiveFile af = ongoingCreates.get(block);
    if (af == null) {
      return;
    }

    ongoingCreates.put(block, af.getClone());
  }
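
Swapping the map entry for af.getClone() gives that block its own ActiveFile instance, presumably so that later updates to one entry do not alias the other. A tiny self-contained illustration of this clone-and-replace idiom, with a hypothetical Counter class standing in for ActiveFile:

  import java.util.HashMap;
  import java.util.Map;

  // Hypothetical stand-in for ActiveFile, used only to show the clone-and-replace idiom.
  class Counter implements Cloneable {
    long bytesOnDisk;

    Counter getClone() throws CloneNotSupportedException {
      return (Counter) super.clone();
    }
  }

  class CloneSwapDemo {
    public static void main(String[] args) throws CloneNotSupportedException {
      Map<Long, Counter> ongoing = new HashMap<Long, Counter>();
      Counter original = new Counter();
      ongoing.put(1L, original);

      // Replace the entry with an independent copy, as copyOngoingCreates does with af.getClone().
      ongoing.put(1L, original.getClone());

      ongoing.get(1L).bytesOnDisk = 42;         // mutate the copy
      System.out.println(original.bytesOnDisk); // prints 0: the original is untouched
    }
  }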
