Examples of PipelineAck


Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        this.setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();
 
        while (!closed && clientRunning && !lastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (LOG.isDebugEnabled()) {
              LOG.debug("DFSClient for block " + block + " " + ack);
            }
            long seqno = ack.getSeqno();
            if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
              continue;
            } else if (seqno == -2) {
              // do nothing (ack for an unknown seqno)
            } else {
              Packet one = null;
              synchronized (ackQueue) {
                one = ackQueue.getFirst();
              }
              if (one.seqno != seqno) {
                throw new IOException("Responseprocessor: Expecting seqno " +
                                      " for block " + block + " " +
                                      one.seqno + " but received " + seqno);
              }
              lastPacketInBlock = one.lastPacketInBlock;
            }

            // process response status from all datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && clientRunning; i--) {
              short reply = ack.getReply(i);
              if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                                      " for block " + block +
                                      " from datanode " +
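
The loop above is the read side of a Writable round trip: a datanode serializes each PipelineAck with write(DataOutput) and the client deserializes it with readFields(DataInput). A minimal sketch of that round trip over an in-memory buffer, using only methods visible in these examples (the class name is illustrative, and this assumes the older short[]-based reply API shown here):

    import java.io.*;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;

    public class PipelineAckRoundTrip {               // illustrative name
      public static void main(String[] args) throws IOException {
        // one status per datanode in the pipeline, first datanode first
        short[] replies = { DataTransferProtocol.OP_STATUS_SUCCESS,
                            DataTransferProtocol.OP_STATUS_SUCCESS };
        PipelineAck sent = new PipelineAck(42L, replies);

        // write side, as a PacketResponder does to replyOut
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        sent.write(new DataOutputStream(buf));

        // read side, as the ResponseProcessor does from blockReplyStream
        PipelineAck received = new PipelineAck();
        received.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));

        assert received.getSeqno() == 42L;
        for (int i = 0; i < received.getNumOfReplies(); i++) {
          assert received.getReply(i) == DataTransferProtocol.OP_STATUS_SUCCESS;
        }
      }
    }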

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

           * as an UNKNOWN value.
           */
          long expected = PipelineAck.UNKOWN_SEQNO;
          long seqno = PipelineAck.UNKOWN_SEQNO;

          PipelineAck ack = new PipelineAck();
          boolean localMirrorError = mirrorError;
            try {
              Packet pkt = null;
              synchronized (this) {
                // wait for a packet to arrive
                while (running && datanode.shouldRun && ackQueue.size() == 0) {
                  if (LOG.isDebugEnabled()) {
                    LOG.debug("PacketResponder " + numTargets +
                              " seqno = " + seqno +
                              " for block " + block +
                              " waiting for local datanode to finish write.");
                  }
                  wait();
                }
                if (!running || !datanode.shouldRun) {
                  break;
                }
                pkt = ackQueue.removeFirst();
                expected = pkt.seqno;
                notifyAll();
              }
              // receive an ack if DN is not the last one in the pipeline
              if (numTargets > 0 && !localMirrorError) {
                // read an ack from downstream datanode
                ack.readFields(mirrorIn);
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets +
                      " for block " + block + " got " + ack);
                }
                seqno = ack.getSeqno();
                // verify seqno
                if (seqno != expected) {
                  throw new IOException("PacketResponder " + numTargets +
                      " for block " + block +
                      " expected seqno:" + expected +
                      " received:" + seqno);
                }
              }
              lastPacketInBlock = pkt.lastPacketInBlock;
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info("PacketResponder " + block + " " + numTargets +
                    " Exception " + StringUtils.stringifyException(ioe));
              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              break;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock && !receiver.finalized) {
              receiver.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(receiver.offsetInBlock);
              datanode.data.finalizeBlock(block);
              datanode.myMetrics.incrBlocksWritten();
              datanode.notifyNamenodeReceivedBlock(block,
                  DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                long offset = 0;
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName, offset,
                      datanode.dnRegistration.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            // construct my ack message
            short[] replies = null;
            if (mirrorError) { // no ack is read
              replies = new short[2];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              replies[1] = DataTransferProtocol.OP_STATUS_ERROR;
            } else {
              short ackLen = numTargets == 0 ? 0 : ack.getNumOfReplies();
              replies = new short[1+ackLen];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              for (int i=0; i<ackLen; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies);
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
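
The reply construction at the end of this responder is the core of the pipeline protocol: each datanode prepends its own status to the statuses received from downstream, so index 0 of the ack the client finally reads always belongs to the first datanode. A hypothetical helper (buildReply is not a Hadoop method) distilling that rule from the snippet:

    // Prepend this datanode's own status to whatever arrived from downstream.
    static PipelineAck buildReply(long expected, PipelineAck downstream,
                                  int numTargets, boolean mirrorError) {
      short[] replies;
      if (mirrorError) {
        // downstream ack could not be read: report this node OK and
        // mark the immediate mirror as the failed one
        replies = new short[] { DataTransferProtocol.OP_STATUS_SUCCESS,
                                DataTransferProtocol.OP_STATUS_ERROR };
      } else {
        short ackLen = numTargets == 0 ? 0 : downstream.getNumOfReplies();
        replies = new short[1 + ackLen];
        replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
        for (int i = 0; i < ackLen; i++) {
          replies[i + 1] = downstream.getReply(i); // preserve downstream order
        }
      }
      return new PipelineAck(expected, replies);
    }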

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        this.setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();
 
        while (!closed && clientRunning && !lastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (LOG.isDebugEnabled()) {
              LOG.debug("DFSClient for block " + block + " " + ack);
            }
           
            // processes response status from all datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && clientRunning; i--) {
              short reply = ack.getReply(i);
              if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                                      " for block " + block +
                                      " from datanode " +
                                      targets[i].getName());
              }
            }

            long seqno = ack.getSeqno();
            assert seqno != PipelineAck.UNKOWN_SEQNO :
              "Ack for unknown seqno should be a failed ack: " + ack;
            if (seqno == Packet.HEART_BEAT_SEQNO) {  // a heartbeat ack
              continue;
            }

            Packet one = null;
            synchronized (ackQueue) {
              one = ackQueue.getFirst();
            }
           
            if (one.seqno != seqno) {
              throw new IOException("Responseprocessor: Expecting seqno " +
                                    " for block " + block + " " +
                                    one.seqno + " but received " + seqno);
            }
            lastPacketInBlock = one.lastPacketInBlock;

            synchronized (ackQueue) {
              assert ack.getSeqno() == lastAckedSeqno + 1;
              lastAckedSeqno = ack.getSeqno();
              ackQueue.removeFirst();
              ackQueue.notifyAll();
            }
          } catch (Exception e) {
            if (!closed) {
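
The synchronized blocks around ackQueue in this version implement a small producer/consumer handshake: the streamer appends a packet when it is sent, the responder removes it only after the matching in-order ack arrives, and notifyAll() wakes threads blocked on lastAckedSeqno. A self-contained model of that handshake (the class and method names here are illustrative, not Hadoop's):

    import java.util.LinkedList;

    class AckQueueModel {
      private final LinkedList<Long> ackQueue = new LinkedList<>(); // pending seqnos
      private long lastAckedSeqno = -1;

      // streamer side: record a packet's seqno once it has been sent
      synchronized void sent(long seqno) {
        ackQueue.addLast(seqno);
      }

      // responder side: consume one ack, enforcing in-order delivery
      synchronized void acked(long seqno) {
        if (seqno != ackQueue.getFirst()) {
          throw new IllegalStateException("expected " + ackQueue.getFirst() +
                                          " but received " + seqno);
        }
        assert seqno == lastAckedSeqno + 1;
        lastAckedSeqno = seqno;
        ackQueue.removeFirst();
        notifyAll(); // wake threads blocked in waitFor()
      }

      // e.g. a flush/close path: block until a given seqno is acked
      synchronized void waitFor(long seqno) throws InterruptedException {
        while (lastAckedSeqno < seqno) {
          wait();
        }
      }
    }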

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();

        while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("DFSClient " + ack);
            }
           
            long seqno = ack.getSeqno();
            // processes response status from datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && dfsClient.clientRunning; i--) {
              final DataTransferProtocol.Status reply = ack.getReply(i);
              if (reply != SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                    " for block " + block +
                    " from datanode " +

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

    sendOut.writeInt(0);           // zero checksum
       
    //ok finally write a block with 0 len
    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");
    new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
    sendRecvData(description, false);
  }
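
In the protocol version these tests exercise, replies are DataTransferProtocol.Status enum values rather than shorts, and PipelineAck.isSuccess() reports whether every reply is SUCCESS. A minimal sketch of reading back the ack this test writes (the buffer plumbing and class name are illustrative):

    import java.io.*;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;

    public class StatusAckRoundTrip {                 // illustrative name
      public static void main(String[] args) throws IOException {
        // write side, matching the expected reply the test builds in recvOut
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new PipelineAck(100, new Status[]{Status.SUCCESS})
            .write(new DataOutputStream(buf));

        // read side, as a client would
        PipelineAck ack = new PipelineAck();
        ack.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        assert ack.getSeqno() == 100;
        assert ack.isSuccess(); // true only when every reply is SUCCESS
      }
    }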

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

   
    // bad data chunk length
    sendOut.writeInt(-1-random.nextInt(oneMil));
    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");
    new PipelineAck(100, new Status[]{ERROR}).write(recvOut);
    sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId,
                 true);

    // test for writing a valid zero size block
    sendBuf.reset();
    recvBuf.reset();
    DataTransferProtocol.Sender.opWriteBlock(sendOut,
        ++newBlockId, 0L, 0,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
        new DatanodeInfo[1], BlockAccessToken.DUMMY_TOKEN);
    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
    sendOut.writeInt(512);         // checksum size
    sendOut.writeInt(8);           // size of packet
    sendOut.writeLong(0);          // OffsetInBlock
    sendOut.writeLong(100);        // sequencenumber
    sendOut.writeBoolean(true);    // lastPacketInBlock

    sendOut.writeInt(0);           // chunk length
    sendOut.writeInt(0);           // zero checksum
    sendOut.flush();
    //ok finally write a block with 0 len
    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");
    new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
    sendRecvData("Writing a zero len block blockid " + newBlockId, false);
   
    /* Test OP_READ_BLOCK */

    // bad block id
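
The writeInt/writeLong sequence above spells out, field by field, the packet header this protocol version expects after the per-block preamble (checksum type and checksum size). A hypothetical helper (not in Hadoop) that names those fields for the zero-length last packet the test sends:

    // Mirrors the field order used by the test above.
    static void writeEmptyLastPacket(DataOutputStream sendOut, long seqno)
        throws IOException {
      sendOut.writeInt(8);         // size of packet
      sendOut.writeLong(0);        // offset in block
      sendOut.writeLong(seqno);    // sequence number
      sendOut.writeBoolean(true);  // last packet in block
      sendOut.writeInt(0);         // chunk length (no data)
      sendOut.writeInt(0);         // zero checksum
      sendOut.flush();
    }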

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

        boolean isInterrupted = false;
        try {
            Packet pkt = null;
            long expected = -2;
            PipelineAck ack = new PipelineAck();
            long seqno = PipelineAck.UNKOWN_SEQNO;
            try {
              if (numTargets != 0 && !mirrorError) {// not the last DN & no mirror error
                // read an ack from downstream datanode
                ack.readFields(mirrorIn);
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets + " got " + ack);
                }
                seqno = ack.getSeqno();
              }
              if (seqno != PipelineAck.UNKOWN_SEQNO || numTargets == 0) {
                synchronized (this) {
                  while (running && datanode.shouldRun && ackQueue.size() == 0) {
                    if (LOG.isDebugEnabled()) {
                      LOG.debug("PacketResponder " + numTargets +
                                " seqno = " + seqno +
                                " for block " + block +
                                " waiting for local datanode to finish write.");
                    }
                    wait();
                  }
                  if (!running || !datanode.shouldRun) {
                    break;
                  }
                  pkt = ackQueue.getFirst();
                  expected = pkt.seqno;
                  if (numTargets > 0 && seqno != expected) {
                    throw new IOException("PacketResponder " + numTargets +
                                          " for block " + block +
                                          " expected seqno:" + expected +
                                          " received:" + seqno);
                  }
                  lastPacketInBlock = pkt.lastPacketInBlock;
                }
              }
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info("PacketResponder " + block + " " + numTargets +
                      " Exception " + StringUtils.stringifyException(ioe));
              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              running = false;
              continue;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock) {
              receiver.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(replicaInfo.getNumBytes());
              datanode.data.finalizeBlock(block);
              datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                long offset = 0;
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName, offset,
                      datanode.dnRegistration.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            // construct my ack message
            Status[] replies = null;
            if (mirrorError) { // ack read error
              replies = new Status[2];
              replies[0] = SUCCESS;
              replies[1] = ERROR;
            } else {
              short ackLen = numTargets == 0 ? 0 : ack.getNumOfReplies();
              replies = new Status[1+ackLen];
              replies[0] = SUCCESS;
              for (int i=0; i<ackLen; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies);
           
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
            }
            if (pkt != null) {
              // remove the packet from the ack queue
              removeAckHead();
              // update bytes acked
              if (replyAck.isSuccess() &&
                  pkt.lastByteInBlock>replicaInfo.getBytesAcked()) {
                replicaInfo.setBytesAcked(pkt.lastByteInBlock);
              }
            }
        } catch (IOException e) {
