Examples of PipelineAck


Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        this.setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();
 
        while (!closed && clientRunning && !lastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (LOG.isDebugEnabled()) {
              LOG.debug("DFSClient for block " + block + " " + ack);
            }
           
            // processes response status from all datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && clientRunning; i--) {
              short reply = ack.getReply(i);
              if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                    " for block " + block +
                    " from datanode " +
                    targets[i].getName());
              }
            }

            long seqno = ack.getSeqno();
            assert seqno != PipelineAck.UNKOWN_SEQNO :
              "Ack for unknown seqno should be a failed ack: " + ack;
            if (seqno == Packet.HEART_BEAT_SEQNO) {  // a heartbeat ack
              continue;
            }

            Packet one = null;
            synchronized (ackQueue) {
              one = ackQueue.getFirst();
            }
           
            if (one.seqno != seqno) {
              throw new IOException("Responseprocessor: Expecting seqno " +
                                    " for block " + block + " " +
                                    one.seqno + " but received " + seqno);
            }
            lastPacketInBlock = one.lastPacketInBlock;

            synchronized (ackQueue) {
              assert ack.getSeqno() == lastAckedSeqno + 1;
              lastAckedSeqno = ack.getSeqno();
              ackQueue.removeFirst();
              ackQueue.notifyAll();
            }
          } catch (Exception e) {
            if (!closed) {
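The loop above relies on write() and readFields() round-tripping an ack unchanged across the reply stream. Below is a minimal standalone sketch of that symmetry against the older inner-class DataTransferProtocol.PipelineAck API shown in this snippet; the class name AckRoundTrip and the in-memory streams are ours, not HDFS code.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;

public class AckRoundTrip {
  public static void main(String[] args) throws IOException {
    // A two-datanode pipeline acknowledging packet seqno 7.
    short[] replies = { DataTransferProtocol.OP_STATUS_SUCCESS,
                        DataTransferProtocol.OP_STATUS_SUCCESS };
    PipelineAck sent = new PipelineAck(7L, replies);

    // Serialize as a datanode's PacketResponder would onto the reply stream.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    sent.write(new DataOutputStream(buf));

    // Deserialize as the ResponseProcessor above does.
    PipelineAck received = new PipelineAck();
    received.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));

    assert received.getSeqno() == 7L;
    assert received.getNumOfReplies() == 2;
  }
}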

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

           * as an UNKNOWN value.
           */
          long expected = PipelineAck.UNKOWN_SEQNO;
          long seqno = PipelineAck.UNKOWN_SEQNO;

          PipelineAck ack = new PipelineAck();
          boolean localMirrorError = mirrorError;
          try {
            Packet pkt = null;
            synchronized (this) {
              // wait for a packet to arrive
              while (running && datanode.shouldRun && ackQueue.size() == 0) {
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets +
                            " seqno = " + seqno +
                            " for block " + block +
                            " waiting for local datanode to finish write.");
                }
                wait();
              }
              if (!running || !datanode.shouldRun) {
                break;
              }
              pkt = ackQueue.removeFirst();
              expected = pkt.seqno;
              notifyAll();
            }
            // receive an ack if DN is not the last one in the pipeline
            if (numTargets > 0 && !localMirrorError) {
              // read an ack from downstream datanode
              ack.readFields(mirrorIn);
              if (LOG.isDebugEnabled()) {
                LOG.debug("PacketResponder " + numTargets +
                    " for block " + block + " got " + ack);
              }
              seqno = ack.getSeqno();
              // verify seqno
              if (seqno != expected) {
                throw new IOException("PacketResponder " + numTargets +
                    " for block " + block +
                    " expected seqno:" + expected +
                    " received:" + seqno);
              }
            }
            lastPacketInBlock = pkt.lastPacketInBlock;
          } catch (InterruptedException ine) {
            isInterrupted = true;
          } catch (IOException ioe) {
            if (Thread.interrupted()) {
              isInterrupted = true;
            } else {
              // continue to run even if can not read from mirror
              // notify client of the error
              // and wait for the client to shut down the pipeline
              mirrorError = true;
              LOG.info("PacketResponder " + block + " " + numTargets +
                  " Exception " + StringUtils.stringifyException(ioe));
            }
          }

          if (Thread.interrupted() || isInterrupted) {
            /* The receiver thread cancelled this thread.
             * We could also check any other status updates from the
             * receiver thread (e.g. if it is ok to write to replyOut).
             * It is prudent to not send any more status back to the client
             * because this datanode has a problem. The upstream datanode
             * will detect that this datanode is bad, and rightly so.
             */
            LOG.info("PacketResponder " + block + " " + numTargets +
                     " : Thread is interrupted.");
            break;
          }

          // If this is the last packet in block, then close block
          // file and finalize the block before responding success
          if (lastPacketInBlock && !receiver.finalized) {
            receiver.close();
            final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
            block.setNumBytes(receiver.offsetInBlock);
            datanode.data.finalizeBlock(block);
            datanode.myMetrics.incrBlocksWritten();
            datanode.notifyNamenodeReceivedBlock(block,
                DataNode.EMPTY_DEL_HINT);
            if (ClientTraceLog.isInfoEnabled() &&
                receiver.clientName.length() > 0) {
              long offset = 0;
              ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                    receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                    "HDFS_WRITE", receiver.clientName, offset,
                    datanode.dnRegistration.getStorageID(), block, endTime-startTime));
            } else {
              LOG.info("Received block " + block +
                       " of size " + block.getNumBytes() +
                       " from " + receiver.inAddr);
            }
          }

          // construct my ack message
          short[] replies = null;
          if (mirrorError) { // no ack is read
            replies = new short[2];
            replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
            replies[1] = DataTransferProtocol.OP_STATUS_ERROR;
          } else {
            short ackLen = numTargets == 0 ? 0 : ack.getNumOfReplies();
            replies = new short[1+ackLen];
            replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
            for (int i=0; i<ackLen; i++) {
              replies[i+1] = ack.getReply(i);
            }
          }
          PipelineAck replyAck = new PipelineAck(expected, replies);
          // send my ack back to upstream datanode
          replyAck.write(replyOut);
          replyOut.flush();
          if (LOG.isDebugEnabled()) {
            LOG.debug("PacketResponder " + numTargets +
                      " for block " + block +
                      " responded an ack: " + replyAck);
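On a mirror error the responder above fabricates a two-entry ack: SUCCESS for itself at index 0 and ERROR standing in for the unreachable downstream node. A hedged sketch of how the client-side loop from the first example then locates the failure; the class and helper names are ours, not HDFS API.

import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;

class AckScanSketch {
  // Scans from the tail exactly like the client loop: index 0 is the
  // datanode nearest the client, so the deepest non-SUCCESS reply
  // identifies the node the client should eject from the pipeline.
  static int firstBadDatanode(PipelineAck ack) {
    for (int i = ack.getNumOfReplies() - 1; i >= 0; i--) {
      if (ack.getReply(i) != DataTransferProtocol.OP_STATUS_SUCCESS) {
        return i;
      }
    }
    return -1; // every datanode in the pipeline reported success
  }
}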


Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

        long totalAckTimeNanos = 0;
        boolean isInterrupted = false;
        try {
          Packet pkt = null;
          long expected = -2;
          PipelineAck ack = new PipelineAck();
          long seqno = PipelineAck.UNKOWN_SEQNO;
          long ackRecvNanoTime = 0;
          try {
            if (type != PacketResponderType.LAST_IN_PIPELINE && !mirrorError) {
              // read an ack from downstream datanode
              ack.readFields(downstreamIn);
              ackRecvNanoTime = System.nanoTime();
              if (LOG.isDebugEnabled()) {
                LOG.debug(myString + " got " + ack);
              }
              // Process an OOB ACK.
              Status oobStatus = ack.getOOBStatus();
              if (oobStatus != null) {
                LOG.info("Relaying an out of band ack of type " + oobStatus);
                sendAckUpstream(ack, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
                    Status.SUCCESS);
                continue;
              }
              seqno = ack.getSeqno();
            }
            if (seqno != PipelineAck.UNKOWN_SEQNO
                || type == PacketResponderType.LAST_IN_PIPELINE) {
              pkt = waitForAckHead(seqno);
              if (!isRunning()) {
                break;
              }
              expected = pkt.seqno;
              if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE
                  && seqno != expected) {
                throw new IOException(myString + "seqno: expected=" + expected
                    + ", received=" + seqno);
              }
              if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
                // The total ack time includes the ack times of downstream
                // nodes.
                // The value is 0 if this responder doesn't have a downstream
                // DN in the pipeline.
                totalAckTimeNanos = ackRecvNanoTime - pkt.ackEnqueueNanoTime;
                // Report the elapsed time from ack send to ack receive minus
                // the downstream ack time.
                long ackTimeNanos = totalAckTimeNanos
                    - ack.getDownstreamAckTimeNanos();
                if (ackTimeNanos < 0) {
                  if (LOG.isDebugEnabled()) {
                    LOG.debug("Calculated invalid ack time: " + ackTimeNanos
                        + "ns.");
                  }
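The OOB branch above bypasses the normal seqno bookkeeping entirely. A small sketch of that dispatch rule against the newer datatransfer API; the class and method names are ours, and note that UNKOWN_SEQNO is the constant's actual (historically misspelled) name.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;

class OobDispatchSketch {
  // An ordinary pipeline ack carries no OOB status, so null is the common
  // case; a non-null status (e.g. a restarting datanode) must be relayed
  // upstream immediately instead of being matched against the ack queue.
  static boolean isOutOfBand(PipelineAck ack) {
    return ack.getOOBStatus() != null;
  }

  // OOB acks are forwarded with the UNKOWN_SEQNO marker, never a real seqno.
  static long seqnoToRelay(PipelineAck ack) {
    return isOutOfBand(ack) ? PipelineAck.UNKOWN_SEQNO : ack.getSeqno();
  }
}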

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

          throw new IOException("Shutting down writer and responder "
              + "since the down streams reported the data sent by this "
              + "thread is corrupt");
        }
      }
      PipelineAck replyAck = new PipelineAck(seqno, replies,
          totalAckTimeNanos);
      if (replyAck.isSuccess()
          && offsetInBlock > replicaInfo.getBytesAcked()) {
        replicaInfo.setBytesAcked(offsetInBlock);
      }
      // send my ack back to upstream datanode
      long begin = Time.monotonicNow();
      replyAck.write(upstreamOut);
      upstreamOut.flush();
      long duration = Time.monotonicNow() - begin;
      if (duration > datanodeSlowLogThresholdMs) {
        LOG.warn("Slow PacketResponder send ack to upstream took " + duration
            + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
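The isSuccess() gate above is what keeps bytesAcked monotonic: acknowledged bytes advance only once the entire downstream pipeline has confirmed the packet. A hedged sketch of that rule in isolation, assuming isSuccess() holds only when every status the ack carries is SUCCESS; the class and method names are ours.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;

class BytesAckedSketch {
  // The watermark only moves forward, and only on a fully successful ack.
  static long newBytesAcked(PipelineAck replyAck, long offsetInBlock,
                            long currentBytesAcked) {
    return (replyAck.isSuccess() && offsetInBlock > currentBytesAcked)
        ? offsetInBlock : currentBytesAcked;
  }
}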

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

        long totalAckTimeNanos = 0;
        boolean isInterrupted = false;
        try {
            Packet pkt = null;
            long expected = -2;
            PipelineAck ack = new PipelineAck();
            long seqno = PipelineAck.UNKOWN_SEQNO;
            long ackRecvNanoTime = 0;
            try {
              if (type != PacketResponderType.LAST_IN_PIPELINE
                  && !mirrorError) {
                // read an ack from downstream datanode
                ack.readFields(downstreamIn);
                ackRecvNanoTime = System.nanoTime();
                if (LOG.isDebugEnabled()) {
                  LOG.debug(myString + " got " + ack);
                }
                seqno = ack.getSeqno();
              }
              if (seqno != PipelineAck.UNKOWN_SEQNO
                  || type == PacketResponderType.LAST_IN_PIPELINE) {
                synchronized (this) {
                  while (running && datanode.shouldRun && ackQueue.size() == 0) {
                    if (LOG.isDebugEnabled()) {
                      LOG.debug(myString + ": seqno=" + seqno +
                                " waiting for local datanode to finish write.");
                    }
                    wait();
                  }
                  if (!running || !datanode.shouldRun) {
                    break;
                  }
                  pkt = ackQueue.getFirst();
                  expected = pkt.seqno;
                  if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE
                      && seqno != expected) {
                    throw new IOException(myString + "seqno: expected="
                        + expected + ", received=" + seqno);
                  }
                  if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
                    // The total ack time includes the ack times of downstream nodes.
                    // The value is 0 if this responder doesn't have a downstream
                    // DN in the pipeline.
                    totalAckTimeNanos = ackRecvNanoTime - pkt.ackEnqueueNanoTime;
                    // Report the elapsed time from ack send to ack receive minus
                    // the downstream ack time.
                    long ackTimeNanos = totalAckTimeNanos - ack.getDownstreamAckTimeNanos();
                    if (ackTimeNanos < 0) {
                      if (LOG.isDebugEnabled()) {
                        LOG.debug("Calculated invalid ack time: " + ackTimeNanos + "ns.");
                      }
                    } else {
                      datanode.metrics.addPacketAckRoundTripTimeNanos(ackTimeNanos);
                    }
                  }
                  lastPacketInBlock = pkt.lastPacketInBlock;
                }
              }
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info(myString, ioe);
              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info(myString + ": Thread is interrupted.");
              running = false;
              continue;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock) {
              BlockReceiver.this.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(replicaInfo.getNumBytes());
              datanode.data.finalizeBlock(block);
              datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() && isClient) {
                long offset = 0;
                DatanodeRegistration dnR =
                  datanode.getDNRegistrationForBP(block.getBlockPoolId());
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      inAddr, myAddr, block.getNumBytes(),
                      "HDFS_WRITE", clientname, offset,
                      dnR.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block + " of size "
                    + block.getNumBytes() + " from " + inAddr);
              }
            }

            // construct my ack message
            Status[] replies = null;
            if (mirrorError) { // ack read error
              replies = new Status[2];
              replies[0] = Status.SUCCESS;
              replies[1] = Status.ERROR;
            } else {
              short ackLen = type == PacketResponderType.LAST_IN_PIPELINE? 0
                  : ack.getNumOfReplies();
              replies = new Status[1+ackLen];
              replies[0] = Status.SUCCESS;
              for (int i=0; i<ackLen; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies, totalAckTimeNanos);
           
            if (replyAck.isSuccess() &&
                pkt.offsetInBlock > replicaInfo.getBytesAcked()) {
              replicaInfo.setBytesAcked(pkt.offsetInBlock);
            }

            // send my ack back to upstream datanode
            replyAck.write(upstreamOut);
            upstreamOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug(myString + ", replyAck=" + replyAck);
            }
            if (pkt != null) {
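A compact sketch of the ack-time arithmetic in the HAS_DOWNSTREAM_IN_PIPELINE branch above: each responder measures enqueue-to-receive locally and subtracts the downstream time the PipelineAck itself carries, so the metric captures only this node's hop. The class and helper names are ours.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;

class AckLatencySketch {
  // May come out negative under clock skew or measurement noise; the
  // responder above logs and discards such samples instead of recording
  // them to metrics.
  static long localHopNanos(long ackEnqueueNanoTime, long ackRecvNanoTime,
                            PipelineAck ack) {
    long totalAckTimeNanos = ackRecvNanoTime - ackEnqueueNanoTime;
    return totalAckTimeNanos - ack.getDownstreamAckTimeNanos();
  }
}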

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

      @Override
      public void run() {

        setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();

        while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("DFSClient " + ack);
            }
           
            long seqno = ack.getSeqno();
            // processes response status from datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && dfsClient.clientRunning; i--) {
              final Status reply = ack.getReply(i);
              if (reply != SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                    " for block " + block +
                    " from datanode " +

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

      @Override
      public void run() {

        setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();

        while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            long begin = Time.monotonicNow();
            ack.readFields(blockReplyStream);
            long duration = Time.monotonicNow() - begin;
            if (duration > dfsclientSlowLogThresholdMs
                && ack.getSeqno() != Packet.HEART_BEAT_SEQNO) {
              DFSClient.LOG
                  .warn("Slow ReadProcessor read fields took " + duration
                      + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
                      + ack + ", targets: " + Arrays.asList(targets));
            } else if (DFSClient.LOG.isDebugEnabled()) {
              DFSClient.LOG.debug("DFSClient " + ack);
            }

            long seqno = ack.getSeqno();
            // processes response status from datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && dfsClient.clientRunning; i--) {
              final Status reply = ack.getReply(i);
              // Restart will not be treated differently unless it is
              // the local node or the only one in the pipeline.
              if (PipelineAck.isRestartOOBStatus(reply) &&
                  shouldWaitForRestart(i)) {
                restartDeadline = dfsClient.getConf().datanodeRestartTimeout +
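The client above gives a restart OOB status special treatment only when waiting out the restart makes sense (the local node, or the only node in the pipeline). A hedged sketch of that predicate; the class and method names are ours, and Status is assumed to be the protobuf enum the newer API uses.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

class RestartAckSketch {
  // True when a reply should be waited out as a datanode restart rather
  // than handled as an ordinary bad response.
  static boolean isTransientRestart(Status reply, boolean shouldWaitForRestart) {
    return PipelineAck.isRestartOOBStatus(reply) && shouldWaitForRestart;
  }
}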

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

    hdr.write(sendOut);
    sendOut.writeInt(0);           // zero checksum

    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
    sendRecvData(description, false);
  }
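The test above fakes the datanode's reply by pre-writing the expected PipelineAck (seqno 100, matching the packet header) into recvOut before comparing the streams. A standalone sketch of the same wire symmetry under the newer API; the class name and in-memory streams are ours.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class AckWireSketch {
  public static void main(String[] args) throws IOException {
    // Write the ack the way the test writes it into recvOut.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new PipelineAck(100, new Status[]{ Status.SUCCESS })
        .write(new DataOutputStream(buf));

    // Read it back the way a client-side ResponseProcessor would.
    PipelineAck ack = new PipelineAck();
    ack.readFields(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));
    assert ack.getSeqno() == 100;
    assert ack.getReply(0) == Status.SUCCESS;
  }
}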

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck

      -1 - random.nextInt(oneMil), // bad datalen
      false);
    hdr.write(sendOut);

    sendResponse(Status.SUCCESS, "", null, recvOut);
    new PipelineAck(100, new Status[]{Status.ERROR}).write(recvOut);
    sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId,
                 true);

    // test for writing a valid zero size block
    sendBuf.reset();
    recvBuf.reset();
    sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
        new DatanodeInfo[1], null,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
        DEFAULT_CHECKSUM);

    hdr = new PacketHeader(
      8,     // size of packet
      0,     // OffsetInBlock
      100,   // sequencenumber
      true,  // lastPacketInBlock
      0,     // chunk length
      false);   
    hdr.write(sendOut);
    sendOut.writeInt(0);           // zero checksum
    sendOut.flush();
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
    sendRecvData("Writing a zero len block blockid " + newBlockId, false);
   
    /* Test OP_READ_BLOCK */

    String bpid = cluster.getNamesystem().getBlockPoolId();