Examples of PipelineAck


Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

                }
              }
              lastPacket = true;
            }

            new PipelineAck(expected, new short[]{
                DataTransferProtocol.OP_STATUS_SUCCESS}).write(replyOut);
            replyOut.flush();
        } catch (Exception e) {
          LOG.warn("IOException in BlockReceiver.lastNodeRun: ", e);
          if (running) {
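The snippet above comes from the last datanode in a write pipeline (BlockReceiver.lastNodeRun): with no mirror to wait on, it originates an ack carrying a single SUCCESS status for itself. Below is a minimal sketch of that originating pattern, assuming a 0.20-era hadoop-core jar on the classpath and using only the PipelineAck(long, short[]) constructor and write(DataOutput) calls visible above:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;

public class LastNodeAckSketch {
  public static void main(String[] args) throws IOException {
    long expected = 7;  // hypothetical seqno of the packet being acked
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream replyOut = new DataOutputStream(buf);

    // The tail datanode has no downstream replies, so the ack carries
    // exactly one status: its own.
    new PipelineAck(expected, new short[]{
        DataTransferProtocol.OP_STATUS_SUCCESS}).write(replyOut);
    replyOut.flush();
    System.out.println("serialized ack: " + buf.size() + " bytes");
  }
}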

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

             * as an UNKNOWN value.
             */
            long expected = -2;
            long seqno = -2;

            PipelineAck ack = new PipelineAck();
            try {
              if (!mirrorError) {
                // read an ack from downstream datanode
                ack.readFields(mirrorIn, numTargets);
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets + " got " + ack);
                }
                seqno = ack.getSeqno();
              }
              if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
                ack.write(replyOut); // send keepalive
                replyOut.flush();
                continue;
              } else if (seqno >= 0 || mirrorError) {
                Packet pkt = null;
                synchronized (this) {
                  while (running && datanode.shouldRun && ackQueue.size() == 0) {
                    if (LOG.isDebugEnabled()) {
                      LOG.debug("PacketResponder " + numTargets +
                                " seqno = " + seqno +
                                " for block " + block +
                                " waiting for local datanode to finish write.");
                    }
                    wait();
                  }
                  if (!running || !datanode.shouldRun) {
                    break;
                  }
                  pkt = ackQueue.removeFirst();
                  expected = pkt.seqno;
                  notifyAll();
                  if (seqno != expected && !mirrorError) {
                    throw new IOException("PacketResponder " + numTargets +
                                          " for block " + block +
                                          " expected seqno:" + expected +
                                          " received:" + seqno);
                  }
                  lastPacketInBlock = pkt.lastPacketInBlock;
                }
              }
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info("PacketResponder " + block + " " + numTargets +
                    " Exception " + StringUtils.stringifyException(ioe));

              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              break;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock && !receiver.finalized) {
              receiver.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(receiver.offsetInBlock);
              datanode.data.finalizeBlock(block);
              datanode.myMetrics.blocksWritten.inc();
              datanode.notifyNamenodeReceivedBlock(block,
                  DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                long offset = 0;
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName, offset,
                      datanode.dnRegistration.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            // construct my ack message.
            short[] replies = new short[1 + numTargets];
            if (mirrorError) { // no ack is read
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              // Fill all downstream nodes with ERROR - the client will
              // eject the first node with ERROR status (our mirror)
              for (int i = 1; i < replies.length; i++) {
                replies[i] = DataTransferProtocol.OP_STATUS_ERROR;
              }
            } else {
              replies = new short[1+numTargets];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              for (int i=0; i<numTargets; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies);
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
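The core of this PacketResponder is how it folds the downstream ack into its own reply: slot 0 is always this datanode's status, and the remaining slots either copy the mirror's replies or, when the mirror could not be read, are filled with ERROR so the client ejects the first failed node. A sketch of just that folding step, with local constants standing in for the OP_STATUS_* values (the real values live in DataTransferProtocol):

import java.util.Arrays;

public class AckAggregationSketch {
  // Hypothetical wire values; stand-ins for
  // DataTransferProtocol.OP_STATUS_SUCCESS / OP_STATUS_ERROR.
  static final short SUCCESS = 0;
  static final short ERROR = 1;

  static short[] aggregate(boolean mirrorError, short[] downstreamReplies) {
    short[] replies = new short[1 + downstreamReplies.length];
    replies[0] = SUCCESS;  // this datanode's own status comes first
    for (int i = 0; i < downstreamReplies.length; i++) {
      // When no ack was read from the mirror, every downstream slot is
      // marked ERROR; otherwise the mirror's statuses are copied through.
      replies[i + 1] = mirrorError ? ERROR : downstreamReplies[i];
    }
    return replies;
  }

  public static void main(String[] args) {
    short[] fromMirror = {SUCCESS, SUCCESS};
    System.out.println(Arrays.toString(aggregate(false, fromMirror)));  // [0, 0, 0]
    System.out.println(Arrays.toString(aggregate(true, fromMirror)));   // [0, 1, 1]
  }
}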

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        this.setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();
 
        while (!closed && clientRunning && !lastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream, targets.length);
            if (LOG.isDebugEnabled()) {
              LOG.debug("DFSClient " + ack);
            }
            long seqno = ack.getSeqno();
            if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
              continue;
            } else if (seqno == -2) {
              // This signifies that some pipeline node failed to read downstream
              // and therefore has no idea what sequence number the message corresponds
              // to. So, we don't try to match it up with an ack.
              assert ! ack.isSuccess();
            } else {
              Packet one = null;
              synchronized (ackQueue) {
                one = ackQueue.getFirst();
              }
              if (one.seqno != seqno) {
                throw new IOException("Responseprocessor: Expecting seqno " +
                                      " for block " + block +
                                      one.seqno + " but received " + seqno);
              }
              lastPacketInBlock = one.lastPacketInBlock;
            }

            // processes response status from all datanodes.
            for (int i = 0; i < targets.length && clientRunning; i++) {
              short reply = ack.getReply(i);
              if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                                      " for block " + block +
                                      " from datanode " +
                                      targets[i].getName());
              }
            }

            synchronized (ackQueue) {
              assert ack.getSeqno() == lastAckedSeqno + 1;
              lastAckedSeqno = ack.getSeqno();
              ackQueue.removeFirst();
              ackQueue.notifyAll();
            }
          } catch (Exception e) {
            if (!closed) {
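On the client side, the ResponseProcessor does the mirror-image work: it walks the reply statuses in pipeline order and remembers the index of the first datanode that did not report SUCCESS (errorIndex above), which later drives pipeline recovery. A sketch of that scan, with replies[] standing in for ack.getReply(i) over the pipeline targets:

public class FirstBadDatanodeSketch {
  static final short SUCCESS = 0;  // hypothetical stand-in for OP_STATUS_SUCCESS

  // Returns the pipeline index of the first datanode whose reply was not
  // SUCCESS, or -1 when the whole pipeline acknowledged the packet.
  static int firstBadDatanode(short[] replies) {
    for (int i = 0; i < replies.length; i++) {
      if (replies[i] != SUCCESS) {
        return i;
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    System.out.println(firstBadDatanode(new short[]{0, 0, 1}));  // 2
    System.out.println(firstBadDatanode(new short[]{0, 0, 0}));  // -1
  }
}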

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
      while (running && datanode.shouldRun && !lastPacketInBlock) {
        Packet pkt = null;
          try {
            long expected = PipelineAck.UNKOWN_SEQNO;
            PipelineAck ack = new PipelineAck();
            long seqno = PipelineAck.UNKOWN_SEQNO;
            boolean localMirrorError = mirrorError;
            try {
              synchronized (this) {
                // wait for a packet to arrive
                while (running && datanode.shouldRun && ackQueue.size() == 0) {
                  if (LOG.isDebugEnabled()) {
                    LOG.debug("PacketResponder " + numTargets +
                              " seqno = " + seqno +
                              " for block " + block +
                              " waiting for local datanode to finish write.");
                  }
                  wait();
                }
                if (!running || !datanode.shouldRun) {
                  break;
                }
                pkt = ackQueue.removeFirst();
                expected = pkt.seqno;
                notifyAll();
              }
              // receive an ack if DN is not the last one in the pipeline
              if (numTargets > 0 && !localMirrorError) {
                // read an ack from downstream datanode
                ack.readFields(mirrorIn, numTargets);
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets +
                      " for block " + block + " got " + ack);
                }
                seqno = ack.getSeqno();
                // verify seqno
                if (seqno != expected) {
                  throw new IOException("PacketResponder " + numTargets +
                      " for block " + block +
                      " expected seqno:" + expected +
                      " received:" + seqno);
                }
              }

              assert pkt != null;
              try {
                pkt.waitForPersistent();
              } catch (InterruptedException ine) {
                isInterrupted = true;
                LOG.info("PacketResponder " + block +  " " + numTargets +
                    " : Thread is interrupted when waiting for data persistent.");
                break;
              }
             
              lastPacketInBlock = pkt.lastPacketInBlock;
              if (pkt.seqno >= 0) {
                replicaBeingWritten.setBytesAcked(pkt.offsetInBlock);
              }
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info("PacketResponder " + block + " " + numTargets +
                    " Exception " + StringUtils.stringifyException(ioe));
              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              break;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock && !receiver.finalized) {
              receiver.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(receiver.offsetInBlock);
              datanode.data.finalizeBlock(namespaceId, block);
              datanode.myMetrics.blocksWritten.inc();
              datanode.notifyNamenodeReceivedBlock(namespaceId, block, null);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                long offset = 0;
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName, offset,
                      datanode.getDNRegistrationForNS(namespaceId).getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            // construct my ack message
            short[] replies = null;
            if (mirrorError) { // no ack is read
              replies = new short[2];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              replies[1] = DataTransferProtocol.OP_STATUS_ERROR;
            } else {
              short ackLen = numTargets == 0 ? 0 : ack.getNumOfReplies();
              replies = new short[1+ackLen];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              for (int i=0; i<ackLen; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies);
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
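Two things distinguish this responder from the previous one: it blocks on pkt.waitForPersistent() so a packet is only acked once it is durable, and it advances the replica's acked offset for real packets. A sketch of that bookkeeping, with Replica as a hypothetical stand-in for the replicaBeingWritten object in the excerpt:

public class BytesAckedSketch {
  // Hypothetical stand-in for the replicaBeingWritten object above.
  static class Replica {
    private long bytesAcked;
    synchronized void setBytesAcked(long offset) { bytesAcked = offset; }
    synchronized long getBytesAcked() { return bytesAcked; }
  }

  public static void main(String[] args) {
    Replica replica = new Replica();
    long pktSeqno = 0;
    long pktOffsetInBlock = 64 * 1024;  // hypothetical end offset of the packet

    // Only real packets (seqno >= 0) advance the acked offset; heartbeats
    // carry a negative seqno and must not move it.
    if (pktSeqno >= 0) {
      replica.setBytesAcked(pktOffsetInBlock);
    }
    System.out.println("bytes acked: " + replica.getBytesAcked());
  }
}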

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

                }
              }
              lastPacket = true;
            }

            new PipelineAck(expected, new short[]{
                DataTransferProtocol.OP_STATUS_SUCCESS}).write(replyOut);
            replyOut.flush();
        } catch (Exception e) {
          if (running) {
            LOG.info("PacketResponder " + block + " " + numTargets +

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

             * but has no valid sequence number to use. Thus, -2 is used
             * as an UNKNOWN value.
             */
            long expected = -2;

            PipelineAck ack = new PipelineAck();
            try {
              // read an ack from downstream datanode
              ack.readFields(mirrorIn, numTargets);
              if (LOG.isDebugEnabled()) {
                LOG.debug("PacketResponder " + numTargets + " got " + ack);
              }
              long seqno = ack.getSeqno();
              didRead = true;
              if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
                ack.write(replyOut); // send keepalive
                replyOut.flush();
                continue;
              } else if (seqno == -2) {
                // A downstream node must have failed to read an ack. We need
                // to forward this on.
                assert ! ack.isSuccess();
              } else {
                if (seqno < 0) {
                  throw new IOException("Received an invalid negative sequence number. "
                                        + "Ack = " + ack);
                }
                assert seqno >= 0;

                Packet pkt = null;
                synchronized (this) {
                  while (running && datanode.shouldRun && ackQueue.size() == 0) {
                    if (LOG.isDebugEnabled()) {
                      LOG.debug("PacketResponder " + numTargets +
                                " seqno = " + seqno +
                                " for block " + block +
                                " waiting for local datanode to finish write.");
                    }
                    wait();
                  }
                  pkt = ackQueue.removeFirst();
                  expected = pkt.seqno;
                  notifyAll();
                  if (seqno != expected) {
                    throw new IOException("PacketResponder " + numTargets +
                                          " for block " + block +
                                          " expected seqno:" + expected +
                                          " received:" + seqno);
                  }
                  lastPacketInBlock = pkt.lastPacketInBlock;
                }
              }
            } catch (Throwable e) {
              if (running) {
                LOG.info("PacketResponder " + block + " " + numTargets +
                         " Exception " + StringUtils.stringifyException(e));
                running = false;
              }
            }

            if (Thread.interrupted()) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect a timeout on heartbeats and will declare that
               * this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              running = false;
              continue;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock && !receiver.finalized) {
              receiver.close();
              block.setNumBytes(receiver.offsetInBlock);
              datanode.data.finalizeBlock(block);
              datanode.myMetrics.blocksWritten.inc();
              datanode.notifyNamenodeReceivedBlock(block,
                  DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName,
                      datanode.dnRegistration.getStorageID(), block));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            // construct my ack message.
            short[] replies = new short[1 + numTargets];
            if (!didRead) { // no ack is read
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              // Fill all downstream nodes with ERROR - the client will
              // eject the first node with ERROR status (our mirror)
              for (int i = 1; i < replies.length; i++) {
                replies[i] = DataTransferProtocol.OP_STATUS_ERROR;
              }
            } else {
              replies = new short[1+numTargets];
              replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
              for (int i=0; i<numTargets; i++) {
                replies[i+1] = ack.getReply(i);
              }
            }
            PipelineAck replyAck = new PipelineAck(expected, replies);
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
            }

            // If we forwarded an error response from a downstream datanode
            // and we are acting on behalf of a client, then we quit. The
            // client will drive the recovery mechanism.
            if (!replyAck.isSuccess() && receiver.clientName.length() > 0) {
              running = false;
            }
        } catch (IOException e) {
          if (running) {
            LOG.info("PacketResponder " + block + " " + numTargets +
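Note the shutdown rule at the end of this variant: if the ack it just forwarded was not a success and it is serving a client (clientName is non-empty), the responder stops and lets the client drive recovery. Judging by how the excerpts use it, isSuccess() plausibly amounts to checking that every reply slot is SUCCESS; a sketch of that check under the same stand-in constant as above:

public class AckSuccessSketch {
  static final short SUCCESS = 0;  // hypothetical stand-in for OP_STATUS_SUCCESS

  // An ack succeeds only if every datanode in the pipeline reported SUCCESS.
  static boolean isSuccess(short[] replies) {
    for (short reply : replies) {
      if (reply != SUCCESS) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isSuccess(new short[]{0, 0, 0}));  // true
    System.out.println(isSuccess(new short[]{0, 1, 0}));  // false
  }
}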

Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      }

      public void run() {

        this.setName("ResponseProcessor for block " + block);
        PipelineAck ack = new PipelineAck();
 
        while (!closed && clientRunning && !lastPacketInBlock) {
          // process responses from datanodes.
          try {
            // read an ack from the pipeline
            ack.readFields(blockReplyStream);
            if (LOG.isDebugEnabled()) {
              LOG.debug("DFSClient for block " + block + " " + ack);
            }
           
            // processes response status from all datanodes.
            for (int i = ack.getNumOfReplies()-1; i >= 0 && clientRunning; i--) {
              short reply = ack.getReply(i);
              if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                errorIndex = i; // first bad datanode
                throw new IOException("Bad response " + reply +
                                      " for block " + block +
                                      " from datanode " +
                                      targets[i].getName());
              }
            }

            long seqno = ack.getSeqno();
            assert seqno != PipelineAck.UNKOWN_SEQNO :
              "Ack for unkown seqno should be a failed ack: " + ack;
            if (seqno == Packet.HEART_BEAT_SEQNO) {  // a heartbeat ack
              continue;
            }

            Packet one = null;
            synchronized (ackQueue) {
              one = ackQueue.getFirst();
            }
           
            if (one.seqno != seqno) {
              throw new IOException("Responseprocessor: Expecting seqno " +
                                    " for block " + block + " " +
                                    one.seqno + " but received " + seqno);
            }
            lastPacketInBlock = one.lastPacketInBlock;

            synchronized (ackQueue) {
              assert ack.getSeqno() == lastAckedSeqno + 1;
              lastAckedSeqno = ack.getSeqno();
              ackQueue.removeFirst();
              ackQueue.notifyAll();
            }
          } catch (Exception e) {
            if (!closed) {
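This client-side reader pairs with the writers above: an ack is serialized with write(DataOutput) and recovered with readFields(DataInput). The excerpts show two readFields signatures across versions, with and without a numTargets argument; the round-trip sketch below assumes the single-argument form used here, plus the same 0.20-era jar as the first sketch:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;

public class AckRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Serialize an ack for seqno 3 with two SUCCESS replies...
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new PipelineAck(3, new short[]{
        DataTransferProtocol.OP_STATUS_SUCCESS,
        DataTransferProtocol.OP_STATUS_SUCCESS}).write(new DataOutputStream(buf));

    // ...then read it back, as the ResponseProcessor does from the pipeline.
    PipelineAck ack = new PipelineAck();
    ack.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    System.out.println("seqno=" + ack.getSeqno()
        + " replies=" + ack.getNumOfReplies()
        + " success=" + ack.isSuccess());
  }
}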


Examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck

      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
      while (running && datanode.shouldRun && !lastPacketInBlock) {

        try {
            long expected = -2;
            PipelineAck ack = new PipelineAck();
            long seqno = -2;
            try {
              if (!mirrorError) {
                // read an ack from downstream datanode
                ack.readFields(mirrorIn);
                if (LOG.isDebugEnabled()) {
                  LOG.debug("PacketResponder " + numTargets +
                      " for block " + block + " got " + ack);
                }
                seqno = ack.getSeqno();
              }
              if (seqno >= 0 || mirrorError) {
                Packet pkt = null;
                synchronized (this) {
                  while (running && datanode.shouldRun && ackQueue.size() == 0) {
                    if (LOG.isDebugEnabled()) {
                      LOG.debug("PacketResponder " + numTargets +
                                " seqno = " + seqno +
                                " for block " + block +
                                " waiting for local datanode to finish write.");
                    }
                    wait();
                  }
                  if (!running || !datanode.shouldRun) {
                    break;
                  }
                  pkt = ackQueue.removeFirst();
                  expected = pkt.seqno;
                  notifyAll();
                  if (seqno != expected && !mirrorError) {
                    throw new IOException("PacketResponder " + numTargets +
                                          " for block " + block +
                                          " expected seqno:" + expected +
                                          " received:" + seqno);
                  }
                  lastPacketInBlock = pkt.lastPacketInBlock;
                }
              }
            } catch (InterruptedException ine) {
              isInterrupted = true;
            } catch (IOException ioe) {
              if (Thread.interrupted()) {
                isInterrupted = true;
              } else {
                // continue to run even if can not read from mirror
                // notify client of the error
                // and wait for the client to shut down the pipeline
                mirrorError = true;
                LOG.info("PacketResponder " + block + " " + numTargets +
                    " Exception " + StringUtils.stringifyException(ioe));
              }
            }

            if (Thread.interrupted() || isInterrupted) {
              /* The receiver thread cancelled this thread.
               * We could also check any other status updates from the
               * receiver thread (e.g. if it is ok to write to replyOut).
               * It is prudent to not send any more status back to the client
               * because this datanode has a problem. The upstream datanode
               * will detect that this datanode is bad, and rightly so.
               */
              LOG.info("PacketResponder " + block +  " " + numTargets +
                       " : Thread is interrupted.");
              break;
            }
           
            // If this is the last packet in block, then close block
            // file and finalize the block before responding success
            if (lastPacketInBlock && !receiver.finalized) {
              receiver.close();
              final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
              block.setNumBytes(receiver.offsetInBlock);
              datanode.data.finalizeBlock(block);
              datanode.myMetrics.incrBlocksWritten();
              datanode.notifyNamenodeReceivedBlock(block,
                  DataNode.EMPTY_DEL_HINT);
              if (ClientTraceLog.isInfoEnabled() &&
                  receiver.clientName.length() > 0) {
                long offset = 0;
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      receiver.inAddr, receiver.myAddr, block.getNumBytes(),
                      "HDFS_WRITE", receiver.clientName, offset,
                      datanode.dnRegistration.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block +
                         " of size " + block.getNumBytes() +
                         " from " + receiver.inAddr);
              }
            }

            PipelineAck replyAck;
            if (seqno == PipelineAck.HEART_BEAT.getSeqno()) {
              replyAck = ack;  // continue to send keep alive
            } else {
              // construct my ack message
              short[] replies = null;
              if (mirrorError) { // no ack is read
                replies = new short[2];
                replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
                replies[1] = DataTransferProtocol.OP_STATUS_ERROR;
              } else {
                replies = new short[1+ack.getNumOfReplies()];
                replies[0] = DataTransferProtocol.OP_STATUS_SUCCESS;
                for (int i=0; i<ack.getNumOfReplies(); i++) {
                  replies[i+1] = ack.getReply(i);
                }
              }
              replyAck = new PipelineAck(expected, replies);
            }
            // send my ack back to upstream datanode
            replyAck.write(replyOut);
            replyOut.flush();
            if (LOG.isDebugEnabled()) {
              LOG.debug("PacketResponder " + numTargets +
                        " for block " + block +
                        " responded an ack: " + replyAck);
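This last variant adds one refinement: heartbeat acks are relayed upstream unchanged (replyAck = ack) instead of being rebuilt, so keepalives flow through the pipeline without touching the ack queue. A sketch of that dispatch decision, with the heartbeat seqno assumed rather than read from the real PipelineAck.HEART_BEAT constant:

public class HeartbeatRelaySketch {
  // Assumed value; the real one comes from PipelineAck.HEART_BEAT.getSeqno().
  static final long HEART_BEAT_SEQNO = -1L;

  // True when an incoming ack should be relayed upstream verbatim as a
  // keepalive rather than folded into a new aggregate ack.
  static boolean relayAsHeartbeat(long seqno) {
    return seqno == HEART_BEAT_SEQNO;
  }

  public static void main(String[] args) {
    System.out.println(relayAsHeartbeat(-1));  // true: forward unchanged
    System.out.println(relayAsHeartbeat(12));  // false: build an aggregate ack
  }
}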