Examples of BlockOpResponseProto
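
BlockOpResponseProto is the protobuf message a DataNode sends back after a data-transfer operation such as a block read, write, copy, checksum request, or short-circuit file-descriptor request. Two idioms recur in the examples below: the server builds the message and writes it length-delimited with writeDelimitedTo(out), while clients parse it with BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)) and branch on getStatus().

A minimal sketch of the client-side idiom, assuming the Hadoop HDFS client libraries are on the classpath (the class and helper method here are hypothetical, not part of the HDFS API):

    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class BlockOpResponses {
      // Hypothetical helper: parse a vint-prefixed BlockOpResponseProto and
      // fail fast on any non-SUCCESS status, mirroring the examples below.
      static BlockOpResponseProto expectSuccess(DataInputStream in, String op)
          throws IOException {
        BlockOpResponseProto response =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
        if (response.getStatus() != Status.SUCCESS) {
          if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
            throw new IOException(op + " failed due to an access token error");
          }
          throw new IOException(op + " failed: " + response.getMessage());
        }
        return response;
      }
    }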


Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
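Server side: a DataNode read handler builds a SUCCESS response carrying the checksum info for the read and writes it, length-delimited, back to the client.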

    // Describe the checksum the client should expect for this read.
    ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
      .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
      .setChunkOffset(blockSender.getOffset())
      .build();

    // Write a length-delimited SUCCESS response and flush it to the client.
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setReadOpChecksumInfo(ckInfo)
      .build();
    response.writeDelimitedTo(out);
    out.flush();

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
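Client side of a block copy: parse the vint-prefixed response and turn any non-SUCCESS status into an IOException, calling out access-token failures separately.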

    /** Receive a block copy response from the input stream. */
    private void receiveResponse(DataInputStream in) throws IOException {
      BlockOpResponseProto response = BlockOpResponseProto.parseFrom(
          vintPrefixed(in));
      if (response.getStatus() != Status.SUCCESS) {
        if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
          throw new IOException("block move failed due to access token error");
        }
        throw new IOException("block move failed: " + response.getMessage());
      }
    }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
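A remote block reader checking the response to a read request: verify SUCCESS, then pull the checksum algorithm out of the ReadOpChecksumInfoProto before reading any data.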

    // Wrap the peer's input stream for buffered reads.
    DataInputStream in = new DataInputStream(
        new BufferedInputStream(peer.getInputStream(), bufferSize));

    // Parse the vint-prefixed response and fail unless the status is SUCCESS.
    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
        PBHelper.vintPrefixed(in));
    RemoteBlockReader2.checkSuccess(status, peer, block, file);

    // Extract the checksum the datanode will use for this read.
    ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(
        checksumInfo.getChecksum());
    // TODO: warn when we get CHECKSUM_NULL?

    // Read the first chunk offset.

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
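Setting up a short-circuit (local) read: request file descriptors over a Unix domain socket and branch on the response status. SUCCESS yields the block and metadata file descriptors; ERROR_UNSUPPORTED and unknown errors disable short-circuit access for the socket path; ERROR_ACCESS_TOKEN becomes an InvalidBlockTokenException.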

    DataOutputStream out = new DataOutputStream(
        new BufferedInputStream(peer.getOutputStream()) instanceof Object
            ? new BufferedOutputStream(peer.getOutputStream())
            : new BufferedOutputStream(peer.getOutputStream()));
    new Sender(out).requestShortCircuitFds(block, blockToken, 1);
    DataInputStream in =
        new DataInputStream(peer.getInputStream());
    BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
        PBHelper.vintPrefixed(in));
    DomainSocket sock = peer.getDomainSocket();
    switch (resp.getStatus()) {
    case SUCCESS:
      BlockReaderLocal reader = null;
      // Receive the block and metadata file descriptors over the domain socket.
      byte[] buf = new byte[1];
      FileInputStream[] fis = new FileInputStream[2];
      sock.recvFileInputStreams(fis, buf, 0, buf.length);
      try {
        reader = new BlockReaderLocal(conf, file, block,
            startOffset, len, fis[0], fis[1], datanodeID, verifyChecksum,
            fisCache);
      } finally {
        if (reader == null) {
          IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
        }
      }
      return reader;
    case ERROR_UNSUPPORTED:
      if (!resp.hasShortCircuitAccessVersion()) {
        DFSClient.LOG.warn("short-circuit read access is disabled for " +
            "DataNode " + datanodeID + ".  reason: " + resp.getMessage());
        domSockFactory.disableShortCircuitForPath(sock.getPath());
      } else {
        DFSClient.LOG.warn("short-circuit read access for the file " +
            file + " is disabled for DataNode " + datanodeID +
            ".  reason: " + resp.getMessage());
      }
      return null;
    case ERROR_ACCESS_TOKEN:
      String msg = "access control error while " +
          "attempting to set up short-circuit access to " +
          file + ": " + resp.getMessage();
      DFSClient.LOG.debug(msg);
      throw new InvalidBlockTokenException(msg);
    default:
      DFSClient.LOG.warn("error while attempting to set up short-circuit " +
          "access to " + file + ": " + resp.getMessage());
      domSockFactory.disableShortCircuitForPath(sock.getPath());
      return null;
    }
  }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
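Adding a datanode to an existing write pipeline: ask a datanode that already holds the block to transfer it to the new targets, then wait for a SUCCESS ack.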

        // Ask the source datanode to transfer the block to the new targets.
        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets);
        out.flush();

        // Wait for the ack before declaring the transfer successful.
        BlockOpResponseProto response =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
        if (SUCCESS != response.getStatus()) {
          throw new IOException("Failed to add a datanode");
        }
      } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
      }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
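Opening a write pipeline: send the writeBlock request, then read the connect ack. firstBadLink names the first datanode in the pipeline that failed to connect.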

          // Send the write request that opens the pipeline.
          new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
              nodes, null, recoveryFlag ? stage.getRecoveryStage() : stage,
              nodes.length, block.getNumBytes(), bytesSent, newGS, checksum);
 
          // receive ack for connect
          BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
              PBHelper.vintPrefixed(blockReplyStream));
          pipelineStatus = resp.getStatus();
          firstBadLink = resp.getFirstBadLink();
         
          if (pipelineStatus != SUCCESS) {
            if (pipelineStatus == Status.ERROR_ACCESS_TOKEN) {
              throw new InvalidBlockTokenException(
                  "Got access token error for connect ack with firstBadLink as "
                  + firstBadLink);
            }
          }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
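Computing a file checksum block by block: request each block's checksum and read the OpBlockChecksumResponseProto (including bytes-per-CRC) out of a SUCCESS reply.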

          // get the block's MD5 checksum
          new Sender(out).blockChecksum(block, lb.getBlockToken());

          final BlockOpResponseProto reply =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));

          if (reply.getStatus() != Status.SUCCESS) {
            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
              throw new InvalidBlockTokenException();
            } else {
              throw new IOException("Bad response " + reply + " for block "
                  + block + " from datanode " + datanodes[j]);
            }
          }
         
          OpBlockChecksumResponseProto checksumData =
            reply.getChecksumResponse();

          // read bytes-per-checksum
          final int bpc = checksumData.getBytesPerCrc();
          if (i == 0) { // first block
            bytesPerCRC = bpc;
          }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
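Inferring a file's checksum type: issue a one-byte read and take the checksum type from the ReadOpChecksumInfoProto in the response, closing both streams when done.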

      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
          HdfsConstants.SMALL_BUFFER_SIZE));
      DataInputStream in = new DataInputStream(pair.in);
 
      // Read a single byte just to obtain the checksum info in the response.
      new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
      final BlockOpResponseProto reply =
          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
     
      if (reply.getStatus() != Status.SUCCESS) {
        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
          throw new InvalidBlockTokenException();
        } else {
          throw new IOException("Bad response " + reply + " trying to read "
              + lb.getBlock() + " from datanode " + dn);
        }
      }
     
      return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
    } finally {
      IOUtils.cleanup(null, pair.in, pair.out);
    }
  }

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto
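The DataNode side of pipeline setup: after forwarding a write to its mirror, the DataNode reads the mirror's connect ack (for client writes only) and records its status and firstBadLink.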

          mirrorOut.flush();

          // read connect ack (only for clients, not for replication req)
          if (isClient) {
            BlockOpResponseProto connectAck =
              BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(mirrorIn));
            mirrorInStatus = connectAck.getStatus();
            firstBadLink = connectAck.getFirstBadLink();
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
              LOG.info("Datanode " + targets.length +
                       " got response for connect ack" +
                       " from downstream datanode with firstbadlink as " +
                       firstBadLink);
            }
          }