Package org.apache.hadoop.hdfs.protocol.datatransfer

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.Sender


        NetUtils.getOutputStream(s, writeTimeout),
        HdfsConstants.SMALL_BUFFER_SIZE));
    final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

    // send the request
    new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
        dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
    out.flush();

    return BlockOpResponseProto.parseDelimitedFrom(in);
  }
View Full Code Here


          if (LOG.isDebugEnabled()) {
            LOG.debug("write to " + datanodes[j] + ": "
                + Op.BLOCK_CHECKSUM + ", block=" + block);
          }
          // get block MD5
          new Sender(out).blockChecksum(block, lb.getBlockToken());

          final BlockOpResponseProto reply =
            BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));

          if (reply.getStatus() != Status.SUCCESS) {
View Full Code Here

        out = new DataOutputStream(new BufferedOutputStream(
            NetUtils.getOutputStream(sock, writeTimeout),
            HdfsConstants.SMALL_BUFFER_SIZE));

        //send the TRANSFER_BLOCK request
        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets);

        //ack
        in = new DataInputStream(NetUtils.getInputStream(sock));
        BlockOpResponseProto response =
View Full Code Here

       
        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
        blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

        // send the request
        new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
            nodes, null, recoveryFlag? stage.getRecoveryStage() : stage,
            nodes.length, block.getNumBytes(), bytesSent, newGS, checksum);

        // receive ack for connect
        BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
View Full Code Here

    sock.connect(NetUtils.createSocketAddr(
        destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
        source.getStorageID(), sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());
View Full Code Here

        out = new DataOutputStream(new BufferedOutputStream(
            NetUtils.getOutputStream(sock, writeTimeout),
            HdfsConstants.SMALL_BUFFER_SIZE));

        //send the TRANSFER_BLOCK request
        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets);

        //ack
        in = new DataInputStream(NetUtils.getInputStream(sock));
        BlockOpResponseProto response =
View Full Code Here

       
        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
        blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

        // send the request
        new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
            nodes, null, recoveryFlag? stage.getRecoveryStage() : stage,
            nodes.length, block.getNumBytes(), bytesSent, newGS, checksum);

        // receive ack for connect
        BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
View Full Code Here

        if (isBlockTokenEnabled) {
          accessToken = blockPoolTokenSecretManager.generateToken(b,
              EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
        }

        new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
            stage, 0, 0, 0, 0, blockSender.getChecksum());

        // send data & checksum
        blockSender.sendBlock(out, baseStream, null);
View Full Code Here

          }
          mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut,
              HdfsConstants.SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(unbufMirrorIn);

          new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
              clientname, targets, srcDataNode, stage, pipelineSize,
              minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum,
              cachingStrategy);

          mirrorOut.flush();
View Full Code Here

          HdfsConstants.SMALL_BUFFER_SIZE));
      proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn,
          HdfsConstants.IO_FILE_BUFFER_SIZE));

      /* send request to the proxy */
      new Sender(proxyOut).copyBlock(block, blockToken);

      // receive the response from the proxy
     
      BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
          PBHelper.vintPrefixed(proxyReply));
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.protocol.datatransfer.Sender

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Oracle Corporation (originally of Sun Microsystems, Inc.). Contact: coftware@gmail.com.