Package org.apache.hadoop.hdfs.protocol.datatransfer

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.Sender

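Every example below follows the same wire pattern: wrap the connection's output stream in a buffered DataOutputStream, construct a Sender over it, invoke one of the data-transfer ops (readBlock, writeBlock, transferBlock, blockChecksum, copyBlock, replaceBlock, requestShortCircuitFds), flush, and parse a vint-prefixed BlockOpResponseProto reply from the corresponding input stream. The sketch below shows that pattern in one place, using the readBlock signature that appears in these snippets; the class and method names (SenderReadBlockSketch, requestRead) and the raw stream parameters are illustrative, not taken from HDFS.

import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

// Hypothetical wrapper class; the names here are illustrative, not part of HDFS.
public class SenderReadBlockSketch {

  // Issue a readBlock request over already-connected streams (normally obtained
  // from a Peer or a socket to the datanode) and check the datanode's reply.
  static void requestRead(OutputStream rawOut, InputStream rawIn,
      ExtendedBlock block, Token<BlockTokenIdentifier> blockToken,
      String clientName, long startOffset, long len) throws IOException {
    final DataOutputStream out =
        new DataOutputStream(new BufferedOutputStream(rawOut));
    final DataInputStream in = new DataInputStream(rawIn);

    // Sender serializes the op and its arguments as a protobuf message on the wire.
    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
        true /* verifyChecksum */);
    out.flush();

    // The reply is a vint-prefixed BlockOpResponseProto.
    final BlockOpResponseProto resp =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (resp.getStatus() != Status.SUCCESS) {
      throw new IOException("readBlock failed with status " + resp.getStatus());
    }
    // On SUCCESS the caller goes on to read the checksum header and the
    // packet stream for the requested byte range from 'in'.
  }
}

Each snippet that follows repeats this request/acknowledge shape with a different op; only the arguments and the follow-up I/O differ.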

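Issuing a block read request: a Sender wrapped around the peer's buffered output stream sends readBlock to the datanode, and the caller then reads the response and block data back over the same peer: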
                                     PeerCache peerCache)
                                     throws IOException {
    // in and out will be closed when sock is closed (by the caller)
    final DataOutputStream out =
        new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
    new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
        verifyChecksum);
   
    //
    // Get bytes in block, set streams
    //


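Requesting short-circuit read file descriptors over a domain socket peer, then parsing the vint-prefixed BlockOpResponseProto reply: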
      DomainSocketFactory domSockFactory, boolean verifyChecksum,
      FileInputStreamCache fisCache) throws IOException {
    final DataOutputStream out =
        new DataOutputStream(new BufferedOutputStream(
          peer.getOutputStream()));
    new Sender(out).requestShortCircuitFds(block, blockToken, 1);
    DataInputStream in =
        new DataInputStream(peer.getInputStream());
    BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
        PBHelper.vintPrefixed(in));
    DomainSocket sock = peer.getDomainSocket();

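Transferring a block to additional datanodes: transferBlock is sent, the stream is flushed, and the sender waits for the ack: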
        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
            HdfsConstants.SMALL_BUFFER_SIZE));
        in = new DataInputStream(unbufIn);

        //send the TRANSFER_BLOCK request
        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets);
        out.flush();

        //ack
        BlockOpResponseProto response =

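Setting up a write pipeline: writeBlock carries the target datanodes, construction stage, byte counts, generation stamp, and checksum, and the connect ack is read back as a BlockOpResponseProto: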
          //
          // Xmit header info to datanode
          //
 
          // send the request
          new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
              nodes, null, recoveryFlag ? stage.getRecoveryStage() : stage,
              nodes.length, block.getNumBytes(), bytesSent, newGS, checksum);
 
          // receive ack for connect
          BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(

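Fetching a block checksum: blockChecksum asks each datanode for the block's MD5 and the reply status is checked: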
          if (LOG.isDebugEnabled()) {
            LOG.debug("write to " + datanodes[j] + ": "
                + Op.BLOCK_CHECKSUM + ", block=" + block);
          }
          // get block MD5
          new Sender(out).blockChecksum(block, lb.getBlockToken());

          final BlockOpResponseProto reply =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));

          if (reply.getStatus() != Status.SUCCESS) {

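Probing access to a block: a one-byte readBlock is issued, and an ERROR_ACCESS_TOKEN status in the reply indicates a block token problem: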
    try {
      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
          HdfsConstants.SMALL_BUFFER_SIZE));
      DataInputStream in = new DataInputStream(pair.in);
 
      new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
      final BlockOpResponseProto reply =
          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
     
      if (reply.getStatus() != Status.SUCCESS) {
        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {

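Sending a block replace request (likely from the Balancer): replaceBlock names the block, its access token, the source storage, and the proxy datanode to copy from: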
   
    /* Send a block replace request to the output stream. */
    private void sendRequest(DataOutputStream out) throws IOException {
      final ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
      final Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
      new Sender(out).replaceBlock(eb, accessToken,
          source.getStorageID(), proxySource.getDatanode());
    }

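Forwarding a write downstream: the writeBlock request is relayed to the next (mirror) datanode in the pipeline and the stream is flushed: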
             new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                         HdfsConstants.SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

          new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
              clientname, targets, srcDataNode, stage, pipelineSize,
              minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum);

          mirrorOut.flush();

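Copying a block from a proxy datanode: copyBlock is sent and the proxy's response is read from a buffered input stream: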
          dnConf.socketWriteTimeout);
      proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream,
          HdfsConstants.SMALL_BUFFER_SIZE));

      /* send request to the proxy */
      new Sender(proxyOut).copyBlock(block, blockToken);

      // receive the response from the proxy
      proxyReply = new DataInputStream(new BufferedInputStream(
          NetUtils.getInputStream(proxySock), HdfsConstants.IO_FILE_BUFFER_SIZE));
      BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(

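Transferring a block to target datanodes: a WRITE block token is generated if block tokens are enabled, writeBlock announces the transfer, and blockSender then streams the data and checksums: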
        if (isBlockTokenEnabled) {
          accessToken = blockPoolTokenSecretManager.generateToken(b,
              EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
        }

        new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
            stage, 0, 0, 0, 0, blockSender.getChecksum());

        // send data & checksum
        blockSender.sendBlock(out, baseStream, null);
