Package org.apache.hadoop.hdfs.protocol.datatransfer

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.Op

The excerpts below are variations, from different Hadoop versions, of the DataNode's DataXceiver#run() method: the loop that reads each Op off an incoming client connection and dispatches it via processOp().


  /**
   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;
   
    dataXceiverServer.childSockets.add(s);
   
    try {
     
      InputStream input = socketIn;
      if (dnConf.encryptDataTransfer) {
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + s.getInetAddress() + ". Perhaps the client is running an " +
              "older version of Hadoop which does not support encryption.");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            socketIn.setTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            socketIn.setTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          s.setSoTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }
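
The recurring pattern in these excerpts is a two-level timeout: a generous wait for the first operation, then a short keepalive window for each subsequent one, so clients can cache and reuse DataNode connections instead of reconnecting per request. A minimal standalone sketch of the same loop follows; the timeout values and the handleOp dispatch are hypothetical stand-ins, not the HDFS API.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.net.Socket;
import java.net.SocketTimeoutException;

class KeepaliveOpLoopSketch {
  // Illustrative values; the real code reads these from DNConf.
  static final int FIRST_OP_TIMEOUT_MS = 60_000; // patient wait for the first op
  static final int KEEPALIVE_TIMEOUT_MS = 4_000; // short window on a reused connection

  static void serve(Socket s) throws IOException {
    DataInputStream in = new DataInputStream(s.getInputStream());
    int opsProcessed = 0;
    while (!s.isClosed()) {
      // First op gets the normal timeout; later ops only the keepalive window.
      s.setSoTimeout(opsProcessed == 0 ? FIRST_OP_TIMEOUT_MS : KEEPALIVE_TIMEOUT_MS);
      int opcode;
      try {
        opcode = in.readUnsignedByte(); // stands in for readOp()
      } catch (SocketTimeoutException | EOFException e) {
        break; // no further op arrived: the client is done, close quietly
      }
      s.setSoTimeout(FIRST_OP_TIMEOUT_MS); // restore the normal timeout for the op body
      handleOp(opcode, in);               // hypothetical stand-in for processOp(op)
      opsProcessed++;
    }
  }

  static void handleOp(int opcode, DataInputStream in) throws IOException {
    // opcode dispatch elided in this sketch
  }
}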


  /**
   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;

    try {
      dataXceiverServer.addPeer(peer, Thread.currentThread());
      peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
      InputStream input = socketIn;
      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer &&
          !dnConf.trustedChannelResolver.isTrusted(getClientAddress(peer))){
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
              "is running an older version of Hadoop which does not support " +
              "encryption");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            peer.setReadTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + peer + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          peer.setReadTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while ((peer != null) &&
          (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
      String s = datanode.getDisplayName() + ":DataXceiver error processing "
          + ((op == null) ? "unknown" : op.name()) + " operation "
          + " src: " + remoteAddress + " dst: " + localAddress;
      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
        // For WRITE_BLOCK, it is okay if the replica already exists since
        // client and replication may write the same block to the same datanode
        // at the same time.
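
Two things distinguish this newer revision: the encryption handshake is skipped when the peer already has a secure channel or comes from an address the dnConf.trustedChannelResolver trusts, and the error handler stops treating every Throwable as an ERROR. The excerpt cuts off inside that second change, but the comment already tells the story: a ReplicaAlreadyExistsException on WRITE_BLOCK is an expected race between a client and replication, not a server fault. A hedged sketch of how such a branch plausibly continues (inferred from the comment, not necessarily the exact upstream code):

      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
        // Benign race: two writers targeted the same block on this datanode.
        LOG.info(s + "; " + t);
      } else {
        LOG.error(s, t); // genuine failure, keep the full stack trace
      }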

  /**
   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;

    dataXceiverServer.addPeer(peer);
    try {
      peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
      InputStream input = socketIn;
      if (dnConf.encryptDataTransfer) {
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
              "is running an older version of Hadoop which does not support " +
              "encryption");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            peer.setReadTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + peer + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          peer.setReadTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }
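
A detail worth noting in the encrypted variants: when the magic-number check fails, the server logs at INFO and returns. It never falls back to plaintext, since a silent downgrade would defeat dfs.encrypt.data.transfer. Reduced to its shape, the stream swap looks like the sketch below; negotiate() is a hypothetical helper standing in for DataTransferEncryptor.getEncryptedStreams().

      InputStream in = rawIn;
      OutputStream out = rawOut;
      if (encryptDataTransfer) {
        try {
          IOStreamPair pair = negotiate(rawIn, rawOut); // hypothetical helper
          in = pair.in;   // all further reads go through the decrypting stream
          out = pair.out; // and all writes through the encrypting one
        } catch (InvalidMagicNumberException e) {
          LOG.info("Peer did not start an encryption handshake; closing.");
          return; // refuse the connection rather than downgrade to plaintext
        }
      }
      in = new BufferedInputStream(in, HdfsConstants.SMALL_BUFFER_SIZE);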

  /**
   * Read/write data from/to the DataXceiverServer.
   */
  public void run() {
    int opsProcessed = 0;
    Op op = null;
    dataXceiverServer.childSockets.add(s);
    try {
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            socketInputWrapper.setTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          s.setSoTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }
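
A detail in this variant: the keepalive timeout is set on socketInputWrapper rather than on the Socket itself. When reads go through an NIO channel, Socket#setSoTimeout is generally not honored by the read path, so the timeout has to be applied to the stream wrapper that actually enforces it.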

  /**
   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;

    dataXceiverServer.addPeer(peer);
    try {
      peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
      InputStream input = socketIn;
      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer) {
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
              "is running an older version of Hadoop which does not support " +
              "encryption");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            peer.setReadTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + peer + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          peer.setReadTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }
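
This revision adds a (!peer.hasSecureChannel()) guard in front of the handshake: a peer that already arrived over a channel considered secure (for example, a local domain-socket peer) skips the in-band encryption negotiation entirely.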

  /**
   * Read/write data from/to the DataXceiverServer.
   */
  public void run() {
    int opsProcessed = 0;
    Op op = null;
    dataXceiverServer.childSockets.add(s);
    try {
      int stdTimeout = s.getSoTimeout();

      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            s.setSoTimeout(dnConf.socketKeepaliveTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          s.setSoTimeout(stdTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }
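
Unlike the later variants, this older one snapshots the socket's configured timeout with getSoTimeout() and restores that snapshot, so whatever SO_TIMEOUT was set up elsewhere survives the keepalive dance. The idiom in isolation (a sketch; readNextOp is a hypothetical stand-in for readOp()):

      int stdTimeout = s.getSoTimeout();    // remember whatever was configured
      if (opsProcessed != 0) {
        s.setSoTimeout(keepaliveTimeoutMs); // shrink only for the idle wait
      }
      Op op = readNextOp();                 // may time out; see the loop above
      s.setSoTimeout(stdTimeout);           // put the original value back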

  /**
   * Read/write data from/to the DataXceiverServer.
   */
  public void run() {
    int opsProcessed = 0;
    Op op = null;
    dataXceiverServer.childSockets.add(s);
    try {
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            socketInputWrapper.setTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          s.setSoTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
            + datanode.getXceiverCount());
      }
      // ... (connection cleanup elided)
    }
  }

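
All of these excerpts funnel into readOp()/processOp(op): each data-transfer operation arrives as a one-byte opcode that resolves to an Op constant. A simplified sketch of that enum shape follows; the opcode values are illustrative of the pattern, so consult the Hadoop source for the real constants.

import java.io.DataInput;
import java.io.IOException;

enum OpSketch {
  // One byte on the wire per operation; values shown are for illustration.
  WRITE_BLOCK((byte) 80),
  READ_BLOCK((byte) 81),
  COPY_BLOCK((byte) 84),
  BLOCK_CHECKSUM((byte) 85);

  public final byte code;

  OpSketch(byte code) {
    this.code = code;
  }

  /** Read one opcode byte off the stream and resolve it, as readOp() does. */
  static OpSketch read(DataInput in) throws IOException {
    byte code = in.readByte();
    for (OpSketch op : values()) {
      if (op.code == code) {
        return op;
      }
    }
    throw new IOException("Unknown op code: " + code);
  }
}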
