Package org.apache.hadoop.hdfs.net

Examples of org.apache.hadoop.hdfs.net.Peer
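Peer abstracts a single open connection between an HDFS client (or DataNode) and a remote peer, whether it is backed by a plain TCP Socket or by a UNIX domain socket (DomainPeer). It exposes the connection's input and output streams, read/write timeouts, and close()/isClosed(), and instances are pooled in a PeerCache so data-transfer connections can be reused. The fragments below are excerpts from the Hadoop HDFS sources and tests that create, cache, and consume peers; each one is followed by a short note, and a couple of hedged sketches illustrate the recurring patterns.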


    // The socket is still in the cache, because the client does not
    // notice that it is closed until it tries to read from it again.
    assertEquals(1, dfsClient.peerCache.size());
   
    // Take it out of the cache - reading should
    // give an EOF.
    Peer peer = dfsClient.peerCache.get(dn.getDatanodeId(), false);
    assertNotNull(peer);
    assertEquals(-1, peer.getInputStream().read());
  }
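The fragment above appears to come from a DFSClient keepalive test: the DataNode end of a cached connection has gone away, the Peer is still sitting in dfsClient.peerCache, and the next read on its input stream returns -1 (EOF). Below is a minimal sketch, not taken from that test, of the usual life cycle of a cached Peer; `cache`, `datanodeId`, `addr`, and `newTcpPeer` are illustrative stand-ins, and the get()/put() signatures follow the other fragments on this page.

    // Illustrative only: `cache` is a PeerCache (e.g. dfsClient.peerCache),
    // `datanodeId` a DatanodeID, and newTcpPeer a helper like the one in the
    // next fragment. IOUtils is org.apache.commons.io.IOUtils.
    Peer peer = cache.get(datanodeId, false);   // false = plain TCP peer, not a domain socket
    if (peer == null) {
      peer = newTcpPeer(addr);                  // no cached peer: open a fresh connection
    }
    boolean healthy = false;
    try {
      InputStream in = peer.getInputStream();   // stream from the DataNode
      // ... read block data from `in` ...
      healthy = true;
    } finally {
      if (healthy && !peer.isClosed()) {
        cache.put(datanodeId, peer);            // hand a working peer back for reuse
      } else {
        IOUtils.closeQuietly(peer);             // never cache a broken peer
      }
    }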


    }
    return false;
  }

  private Peer newTcpPeer(InetSocketAddress addr) throws IOException {
    Peer peer = null;
    boolean success = false;
    Socket sock = null;
    try {
      sock = dfsClient.socketFactory.createSocket();
      NetUtils.connect(sock, addr,
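The fragment above is the opening of a private newTcpPeer helper in older DFSInputStream-style client code: create a socket from the client's SocketFactory and connect it to the DataNode address. The excerpt is cut off mid-argument list. A minimal sketch of the same connect-and-wrap pattern follows; the helper name is illustrative, and it assumes the 2.x-era TcpPeerServer.peerFromSocketAndKey signature used in the RemotePeerFactory fragment further down.

  // Illustrative helper, not the upstream method: connect a TCP socket to a
  // DataNode address and wrap it in a Peer, closing the socket if wrapping
  // fails. Assumes javax.net.SocketFactory, org.apache.hadoop.net.NetUtils,
  // org.apache.hadoop.hdfs.net.TcpPeerServer, org.apache.commons.io.IOUtils,
  // and org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey.
  static Peer connectTcpPeer(SocketFactory socketFactory, InetSocketAddress addr,
      int timeoutMs, DataEncryptionKey key) throws IOException {
    Peer peer = null;
    Socket sock = socketFactory.createSocket();
    try {
      NetUtils.connect(sock, addr, timeoutMs);              // connect with a timeout
      sock.setSoTimeout(timeoutMs);                         // read timeout for the data stream
      peer = TcpPeerServer.peerFromSocketAndKey(sock, key); // key may be null on unsecured clusters
      return peer;
    } finally {
      if (peer == null) {
        IOUtils.closeQuietly(sock);                         // wrapping failed: do not leak the socket
      }
    }
  }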

    int cacheTries = 0;
    DomainSocketFactory dsFactory = dfsClient.getDomainSocketFactory();
    BlockReader reader = null;
    final int nCachedConnRetry = dfsClient.getConf().nCachedConnRetry;
    for (; cacheTries < nCachedConnRetry; ++cacheTries) {
      Peer peer = peerCache.get(chosenNode, true);
      if (peer == null) break;
      try {
        boolean allowShortCircuitLocalReads = dfsClient.getConf().
            shortCircuitLocalReads && (!shortCircuitForbidden());
        reader = BlockReaderFactory.newBlockReader(
            dfsClient.getConf(), file, block, blockToken, startOffset,
            len, verifyChecksum, clientName, peer, chosenNode,
            dsFactory, peerCache, fileInputStreamCache,
            allowShortCircuitLocalReads, curCachingStrategy);
        return reader;
      } catch (IOException ex) {
        DFSClient.LOG.debug("Error making BlockReader with DomainSocket. " +
            "Closing stale " + peer, ex);
      } finally {
        if (reader == null) {
          IOUtils.closeQuietly(peer);
        }
      }
    }

    // Try to create a DomainPeer.
    DomainSocket domSock = dsFactory.create(dnAddr, this);
    if (domSock != null) {
      Peer peer = new DomainPeer(domSock);
      try {
        boolean allowShortCircuitLocalReads = dfsClient.getConf().
            shortCircuitLocalReads && (!shortCircuitForbidden());
        reader = BlockReaderFactory.newBlockReader(
            dfsClient.getConf(), file, block, blockToken, startOffset,
            len, verifyChecksum, clientName, peer, chosenNode,
            dsFactory, peerCache, fileInputStreamCache,
            allowShortCircuitLocalReads, curCachingStrategy);
        return reader;
      } catch (IOException e) {
        DFSClient.LOG.warn("failed to connect to " + domSock, e);
      } finally {
        if (reader == null) {
          // The error came from a freshly created DomainPeer, so mark its
          // socket path as bad; the DomainSocketFactory will not try to
          // re-open this path for a while.
          dsFactory.disableDomainSocketPath(domSock.getPath());
          IOUtils.closeQuietly(peer);
        }
      }
    }

    // Look for cached peers.
    for (; cacheTries < nCachedConnRetry; ++cacheTries) {
      Peer peer = peerCache.get(chosenNode, false);
      if (peer == null) break;
      try {
        reader = BlockReaderFactory.newBlockReader(
            dfsClient.getConf(), file, block, blockToken, startOffset,
            len, verifyChecksum, clientName, peer, chosenNode,
            dsFactory, peerCache, fileInputStreamCache, false,
            curCachingStrategy);
        return reader;
      } catch (IOException ex) {
        DFSClient.LOG.debug("Error making BlockReader. Closing stale " +
          peer, ex);
      } finally {
        if (reader == null) {
          IOUtils.closeQuietly(peer);
        }
      }
    }
    if (tcpReadsDisabledForTesting) {
      throw new IOException("TCP reads are disabled.");
    }
    // Try to create a new remote peer.
    Peer peer = newTcpPeer(dnAddr);
    try {
      reader = BlockReaderFactory.newBlockReader(dfsClient.getConf(), file,
          block, blockToken, startOffset, len, verifyChecksum, clientName,
          peer, chosenNode, dsFactory, peerCache, fileInputStreamCache, false,
        curCachingStrategy);
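The long fragment above is the block-reader setup path in the HDFS client (DFSInputStream-era code). It tries, in order: cached domain-socket peers, up to nCachedConnRetry attempts; one freshly created DomainPeer for short-circuit/domain-socket reads; cached TCP peers for whatever is left of the same retry budget; and finally a brand-new TCP peer from newTcpPeer. Every branch follows the same discipline: if BlockReaderFactory.newBlockReader throws, the peer that produced the error is closed quietly (and, for a fresh DomainPeer, its socket path is temporarily disabled) so a stale connection is never retried or leaked. The excerpt is cut off before the final try/finally, which presumably mirrors the loops above: return the reader on success, close the newly created peer otherwise.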

          "TCP socket");
    }
    BlockReader blockReader = null;
    while (true) {
      BlockReaderPeer curPeer = null;
      Peer peer = null;
      try {
        curPeer = nextTcpPeer();
        if (curPeer == null) break;
        if (curPeer.fromCache) remainingCacheTries--;
        peer = curPeer.peer;
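The fragment above appears to be from the newer BlockReaderFactory TCP path; the dangling "TCP socket" string is the cut-off tail of an earlier statement. The loop repeatedly asks nextTcpPeer() for a connection (cached first, then fresh), breaks when no candidate is available, decrements remainingCacheTries whenever the candidate came from the cache, and unwraps curPeer.peer to build the block reader.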

   *
   * @return the next DomainPeer, or null if we could not construct one.
   */
  private BlockReaderPeer nextDomainPeer() {
    if (remainingCacheTries > 0) {
      Peer peer = clientContext.getPeerCache().get(datanode, true);
      if (peer != null) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("nextDomainPeer: reusing existing peer " + peer);
        }
        return new BlockReaderPeer(peer, true);
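nextDomainPeer() first consults the shared PeerCache with isDomain=true; while cache tries remain and a cached domain-socket peer exists, it is wrapped in a BlockReaderPeer flagged as coming from the cache. The excerpt ends before the fallback path that, per the javadoc, builds a new domain-socket peer or returns null when none can be constructed.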

   * @throws IOException  If there was an error while constructing the peer
   *                      (such as an InvalidEncryptionKeyException)
   */
  private BlockReaderPeer nextTcpPeer() throws IOException {
    if (remainingCacheTries > 0) {
      Peer peer = clientContext.getPeerCache().get(datanode, false);
      if (peer != null) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("nextTcpPeer: reusing existing peer " + peer);
        }
        return new BlockReaderPeer(peer, true);
      }
    }
    try {
      Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress);
      if (LOG.isTraceEnabled()) {
        LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
      }
      return new BlockReaderPeer(peer, false);
    } catch (IOException e) {
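nextTcpPeer() follows the same shape: reuse a cached TCP peer while cache tries remain, otherwise ask the RemotePeerFactory for a newly connected peer, and wrap either in a BlockReaderPeer whose second argument records whether it came from the cache, so the caller knows whether a failure is worth retrying. The excerpt breaks off at the start of the catch block; per the javadoc above, the IOException (such as an InvalidEncryptionKeyException) propagates to the caller.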

                   DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT));
  }

  @Override
  public void run() {
    Peer peer = null;
    while (datanode.shouldRun && !datanode.shutdownForUpgrade) {
      try {
        peer = peerServer.accept();

        // Make sure the xceiver count is not exceeded
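The trailing DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT line is the cut-off end of the constructor; the run() method below it is the DataNode's accept loop. While the DataNode should keep running and is not shutting down for an upgrade, peerServer.accept() yields a Peer for each incoming data-transfer connection, which is checked against the configured xceiver limit before being handed off to a worker (DataXceiver) thread.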

      setConfiguration(conf).
      setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
          try {
            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
            peer = TcpPeerServer.peerFromSocketAndKey(sock, encryptionKey);
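The fragment above installs an anonymous RemotePeerFactory through a builder-style configuration (setConfiguration / setRemotePeerFactory): it creates a socket from the default SocketFactory, connects it with HdfsServerConstants.READ_TIMEOUT, sets the read timeout, and wraps it via TcpPeerServer.peerFromSocketAndKey. The excerpt stops before the closing try/finally, which in this pattern returns the peer and quietly closes the raw socket if the wrapping step failed, just like the sketch after the newTcpPeer fragment above.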

    Iterator<Value> iter = sockStreamList.iterator();
    while (iter.hasNext()) {
      Value candidate = iter.next();
      iter.remove();
      long ageMs = Time.monotonicNow() - candidate.getTime();
      Peer peer = candidate.getPeer();
      if (ageMs >= expiryPeriod) {
        try {
          peer.close();
        } catch (IOException e) {
          LOG.warn("got IOException closing stale peer " + peer +
                ", which is " + ageMs + " ms old", e);
        }
      } else if (!peer.isClosed()) {
        return peer;
      }
    }
    return null;
  }
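The fragment above is PeerCache's lookup-and-expiry pass over the cached entries for one DataNode: each candidate is unconditionally removed from the list; entries older than the expiry period are closed (logging a warning if the close fails); fresh entries that were closed out from under the cache are silently dropped; and the first fresh, still-open peer is returned. A null return means nothing usable was left.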

    assertEquals(CAPACITY, cache.size());
    assertSame(null, cache.get(dnIds[0], false));

    // Make sure that the other entries are still there
    for (int i = 1; i < CAPACITY; ++i) {
      Peer peer = cache.get(dnIds[i], false);
      assertSame(peers[i], peer);
      assertTrue(!peer.isClosed());
      peer.close();
    }
    assertEquals(1, cache.size());
    cache.close();
  }
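The final fragment is from the PeerCache unit test: with the cache filled one past CAPACITY, the oldest entry (dnIds[0]) has been evicted, the other original peers can still be retrieved and are open, and after they are taken out a single entry remains before cache.close() tears the cache down.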
