Examples of BlockReader


Examples of org.apache.hadoop.hdfs.BlockReader

     
    // Read at most chunkSizeToView bytes, but never past the end of the block.
    int amtToRead = (int) Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
    BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf, file,
        new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
        offsetIntoBlock, amtToRead, true,
        "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
        new DatanodeID(addr.getAddress().getHostAddress(),
            addr.getHostName(), poolId, addr.getPort(), 0, 0), null,
        null, null, false);
       
    final byte[] buf = new byte[amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
      int numRead = amtToRead;
      try {
        // readFully either fills the requested range completely or throws.
        blockReader.readFully(buf, readOffset, amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0) {
          throw new IOException("Could not read data from datanode");
        }
        continue;
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
    blockReader.close();
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
  }
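A side note on the loop above: readFully either reads the whole requested range or throws, and if the retries are exhausted the IOException propagates past blockReader.close(), leaking the reader. A minimal sketch of the same loop with the close guaranteed (variable names as in the excerpt; this is a rework, not the original Hadoop code):

    try {
      int readOffset = 0;
      int retries = 2;
      while (amtToRead > 0) {
        try {
          blockReader.readFully(buf, readOffset, amtToRead);
        } catch (IOException e) {
          if (--retries == 0) {
            throw new IOException("Could not read data from datanode", e);
          }
          continue;
        }
        // readFully succeeded, so the whole remaining range was read.
        readOffset += amtToRead;
        amtToRead = 0;
      }
    } finally {
      blockReader.close();
    }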

Examples of org.apache.hadoop.hdfs.BlockReader

  // try reading a block using a BlockReader directly
  private static void tryRead(Configuration conf, LocatedBlock lblock,
      boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    Socket s = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
      s = NetUtils.getDefaultSocketFactory(conf).createSocket();
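This excerpt is cut off right after the socket is created; judging from the companion excerpts on this page, the next steps are presumably connecting and setting the read timeout:

      // Hypothetical continuation, mirroring the other excerpts here:
      s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
      s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);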

Examples of org.apache.hadoop.hdfs.BlockReader

    s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    String file = BlockReaderFactory.getFileName(targetAddr,
        "test-blockpoolid",
        block.getBlockId());
    BlockReader blockReader =
      BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf), file, block,
        lblock.getBlockToken(), 0, -1, true, "TestDataNodeVolumeFailure",
        TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false);
    blockReader.close();
  }
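Both tryRead excerpts stop before the verdict; presumably the open/close above sits in a try/catch whose outcome is compared against shouldSucceed. A hypothetical sketch of that check (not the actual test code; JUnit's Assert is assumed):

    // Hypothetical helper: `caught` holds whatever IOException the
    // BlockReader open/read threw, or null if it succeeded.
    private static void checkOutcome(boolean shouldSucceed, IOException caught) {
      if (shouldSucceed) {
        Assert.assertNull("expected to read the block, but got: " + caught, caught);
      } else {
        Assert.assertNotNull("expected the read to fail with an IOException", caught);
      }
    }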

Examples of org.apache.hadoop.hdfs.BlockReader

(This excerpt begins mid-signature; from the body below, the enclosing method evidently takes a DFSClient dfs, a LocatedBlock lblock, and this OutputStream.)

                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
      } catch (IOException ie) {
        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node: " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = new Socket();
        s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

        String file = BlockReaderFactory.getFileName(targetAddr,
            block.getBlockPoolId(), block.getBlockId());
        blockReader = BlockReaderFactory.newBlockReader(
            conf, s, file, block, lblock.getBlockToken(), 0, -1);
      } catch (IOException ex) {
        // Put the chosen node into the dead list and continue.
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if (bytesRead != block.getNumBytes()) {
        // (The excerpt is cut off mid-statement; a plausible completion:)
        throw new IOException("Recorded block size is " + block.getNumBytes() +
            ", but " + bytesRead + " bytes were read");

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader

  private BlockReader getBlockReader(
    int offset, int lenToRead) throws IOException {
    InetSocketAddress targetAddr = null;
    Socket s = null;
    BlockReader blockReader = null;
    Block block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
    s = new Socket();
    s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader

  /**
   * Verify that if we read an entire block, we send checksumOk
   */
  @Test
  public void testBlockVerification() throws Exception {
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024));
    slurpReader(reader, FILE_SIZE_K * 1024, true);
    verify(reader).checksumOk(reader.dnSock);
    reader.close();
  }
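These DFSClient.BlockReader tests all lean on a slurpReader helper that the excerpts never show. A minimal sketch of what it plausibly does, assuming it reads bytesToRead bytes and, when expectEof is set, verifies the reader is exhausted (hypothetical reconstruction; JUnit assertions assumed):

    private void slurpReader(BlockReader reader, int bytesToRead, boolean expectEof)
        throws IOException {
      byte[] buf = new byte[bytesToRead];
      int nRead = 0;
      while (nRead < bytesToRead) {
        int n = reader.read(buf, nRead, bytesToRead - nRead);
        Assert.assertTrue("unexpected EOF after " + nRead + " bytes", n > 0);
        nRead += n;
      }
      if (expectEof) {
        Assert.assertEquals("expected EOF once the requested bytes were read",
            -1, reader.read(buf, 0, buf.length));
      }
    }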

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader

  /**
   * Test that if we do an incomplete read, we don't call checksumOk
   */
  @Test
  public void testIncompleteRead() throws Exception {
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024));
    slurpReader(reader, FILE_SIZE_K / 2 * 1024, false);

    // We asked the blockreader for the whole file, and only read
    // half of it, so no checksumOk
    verify(reader, never()).checksumOk(reader.dnSock);
    reader.close();
  }

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader

  /**
   * (Excerpt begins mid-comment.) Test that a complete read of the
   * requested range still sends checksumOk, whether that range covers
   * the whole block or not.
   */
  @Test
  public void testCompletePartialRead() throws Exception {
    // Ask for half the file
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024 / 2));
    // And read half the file
    slurpReader(reader, FILE_SIZE_K * 1024 / 2, true);
    verify(reader).checksumOk(reader.dnSock);
    reader.close();
  }

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader

    int[] lengths = new int[] { 30, 300, 512, 513, 1025 };
    for (int startOffset : startOffsets) {
      for (int length : lengths) {
        DFSClient.LOG.info("Testing startOffset = " + startOffset +
                           " and len=" + length);
        BlockReader reader = spy(getBlockReader(startOffset, length));
        slurpReader(reader, length, true);
        verify(reader).checksumOk(reader.dnSock);
        reader.close();
      }
    }
  }

Examples of org.apache.hadoop.io.file.tfile.BCFile.Reader.BlockReader

    public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
        throws IOException {
      readerBCF = new BCFile.Reader(fsdis, fileLength, conf);

      // first, read TFile meta
      BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
      try {
        tfileMeta = new TFileMeta(brMeta);
      } finally {
        brMeta.close();
      }

      comparator = tfileMeta.getComparator();
      // Set begin and end locations.
      begin = new Location(0, 0);
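A note on this last example: BCFile.Reader.BlockReader extends DataInputStream, which is why TFileMeta can deserialize itself straight from brMeta. A hedged usage sketch for reading a custom named meta block ("fileinfo" is a made-up block name; getMetaBlock also throws MetaBlockDoesNotExist if no such block was written):

      // Hypothetical usage: read a user-defined meta block by name.
      BCFile.Reader.BlockReader br = readerBCF.getMetaBlock("fileinfo");
      try {
        String value = br.readUTF();  // BlockReader is a DataInputStream
        System.out.println("fileinfo = " + value);
      } finally {
        br.close();
      }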