Class org.apache.hadoop.hdfs.DFSClient

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream


  /**
   * Verifies that reading a file with the direct read(ByteBuffer) API yields the expected bytes.
   */
  static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
      int readOffset) throws IOException {
    DFSDataInputStream stm = (DFSDataInputStream)fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

    actual.limit(3);

    // Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);

    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
        "A few bytes");
    // Now read the rest of the file.
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
      int nbytes = stm.read(actual);

      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    stm.close();
  }
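The helper arrayFromByteBuffer used above is not part of the excerpt. A minimal sketch of what it plausibly does — copy the bytes read into the direct buffer so far out to a heap array, without disturbing the caller's buffer state:

import java.nio.ByteBuffer;

  static byte[] arrayFromByteBuffer(ByteBuffer buf) {
    // duplicate() shares the bytes but has independent position/limit
    ByteBuffer copy = buf.duplicate();
    copy.clear();                  // position = 0, limit = capacity
    copy.limit(buf.position());    // only the bytes read so far
    byte[] arr = new byte[copy.remaining()];
    copy.get(arr);
    return arr;
  }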


      FileSystem parityFs, boolean stripeVerified, StripeInfo si)
          throws StripeMismatchException {
    // Set remoteRackFlag for each of the input streams and verify the stripe.
    for (int i = 0; i < codec.parityLength + codec.stripeLength; i++) {
      if (parallelReader.streams[i] instanceof DFSDataInputStream) {
        DFSDataInputStream stream =
            (DFSDataInputStream) parallelReader.streams[i];
        if (i < codec.parityLength) {
          //Dealing with parity blocks
          remoteRackFlag[i] =
              !(((DistributedFileSystem)parityFs).getClient().
                  isInLocalRack(NetUtils.createSocketAddr(
                      stream.getCurrentDatanode().getName())));
          if (LOG.isDebugEnabled()) {
            LOG.debug("RemoteRackFlag at index " + i + " is " +
                remoteRackFlag[i]);
          }
          // Verify against the parity blocks.
          if (!stripeVerified) {
            Block currentBlock = stream.getCurrentBlock();
            if (!currentBlock.equals(si.parityBlocks.get(i))) {
              throw new StripeMismatchException("current block " +
                  currentBlock.toString() + " in position " + i + " doesn't "
                  + "match stripe info:" + si);
            }
          }
        } else {
          //Dealing with Source (file) block
          remoteRackFlag[i] =
              !(((DistributedFileSystem)srcFs).getClient().
                  isInLocalRack(NetUtils.createSocketAddr(
                      stream.getCurrentDatanode().getName())));
          if (LOG.isDebugEnabled()) {
            LOG.debug("RemoteRackFlag at index " + i + " is " +
                remoteRackFlag[i]);
          }
          // Verify against the source blocks.
          if (!stripeVerified) {
            Block currentBlock = stream.getCurrentBlock();
            if (!currentBlock.equals(si.srcBlocks.get(
                i - codec.parityLength))) {
              throw new StripeMismatchException("current block " +
                  currentBlock.toString() + " in position " + i + " doesn't "
                  + "match stripe info:" + si);
            }
          }
        }
      }
    }
  }
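The parity and source branches above differ only in which filesystem's client performs the rack check. A hedged sketch of that shared step as a hypothetical helper (the helper name is an assumption; getClient, isInLocalRack, and getCurrentDatanode come from the excerpt):

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.net.NetUtils;

  // True when the stream's current datanode lies outside the local rack
  // of the given filesystem's client.
  static boolean isOnRemoteRack(DistributedFileSystem fs,
      DFSDataInputStream stream) throws IOException {
    String dnName = stream.getCurrentDatanode().getName();
    return !fs.getClient().isInLocalRack(NetUtils.createSocketAddr(dnName));
  }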

    Path p = new Path(fileName);
    for (int pri = 0; pri < 8; pri++) {
      createFile(p, pri);

      ioprioClass = ioprioData = 0;
      DFSDataInputStream in = (DFSDataInputStream) fs.open(p);

      byte[] buffer = new byte[BLOCK_SIZE * 2];
      ReadOptions options = new ReadOptions();
      options.setIoprio(NativeIO.IOPRIO_CLASS_BE, pri);
      in.read(BLOCK_SIZE / 2, buffer, 0, BLOCK_SIZE / 2, options);

      if (NativeIO.isAvailable()) {
        assertTrue(NativeIO.isIoprioPossible());
        assertEquals(NativeIO.IOPRIO_CLASS_BE, ioprioClass);
        assertEquals(pri, ioprioData);
      }
    }
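A minimal sketch of the ReadOptions/ioprio pattern the test exercises, assuming the positional read(long, byte[], int, int, ReadOptions) overload and the NativeIO constants shown in the excerpt:

  ReadOptions options = new ReadOptions();
  // IOPRIO_CLASS_BE is the Linux best-effort scheduling class; its priority
  // data runs from 0 (highest) to 7 (lowest).
  options.setIoprio(NativeIO.IOPRIO_CLASS_BE, 4);
  byte[] buffer = new byte[4096];
  int nread = in.read(0L, buffer, 0, buffer.length, options);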

  public void testRead() throws Exception {
    for (int i = 0; i < TEST_FILE_NUM; ++i) {
      String file = "/tmp" + i + ".txt";
      DFSTestUtil.createFile(fs, new Path(file), FILE_LEN, (short)5, 1L);
     
      DFSDataInputStream in = (DFSDataInputStream)fs.open(new Path(file));
      int numOfRead = 0;
      // read() returns -1 only at end of file; ">= 0" avoids stopping early
      // on a zero-valued byte.
      while (in.read() >= 0) {
        numOfRead++;
      }
      assertEquals(FILE_LEN * (i+1),
          metrics.readSize.getCurrentIntervalValue());
      assertEquals(numOfRead * (i+1),
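Single-byte reads, as in the loop above, drive the metrics one operation at a time but are slow in practice. A hedged sketch of the same count done with buffered reads (the helper name is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

  static long countBytes(FSDataInputStream in) throws IOException {
    byte[] buf = new byte[8192];
    long total = 0;
    int n;
    while ((n = in.read(buf)) != -1) {  // -1 signals end of file
      total += n;
    }
    return total;
  }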

    }
    this.outs = outs;
    for (int i = 0; i < streams.length; i++) {
      this.streams[i] = streams[i];
      if (this.streams[i] instanceof DFSDataInputStream) {
        DFSDataInputStream stream = (DFSDataInputStream)this.streams[i];
        // In directory raiding the block size of each input stream may
        // differ, so the endOffset of each stream must be determined from
        // its own block size.
        List<LocatedBlock> blocks = stream.getAllBlocks();
        if (blocks.size() == 0) {
          this.endOffsets[i] = Long.MAX_VALUE;
          if (computeChecksum) {
            this.checksums[i] = null;
          }
        } else {
          long blockSize = blocks.get(0).getBlockSize();
          this.endOffsets[i] = stream.getPos() + blockSize;
          if (computeChecksum) {
            this.checksums[i] = new CRC32();
          }
        }
      } else {
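A hedged sketch of the per-stream end-offset rule applied above, factored into an illustrative helper (getAllBlocks, getPos, and getBlockSize come from the excerpt):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

  // A stream with no blocks never reaches a block boundary; otherwise the
  // boundary is the current position plus that file's own block size.
  static long endOffsetOf(DFSDataInputStream stream) throws IOException {
    List<LocatedBlock> blocks = stream.getAllBlocks();
    if (blocks.isEmpty()) {
      return Long.MAX_VALUE;
    }
    return stream.getPos() + blocks.get(0).getBlockSize();
  }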

    LOG.info("Store the checksums of source blocks into checksumStore");
    for (int i = 0; i < streams.length; i++) {
      if (streams[i] != null &&
          streams[i] instanceof DFSDataInputStream &&
          !(streams[i] instanceof RaidUtils.ZeroInputStream)) {
        DFSDataInputStream stream = (DFSDataInputStream) streams[i];
        Long newVal = checksums[i].getValue();
        ckmStore.putIfAbsentChecksum(stream.getCurrentBlock(), newVal);
      }
    }
  }
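The checksums[i] values stored above are java.util.zip.CRC32 digests accumulated while the block data was read. A minimal sketch of that accumulation, assuming a plain read loop over one stream:

import java.util.zip.CRC32;

  CRC32 crc = new CRC32();
  byte[] buf = new byte[64 * 1024];
  int n;
  while ((n = stream.read(buf)) != -1) {
    crc.update(buf, 0, n);        // fold each chunk into the running digest
  }
  long value = crc.getValue();    // the value handed to putIfAbsentChecksum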

          .create(path, true, 4096, (short) 2, (long) 2048);
      stm.write(new byte[4096]);
      stm.close();
     
      FSDataInputStream is = fs.open(path);
      DFSDataInputStream dis = (DFSDataInputStream) is;
      TestCase.assertNotNull(dis);

      is.read(new byte[1024]);
      DatanodeInfo currentDn1 = dis.getCurrentDatanode();
      dis.setUnfavoredNodes(Arrays.asList(currentDn1));

      is.read(new byte[512]);
      DatanodeInfo currentDn2 = dis.getCurrentDatanode();
      TestCase.assertFalse(currentDn2.equals(currentDn1));
      dis.setUnfavoredNodes(Arrays.asList(currentDn1, currentDn2));

      is.read(new byte[512]);
      TestCase.assertEquals(currentDn1, dis.getCurrentDatanode());
     
      is.read(new byte[1024]);
     
      TestCase.assertEquals(dis.getAllBlocks().get(1).getLocations()[0],
          dis.getCurrentDatanode());
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }   
  }
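A minimal sketch of the unfavored-nodes pattern the test verifies: marking the datanode currently serving reads as unfavored makes the client switch to another replica on the next read, when one is available.

import java.util.Arrays;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

  DFSDataInputStream dis = (DFSDataInputStream) fs.open(path);
  dis.read(new byte[1024]);
  DatanodeInfo busy = dis.getCurrentDatanode();
  dis.setUnfavoredNodes(Arrays.asList(busy));  // steer reads off this node
  dis.read(new byte[1024]);                    // now served by another replica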

    fs.delete(root, true);
    files = null;
  }
 
  public static Block getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
  }
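A usage sketch for getFirstBlock (the path is illustrative): tests typically use it to locate a block before, for example, corrupting its on-disk file.

  Block first = getFirstBlock(fs, new Path("/test/file1"));
  LOG.info("first block: " + first.getBlockName());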

      }
    });
   
    // Set visible length of all replicas to be smaller
    int SIZE_TO_SHRINK = 5;
    DFSDataInputStream is = (DFSDataInputStream) in;
    for (DataNode dn : cluster.getDataNodes()) {
      ReplicaBeingWritten rbw = dn.data.getReplicaBeingWritten(
          fs.dfs.getNamespaceId(), is.getCurrentBlock());
      if (rbw != null) {
        rbw.setBytesAcked(len1 - SIZE_TO_SHRINK);
      }
    }
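A hedged sketch of the shrink step above as a standalone helper. Like the excerpt, it assumes test-level access to DistributedFileSystem.dfs and to the datanode's dataset field; ReplicaBeingWritten, getReplicaBeingWritten, and setBytesAcked are taken from the excerpt.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

  // Lower the acknowledged length of every replica-being-written of the
  // block the stream is currently positioned on.
  static void shrinkAckedLength(MiniDFSCluster cluster,
      DistributedFileSystem fs, DFSDataInputStream is, long newLen)
      throws IOException {
    for (DataNode dn : cluster.getDataNodes()) {
      ReplicaBeingWritten rbw = dn.data.getReplicaBeingWritten(
          fs.dfs.getNamespaceId(), is.getCurrentBlock());
      if (rbw != null) {
        rbw.setBytesAcked(newLen);
      }
    }
  }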
