Package org.apache.hadoop.hdfs.client

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream
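HdfsDataInputStream is the HDFS-specific subclass of FSDataInputStream returned by DistributedFileSystem; it adds accessors such as getVisibleLength(), getCurrentBlock(), getCurrentDatanode(), and getReadStatistics(). Before the snippets below, here is a minimal sketch of the common pattern, assuming a reachable HDFS cluster; the class name and path are illustrative, not taken from the snippets:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class VisibleLengthExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // assumes fs.defaultFS points at HDFS
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.txt");  // illustrative path

        // On HDFS, FileSystem#open returns an HdfsDataInputStream, so the
        // cast below is how the HDFS-specific accessors are reached.
        try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path)) {
          System.out.println("visible length: " + in.getVisibleLength());
          in.read();  // the first read selects a block and a datanode
          System.out.println("current block: " + in.getCurrentBlock());
          System.out.println("bytes read: "
              + in.getReadStatistics().getTotalBytesRead());
        }
      }
    }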


    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
   
    // check visible length
    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
    Assert.assertEquals(size, in.getVisibleLength());
    in.close();
   
    if (doRename) {
      fileStr += ".renamed";
      Path renamedPath = new Path(fileStr);
      assertTrue(dfs.rename(filePath, renamedPath));
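In the snippet above, hflush() pushes buffered bytes to the datanode pipeline so new readers can see them; a stream opened afterwards therefore reports the flushed size through getVisibleLength() even though the file is still open for writing.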

    if (!(in instanceof HdfsDataInputStream &&
        sums instanceof HdfsDataInputStream)) {
      throw new IllegalArgumentException(
          "Input streams must be types of HdfsDataInputStream");
    }

    LocatedBlock[] lblocks = new LocatedBlock[2];

    // Find block in data stream.
    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
    if (dataBlock == null) {
      LOG.error("Current block in data stream is null!");
      return false;
    }
    DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()};
    lblocks[0] = new LocatedBlock(dataBlock, dataNode);
    LOG.info("Found checksum error in data stream at "
        + dataBlock + " on datanode="
        + dataNode[0]);

    // Find block in checksum stream
    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
    if (sumsBlock == null) {
      LOG.error("Current block in checksum stream is null!");
      return false;
    }
    DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
    lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
    LOG.info("Found checksum error in checksum stream at "
        + sumsBlock + " on datanode=" + sumsNode[0]);

    // Ask client to delete blocks.
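The remainder of the method is truncated here; in DistributedFileSystem#reportChecksumFailure, from which this snippet comes, the two LocatedBlocks are handed to DFSClient#reportChecksumFailure so the NameNode can mark the offending replicas as corrupt.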

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                                 new String[] {"supergroup"});
   
    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   
    final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read?
    for(int i = 0; i < expectedsize; i++) {
      Assert.assertEquals((byte)i, (byte)in.read());
    }

    in.close();
  }
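The assertion uses >= rather than == because the file may still be under construction: a reader opened part-way through is guaranteed to see at least the bytes already acknowledged by the pipeline, and the writer may have flushed more since.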

    // create cluster
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).build();
    HdfsDataInputStream in = null;
    try {
      Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      FSDataOutputStream out = dfs.create(path);
      int fileLength = 1030;
      out.write(new byte[fileLength]);
      out.hsync();
      cluster.restartNameNode();
      cluster.waitActive();
      in = (HdfsDataInputStream) dfs.open(path, 1024);
      // Verify the length when we just restart NN. DNs will register
      // immediately.
      Assert.assertEquals(fileLength, in.getVisibleLength());
      cluster.shutdownDataNodes();
      cluster.restartNameNode(false);
      // This is just for ensuring NN started.
      verifyNNIsInSafeMode(dfs);

      try {
        in = (HdfsDataInputStream) dfs.open(path);
        Assert.fail("Expected IOException");
      } catch (IOException e) {
        Assert.assertTrue(e.getLocalizedMessage().indexOf(
            "Name node is in safe mode") >= 0);
      }
    } finally {
      if (null != in) {
        in.close();
      }
      cluster.shutdown();

    }
  }
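verifyNNIsInSafeMode is a helper from the surrounding test class and is not shown in the snippet. A plausible reconstruction (an assumption, not the original source) queries safe mode through DistributedFileSystem#setSafeMode with SAFEMODE_GET (HdfsConstants is org.apache.hadoop.hdfs.protocol.HdfsConstants):

    // Hypothetical sketch of the helper used above: with all datanodes shut
    // down and the NameNode restarted without waiting, safe mode should be on.
    private static void verifyNNIsInSafeMode(DistributedFileSystem dfs)
        throws IOException {
      // SAFEMODE_GET queries the current safe-mode state without changing it.
      Assert.assertTrue("NameNode is not in safe mode",
          dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET));
    }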

    ClientContext clientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
      assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
   
    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

    actual.limit(3);

    //Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);

    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
        "A few bytes");
    //Now read rest of it
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
      int nbytes = stm.read(actual);

      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    if (legacyShortCircuitFails) {
      assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
    stm.close();
  }
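arrayFromByteBuffer and checkData are helpers defined elsewhere in the test class. A plausible sketch of the first (hypothetical, not the original source): it must copy the bytes accumulated in the buffer so far, i.e. [0, position), without disturbing the caller's cursor:

    private static byte[] arrayFromByteBuffer(ByteBuffer buf) {
      // Duplicate so the caller's position and limit are left untouched.
      ByteBuffer dup = buf.duplicate();
      dup.flip();  // [0, position) holds the bytes read so far
      byte[] arr = new byte[dup.remaining()];
      dup.get(arr);
      return arr;
    }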

    }
  }

  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
      byte[] expected, int readOffset) throws IOException {
    HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file);
    // Ensure the stream is closed on every exit path.
    try {
      ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
      IOUtils.skipFully(stm, readOffset);
      try {
        stm.read(actual);
      } catch (UnsupportedOperationException unex) {
        return true;
      }
      return false;
    } finally {
      stm.close();
    }
  }
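FSDataInputStream#read(ByteBuffer) is only supported when the wrapped stream implements ByteBufferReadable; when it does not, the call throws UnsupportedOperationException, which is exactly the condition this helper probes for.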

    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataInputStream>() {
      @Override
      public FSDataInputStream doCall(final Path p)
          throws IOException, UnresolvedLinkException {
        return new HdfsDataInputStream(
            dfs.open(getPathName(p), bufferSize, verifyChecksum));
      }
      @Override
      public FSDataInputStream next(final FileSystem fs, final Path p)
          throws IOException {
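This snippet is the resolver half of DistributedFileSystem#open itself: doCall opens the path against the client and wraps the resulting stream in an HdfsDataInputStream, while the truncated next method re-issues the open on whatever filesystem a symlink resolves to.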

      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      ByteBuffer result = fsIn.read(null, 4096,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      fsIn.releaseBuffer(result);
    } finally {
      if (fsIn != null) fsIn.close();
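Passing null as the pool argument above means the caller only accepts a zero-copy (mmap-backed) buffer, and the read fails if one cannot be produced. Supplying a ByteBufferPool lets the stream fall back to an ordinary copying read; a minimal sketch, with fsIn as in the snippet and the buffer size illustrative (ByteBufferPool and ElasticByteBufferPool are in org.apache.hadoop.io):

    ByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf = fsIn.read(pool, 4096,
        EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    if (buf != null) {  // null signals end of stream
      try {
        // ... consume buf ...
      } finally {
        fsIn.releaseBuffer(buf);  // always return buffers obtained this way
      }
    }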
