Examples of HdfsDataInputStream


Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream
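HdfsDataInputStream is the HDFS-specific FSDataInputStream returned by DistributedFileSystem.open(). It exposes HDFS-only metadata on an open stream: the visible length of a file that may still be under construction, the located blocks backing the file, and per-stream read statistics. A minimal sketch of opening a file and querying that metadata (the class name and the path /tmp/example are placeholders, and fs.defaultFS is assumed to point at an HDFS cluster):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class HdfsDataInputStreamSketch {
      public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS names an HDFS cluster and /tmp/example exists;
        // on HDFS, FileSystem.open() hands back an HdfsDataInputStream.
        FileSystem fs = FileSystem.get(new Configuration());
        HdfsDataInputStream in =
            (HdfsDataInputStream) fs.open(new Path("/tmp/example"));
        try {
          System.out.println("visible length: " + in.getVisibleLength());
          System.out.println("located blocks: " + in.getAllBlocks().size());
          in.readByte();
          System.out.println("bytes read: "
              + in.getReadStatistics().getTotalBytesRead());
        } finally {
          in.close();
          fs.close();
        }
      }
    }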

    // create cluster
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).build();
    HdfsDataInputStream in = null;
    try {
      Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      FSDataOutputStream out = dfs.create(path);
      int fileLength = 1030;
      out.write(new byte[fileLength]);
      // hsync flushes to the DataNodes, making the bytes visible to readers
      // while the file is still open for write.
      out.hsync();
      cluster.restartNameNode();
      cluster.waitActive();
      in = (HdfsDataInputStream) dfs.open(path, 1024);
      // Verify the length when we just restart NN. DNs will register
      // immediately.
      Assert.assertEquals(fileLength, in.getVisibleLength());
      cluster.shutdownDataNodes();
      cluster.restartNameNode(false);
      // This is just for ensuring NN started.
      verifyNNIsInSafeMode(dfs);

      try {
        in = (HdfsDataInputStream) dfs.open(path);
        Assert.fail("Expected IOException");
      } catch (IOException e) {
        Assert.assertTrue(e.getLocalizedMessage().indexOf(
            "Name node is in safe mode") >= 0);
      }
    } finally {
      if (null != in) {
        in.close();
      }
      cluster.shutdown();

    }
  }
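The helper verifyNNIsInSafeMode called above is defined elsewhere in the test. A hypothetical stand-in, assuming the DistributedFileSystem safe-mode query API:

    // Hypothetical sketch of the helper used above: SAFEMODE_GET reports
    // the NameNode's safe-mode state without changing it.
    private static void verifyNNIsInSafeMode(DistributedFileSystem dfs)
        throws IOException {
      Assert.assertTrue("Expected the NameNode to be in safe mode",
          dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET));
    }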

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      // Zero-copy read: SKIP_CHECKSUMS lets the client mmap the block file;
      // the null ByteBufferPool means there is no fallback copying buffer.
      ByteBuffer result = fsIn.read(null, 4096,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      fsIn.releaseBuffer(result);
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
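The byteBufferToArray helper used in the assertion is defined elsewhere in the test; a minimal sketch of such a helper:

    // Hypothetical helper matching the usage above: copies the buffer's
    // remaining bytes into a fresh array, leaving its position untouched.
    public static byte[] byteBufferToArray(ByteBuffer buf) {
      byte[] result = new byte[buf.remaining()];
      buf.duplicate().get(result);
      return result;
    }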

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);

      // Try to read 8192, but only get 4096 because of the block size.
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      ByteBuffer result =
        dfsIn.read(null, 8192, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);
     
      // Try to read 4097, but only get 4096 because of the block size.
      result =
          dfsIn.read(null, 4097, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
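As the comments note, a zero-copy read never crosses an HDFS block boundary: the requests for 8192 and 4097 bytes each return only the 4096 bytes remaining in the current block, and every buffer obtained this way must be handed back with releaseBuffer().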

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      fsIn = fs.open(TEST_PATH);
      byte original[] = new byte[TEST_FILE_LENGTH];
      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      ByteBuffer result;
      try {
        result = dfsIn.read(null, 4097, EnumSet.noneOf(ReadOption.class));
        Assert.fail("expected UnsupportedOperationException");
      } catch (UnsupportedOperationException e) {
        // expected
      }
      result = dfsIn.read(null, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
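The null ByteBufferPool is what makes the first call fail: a read that cannot be served zero-copy (checksums are not skipped there) has no fallback buffer to copy into, so read() throws UnsupportedOperationException instead of degrading to an ordinary copying read.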

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
    // Reading one byte forces the stream to connect to a DataNode,
    // populating the current block for getCurrentBlock().
    in.readByte();
    return in.getCurrentBlock();
  }

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      HdfsDataInputStream in = null;
      try {
        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
     
      // Clamp the response length to what is visible past the seek offset.
      final long n = length.getValue() != null ?
        Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
        in.getVisibleLength() - offset.getValue();
      return Response.ok(new OpenEntity(in, n, dfsclient)).type(
          MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GETFILECHECKSUM:
    {
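Two details are worth noting in this WebHDFS handler: the raw DFSClient stream is wrapped in an HdfsDataInputStream so the response length can be clamped with getVisibleLength(), and both the stream and the client are released with IOUtils.cleanup() if the seek fails, since otherwise the OpenEntity takes ownership of them for the duration of the response.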

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    // As above, the single-byte read positions the stream inside the
    // first block before getCurrentBlock() is queried.
    in.readByte();
    return in.getCurrentBlock();
  }

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    return ((HdfsDataInputStream) in).getAllBlocks();
  }

  public static List<LocatedBlock> getAllBlocks(FileSystem fs, Path path)
      throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    return in.getAllBlocks();
  }
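A hedged usage sketch, assuming fs and path from an enclosing test: iterate the returned LocatedBlocks to inspect each block and its replica locations.

    // Sketch: print every block of the file and where its replicas live.
    for (LocatedBlock lb : getAllBlocks(fs, path)) {
      System.out.println(lb.getBlock() + " on "
          + java.util.Arrays.toString(lb.getLocations()));
    }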

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataInputStream>() {
      @Override
      public FSDataInputStream doCall(final Path p)
          throws IOException, UnresolvedLinkException {
        return new HdfsDataInputStream(
            dfs.open(getPathName(p), bufferSize, verifyChecksum));
      }
      @Override
      public FSDataInputStream next(final FileSystem fs, final Path p)
          throws IOException {
        return fs.open(p, bufferSize);
      }
    }.resolve(this, absF);
  }
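The FileSystemLinkResolver first tries doCall() against this filesystem and, if the path crosses a symlink into another filesystem, retries through next(); only the HDFS-side doCall() wraps the stream in HdfsDataInputStream, since the filesystem a link resolves to may not be HDFS at all.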