Examples of FSDataInputStreamWrapper
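
FSDataInputStreamWrapper (in org.apache.hadoop.hbase.io) wraps the input stream(s) used to read an HFile. Its job is to manage two views of the same file, one verified with HBase-level checksums and one with plain HDFS checksums, so that readers can fall back to HDFS checksum verification when an hbase checksum failure is detected; the checksum tests further down this page exercise exactly that switching. The snippets below are excerpts from HBase sources and tests, and most are truncated at their edges.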


Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

      System.out.println("Block Headers:");
      /*
       * TODO: this same/similar block iteration logic is used in HFileBlock#blockRange and
       * TestLazyDataBlockDecompression. Refactor?
       */
      FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file);
      long fileSize = fs.getFileStatus(file).getLen();
      FixedFileTrailer trailer =
        FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
      long offset = trailer.getFirstDataBlockOffset(),
        max = trailer.getLastDataBlockOffset();
      HFileBlock block;
      while (offset <= max) {
        block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false,
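
The snippet above breaks off mid-call. A sketch of how the loop plausibly continues, assuming the 0.98-era readBlock signature (the trailing arguments are an assumption and vary across HBase versions):

      while (offset <= max) {
        // An onDiskSize of -1 means "unknown"; the reader derives it from the block header.
        block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false,
            /* isCompaction */ false, /* updateCacheMetrics */ false,
            /* expectedBlockType */ null, /* expectedDataBlockEncoding */ null);
        System.out.println(block);
        offset += block.getOnDiskSizeWithHeader(); // advance to the next block
      }
      fsdis.close(); // release the wrapped stream(s) when done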

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

    /**
     * A constructor that reads files with the latest minor version.
     * This is used by unit tests only.
     */
    FSReaderV2(FSDataInputStream istream, long fileSize, HFileContext fileContext) throws IOException {
      this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
    }
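
The single-argument constructor used here wraps an FSDataInputStream the caller already holds, whereas the (FileSystem, Path) form seen elsewhere on this page lets the wrapper open the file itself. A small sketch of the two construction paths (fs and path are assumed to be in scope):

      // Wrap a stream the caller already opened; the wrapper simply delegates to it.
      FSDataInputStream istream = fs.open(path);
      FSDataInputStreamWrapper fromStream = new FSDataInputStreamWrapper(istream);

      // Let the wrapper open the stream(s) itself; this form supports the
      // hbase-checksum / hdfs-checksum stream switching exercised in the tests below.
      FSDataInputStreamWrapper fromPath = new FSDataInputStreamWrapper(fs, path);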

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

   * @param cacheConf The cache configuration and block cache reference.
   * @return The StoreFile.Reader for the file
   */
  public StoreFile.Reader open(final FileSystem fs,
      final CacheConfig cacheConf) throws IOException {
    FSDataInputStreamWrapper in;
    FileStatus status;

    if (this.link != null) {
      // HFileLink
      in = new FSDataInputStreamWrapper(fs, this.link);
      status = this.link.getFileStatus(fs);
    } else if (this.reference != null) {
      // HFile Reference
      Path referencePath = getReferredToFile(this.getPath());
      in = new FSDataInputStreamWrapper(fs, referencePath);
      status = fs.getFileStatus(referencePath);
    } else {
      in = new FSDataInputStreamWrapper(fs, this.getPath());
      status = fileStatus;
    }
    long length = status.getLen();
    if (this.reference != null) {
      hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(fs, reference, status);
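
Whichever branch is taken, the method ends up with a matched pair: an FSDataInputStreamWrapper over the file that will physically be read, plus that file's FileStatus. For an HFileLink the wrapper is built over the link, which resolves to the linked file; for a Reference the referred-to file is resolved first via getReferredToFile; otherwise the store file's own path is used.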

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

                           .withHBaseCheckSum(false)
                           .withIncludesMvcc(includesMemstoreTS)
                           .withIncludesTags(includesTag)
                           .withCompression(algo)
                           .build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
            totalSize, fs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        HFileBlock expected = b;

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is), totalSize, fs, path,
              meta);
          b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(expected, b);
          int wrongCompressedSize = 2172;
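
The size assertions make the compression effect visible: with GZ the block's on-disk payload is 2173 bytes against 4936 bytes uncompressed, while with no compression the two sizes match once the checksum bytes are subtracted. The second read then passes the now-known on-disk size (payload plus header plus checksums) explicitly instead of -1, and wrongCompressedSize evidently sets up a follow-on check with a deliberately wrong size hint.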

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

                              .withHBaseCheckSum(false)
                              .withIncludesMvcc(includesMemstoreTS)
                              .withIncludesTags(includesTag)
                              .withCompression(algo)
                              .build();
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
              totalSize, fs, path, meta);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
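
Here the reader is additionally configured with a data block encoder through setDataBlockEncoder, so encoded data blocks can be handled on read; the FSDataInputStreamWrapper itself is oblivious to block encoding and just supplies the raw bytes.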

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  /**
   * Read all blocks from {@code path} to populate {@code blockCache}.
   */
  private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, FileSystem fs,
      Path path, HFileContext cxt) throws IOException {
    FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path);
    long fileSize = fs.getFileStatus(path).getLen();
    FixedFileTrailer trailer =
      FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
    HFileReaderV2 reader = new HFileReaderV2(path, trailer, fsdis, fileSize, cacheConfig,
      fsdis.getHfs(), conf);
    reader.loadFileInfo();
    long offset = trailer.getFirstDataBlockOffset(),
      max = trailer.getLastDataBlockOffset();
    List<HFileBlock> blocks = new ArrayList<HFileBlock>(4);
    HFileBlock block;
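
The loop body is cut off above, but presumably mirrors the block iteration of the first example: read each block through the HFileReaderV2 (this time with caching enabled, since the point of the method is to populate the block cache), collect it into blocks, and advance offset by each block's on-disk size until max is passed.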

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

        // Use hbase checksums.
        assertTrue(hfs.useHBaseChecksum());

        // Do a read that purposely introduces checksum verification failures.
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        meta = new HFileContextBuilder()
              .withCompression(algo)
              .withIncludesMvcc(true)
              .withIncludesTags(useTags)
              .withHBaseCheckSum(true)
              .build();
        HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        // read data back from the hfile, exclude header and checksum
        ByteBuffer bb = b.unpack(meta, hbr).getBufferWithoutHeader(); // read back data
        DataInputStream in = new DataInputStream(
                               new ByteArrayInputStream(
                                 bb.array(), bb.arrayOffset(), bb.limit()));

        // assert that we encountered hbase checksum verification failures
        // but still used hdfs checksums and read data successfully.
        assertEquals(1, HFile.getChecksumFailuresCount());
        validateData(in);

        // A single instance of hbase checksum failure causes the reader to
        // switch off hbase checksum verification for the next 100 read
        // requests. Verify that this is correct.
        for (int i = 0; i <
             HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
          b = hbr.readBlockData(0, -1, -1, pread);
          assertEquals(0, HFile.getChecksumFailuresCount());
        }
        // The next read should have hbase checksum verification re-enabled;
        // we verify this by asserting that there was an hbase checksum failure.
        b = hbr.readBlockData(0, -1, -1, pread);
        assertEquals(1, HFile.getChecksumFailuresCount());

        // Since the above encountered a checksum failure, we switch
        // back to not checking hbase checksums.
        b = hbr.readBlockData(0, -1, -1, pread);
        assertEquals(0, HFile.getChecksumFailuresCount());
        is.close();

        // Now, use a completely new reader. Switch off hbase checksums in
        // the configuration. In this case, we should not detect
        // any retries within hbase.
        HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
        assertFalse(newfs.useHBaseChecksum());
        is = new FSDataInputStreamWrapper(newfs, path);
        hbr = new FSReaderV2Test(is, totalSize, newfs, path, meta);
        b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        b.sanityCheck();
        b = b.unpack(meta, hbr);
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
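
This test exercises the behavior FSDataInputStreamWrapper exists to support: after an hbase checksum failure the reader silently falls back to the HDFS-checksummed stream for CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD reads, then re-enables hbase checksum verification and retries it. When hbase checksums are switched off outright (the HFileSystem constructed with false), there is no second stream to fall back to, so no retries can be observed.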

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

               .withIncludesMvcc(true)
               .withIncludesTags(useTags)
               .withHBaseCheckSum(true)
               .withBytesPerCheckSum(bytesPerChecksum)
               .build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(
            is, nochecksum), totalSize, hfs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        b.sanityCheck();
        assertEquals(dataSize, b.getUncompressedSizeWithoutHeader());
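
The two-stream form used here, new FSDataInputStreamWrapper(is, nochecksum), supplies both the hbase-checksummed stream and the plain hdfs-checksummed stream explicitly, rather than letting the wrapper open them from a FileSystem and Path as in the previous test.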

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

   * @throws IOException Will throw a CorruptHFileException (a DoNotRetryIOException subtype) if the hfile is corrupt or invalid.
   */
  public static Reader createReader(
      FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) throws IOException {
    Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
    return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
      cacheConf, stream.getHfs(), conf);
  }
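
A typical call site for this factory, sketched with illustrative names (the file path is hypothetical):

      Configuration conf = HBaseConfiguration.create();
      FileSystem fs = FileSystem.get(conf);
      CacheConfig cacheConf = new CacheConfig(conf);
      Path path = new Path("/hbase/data/default/t1/region/cf/hfile"); // hypothetical path
      HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
      // The wrapper built inside createReader is owned by the returned reader,
      // so closing the reader should also release the underlying stream(s).
      reader.close();

Note that the wrapper never escapes this method: createReader builds it, uses stream.getHfs() to discover the HFileSystem if one is present, and hands everything to pickReaderVersion.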