Examples of FSDataInputStreamWrapper


Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  /**
   * This factory method is used only by unit tests.
   */
  static Reader createReaderFromStream(Path path,
      FSDataInputStream fsdis, long size, CacheConfig cacheConf)
      throws IOException {
    FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis);
    return pickReaderVersion(path, wrapper, size, cacheConf, null);
  }
View Full Code Here
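
A minimal usage sketch for this test-only factory. The names fs, path, and conf are illustrative, and the call assumes package-private access from a test in the same package as HFile:

  // Hypothetical test-side call: open the stream ourselves, then hand it off.
  FSDataInputStream fsdis = fs.open(path);
  long size = fs.getFileStatus(path).getLen();
  HFile.Reader reader = HFile.createReaderFromStream(
      path, fsdis, size, new CacheConfig(conf));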

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

    /**
     * A constructor that reads files with the latest minor version.
     * This is used by unit tests only.
     */
    FSReaderV2(FSDataInputStream istream, Algorithm compressAlgo,
        long fileSize) throws IOException {
      this(new FSDataInputStreamWrapper(istream), compressAlgo, fileSize,
           HFileReaderV2.MAX_MINOR_VERSION, null, null);
    }
View Full Code Here
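
As a sketch of what this convenience constructor buys, the two instantiations below should be equivalent, assuming istream, compressAlgo, and fileSize are in scope and the constructors are accessible from the calling test:

  // Short form used by unit tests.
  HFileBlock.FSReader shortForm =
      new HFileBlock.FSReaderV2(istream, compressAlgo, fileSize);
  // Equivalent long form: wrap the stream and pin the latest minor version.
  HFileBlock.FSReader longForm = new HFileBlock.FSReaderV2(
      new FSDataInputStreamWrapper(istream), compressAlgo, fileSize,
      HFileReaderV2.MAX_MINOR_VERSION, null, null);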

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  /**
   * @throws IOException Will throw a CorruptHFileException (a DoNotRetryIOException subtype) if the hfile is corrupt or invalid.
   */
  public static Reader createReader(
      FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
    Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
    return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(), cacheConf, null);
  }
View Full Code Here
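
A hedged sketch of the usual call site for this public factory; fs, path, and conf are assumed to be supplied by the caller:

  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
  try {
    // ... seek/scan through the reader ...
  } finally {
    reader.close();
  }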

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  /**
   * @param cacheConf The cache configuration and block cache reference.
   * @return The StoreFile.Reader for the file.
   */
  public StoreFile.Reader open(final FileSystem fs,
      final CacheConfig cacheConf) throws IOException {
    FSDataInputStreamWrapper in;
    FileStatus status;

    if (this.link != null) {
      // HFileLink
      in = new FSDataInputStreamWrapper(fs, this.link);
      status = this.link.getFileStatus(fs);
    } else if (this.reference != null) {
      // HFile Reference
      Path referencePath = getReferredToFile(this.getPath());
      in = new FSDataInputStreamWrapper(fs, referencePath);
      status = fs.getFileStatus(referencePath);
    } else {
      in = new FSDataInputStreamWrapper(fs, this.getPath());
      status = fileStatus;
    }
    long length = status.getLen();
    if (this.reference != null) {
      hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(fs, reference, status);
View Full Code Here
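
An illustrative call site, assuming info is the enclosing store-file descriptor (a StoreFileInfo-like object, a hypothetical name here) and conf a Configuration; the same open() transparently handles plain hfiles, HFileLinks, and Reference files:

  // The branching above picks the right stream source; the caller sees one API.
  StoreFile.Reader reader = info.open(fs, new CacheConfig(conf));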

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
            algo, totalSize, MINOR_VERSION, fs, path);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
              algo, totalSize, MINOR_VERSION, fs, path);
          b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
View Full Code Here
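
For orientation, the readBlockData arguments used above appear to be, reading the surrounding tests: the block's file offset, the on-disk size with header (-1 when unknown, forcing the reader to probe it), the uncompressed size (-1 when unknown), and whether to use positional reads. A distilled sketch, assuming fs, path, algo, totalSize, and MINOR_VERSION come from the surrounding test:

  FSDataInputStream is = fs.open(path);
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(
      new FSDataInputStreamWrapper(is), algo, totalSize, MINOR_VERSION, fs, path);
  // offset 0, unknown on-disk size, unknown uncompressed size, pread=true
  HFileBlock b = hbr.readBlockData(0, -1, -1, true);
  b.sanityCheck();
  is.close();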

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
              algo, totalSize, MINOR_VERSION, fs, path);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
View Full Code Here

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  public static Reader createReaderWithEncoding(
      FileSystem fs, Path path, CacheConfig cacheConf,
      DataBlockEncoding preferredEncodingInCache) throws IOException {
    final boolean closeIStream = true;
    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
    return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
        cacheConf, preferredEncodingInCache, stream.getHfs());
  }
View Full Code Here
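
A hedged usage sketch; fs, path, and conf are assumed, and FAST_DIFF stands in for whatever DataBlockEncoding the cache should prefer:

  HFile.Reader reader = HFile.createReaderWithEncoding(
      fs, path, new CacheConfig(conf), DataBlockEncoding.FAST_DIFF);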

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

  /**
   * This factory method is used only by unit tests.
   */
  static Reader createReaderFromStream(Path path,
      FSDataInputStream fsdis, long size, CacheConfig cacheConf)
      throws IOException {
    FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis);
    return pickReaderVersion(path, wrapper, size, cacheConf, DataBlockEncoding.NONE, null);
  }
View Full Code Here

Examples of org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

        // Use hbase checksums.
        assertEquals(true, hfs.useHBaseChecksum());

        // Do a read that purposely introduces checksum verification failures.
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        HFileBlock.FSReader hbr = new FSReaderV2Test(is, algo,
            totalSize, HFile.MAX_FORMAT_VERSION, fs, path);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        // read data back from the hfile, exclude header and checksum
        ByteBuffer bb = b.getBufferWithoutHeader(); // read back data
        DataInputStream in = new DataInputStream(
                               new ByteArrayInputStream(
                                 bb.array(), bb.arrayOffset(), bb.limit()));

        // assert that we encountered hbase checksum verification failures
        // but still used hdfs checksums and read data successfully.
        assertEquals(1, HFile.getChecksumFailuresCount());
        validateData(in);

        // A single instance of hbase checksum failure causes the reader to
        // switch off hbase checksum verification for the next 100 read
        // requests. Verify that this is correct.
        for (int i = 0; i <
             HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
          b = hbr.readBlockData(0, -1, -1, pread);
          assertEquals(0, HFile.getChecksumFailuresCount());
        }
        // The next read should have hbase checksum verification re-enabled;
        // we verify this by asserting that there was an hbase checksum failure.
        b = hbr.readBlockData(0, -1, -1, pread);
        assertEquals(1, HFile.getChecksumFailuresCount());

        // Since the above encountered a checksum failure, we switch
        // back to not checking hbase checksums.
        b = hbr.readBlockData(0, -1, -1, pread);
        assertEquals(0, HFile.getChecksumFailuresCount());
        is.close();

        // Now, use a completely new reader. Switch off hbase checksums in
        // the configuration. In this case, we should not detect
        // any retries within hbase.
        HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
        assertEquals(false, newfs.useHBaseChecksum());
        is = new FSDataInputStreamWrapper(newfs, path);
        hbr = new FSReaderV2Test(is, algo,
            totalSize, HFile.MAX_FORMAT_VERSION, newfs, path);
        b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        // read data back from the hfile, exclude header and checksum
View Full Code Here
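
A compact sketch of the two filesystem modes this test exercises; conf and path are assumed, and the boolean flag mirrors the HFileSystem constructor used above:

  // Reader-side verification of hbase checksums enabled.
  HFileSystem hbaseChecksumFs = new HFileSystem(conf, true);
  FSDataInputStreamWrapper in1 = new FSDataInputStreamWrapper(hbaseChecksumFs, path);
  // hbase checksums off: every read falls back to hdfs-level checksums.
  HFileSystem hdfsChecksumFs = new HFileSystem(conf, false);
  FSDataInputStreamWrapper in2 = new FSDataInputStreamWrapper(hdfsChecksumFs, path);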