Class org.apache.hadoop.hbase.io.hfile.BlockType

Examples of org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory


    long startTimeNs = System.nanoTime();

    BlockCacheKey cacheKey = new BlockCacheKey(name, offset,
        DataBlockEncoding.NONE, BlockType.META);

    BlockCategory effectiveCategory = BlockCategory.META;
    if (metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_META_KEY) ||
        metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_DATA_KEY)) {
      effectiveCategory = BlockCategory.BLOOM;
    }
View Full Code Here


          // Try and get the block from the block cache.  If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock)
              cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, useLock);
          if (cachedBlock != null) {
            BlockCategory blockCategory =
                cachedBlock.getBlockType().getCategory();

            getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);

            if (cachedBlock.getBlockType() == BlockType.DATA) {
              HFile.dataBlockReadCnt.incrementAndGet();
            }

            validateBlockType(cachedBlock, expectedBlockType);

            // Validate encoding type for encoded blocks. We include encoding
            // type in the cache key, and we expect it to match on a cache hit.
            if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
                cachedBlock.getDataBlockEncoding() !=
                    dataBlockEncoder.getEncodingInCache()) {
              throw new IOException("Cached block under key " + cacheKey + " " +
                  "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
                  " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }

        // Load block from filesystem.
        long startTimeNs = System.nanoTime();
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
            onDiskBlockSize, -1, pread);
        hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
            isCompaction);
        validateBlockType(hfileBlock, expectedBlockType);
        passSchemaMetricsTo(hfileBlock);
        BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

        final long delta = System.nanoTime() - startTimeNs;
        HFile.offerReadLatency(delta, pread);
        getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
View Full Code Here

          // Try and get the block from the block cache.  If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock)
              cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, useLock);
          if (cachedBlock != null) {
            BlockCategory blockCategory =
                cachedBlock.getBlockType().getCategory();

            getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);

            if (cachedBlock.getBlockType() == BlockType.DATA) {
              HFile.dataBlockReadCnt.incrementAndGet();
            }

            validateBlockType(cachedBlock, expectedBlockType);

            // Validate encoding type for encoded blocks. We include encoding
            // type in the cache key, and we expect it to match on a cache hit.
            if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
                cachedBlock.getDataBlockEncoding() !=
                    dataBlockEncoder.getEncodingInCache()) {
              throw new IOException("Cached block under key " + cacheKey + " " +
                  "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
                  " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }

        // Load block from filesystem.
        long startTimeNs = System.nanoTime();
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
            onDiskBlockSize, -1, pread);
        hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
            isCompaction);
        validateBlockType(hfileBlock, expectedBlockType);
        passSchemaMetricsTo(hfileBlock);
        BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

        final long delta = System.nanoTime() - startTimeNs;
        HFile.offerReadLatency(delta, pread);
        getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
View Full Code Here

    long startTimeNs = System.nanoTime();

    BlockCacheKey cacheKey = new BlockCacheKey(name, offset,
        DataBlockEncoding.NONE, BlockType.META);

    BlockCategory effectiveCategory = BlockCategory.META;
    if (metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_META_KEY) ||
        metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_DATA_KEY)) {
      effectiveCategory = BlockCategory.BLOOM;
    }
View Full Code Here

      // Check cache for block. If found return.
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock = (HFileBlock)
            cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
        if (cachedBlock != null) {
          BlockCategory blockCategory =
              cachedBlock.getBlockType().getCategory();
          cacheHits.incrementAndGet();

          getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);

          if (cachedBlock.getBlockType() == BlockType.DATA) {
            HFile.dataBlockReadCnt.incrementAndGet();
          }

          validateBlockType(cachedBlock, expectedBlockType);

          // Validate encoding type for encoded blocks. We include encoding
          // type in the cache key, and we expect it to match on a cache hit.
          if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
              cachedBlock.getDataBlockEncoding() !=
              dataBlockEncoder.getEncodingInCache()) {
            throw new IOException("Cached block under key " + cacheKey + " " +
                "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
                " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
          }
          return cachedBlock;
        }
        // Carry on, please load.
      }

      // Load block from filesystem.
      long startTimeNs = System.nanoTime();
      HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
          onDiskBlockSize, -1, pread);
      hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
          isCompaction);
      validateBlockType(hfileBlock, expectedBlockType);
      passSchemaMetricsTo(hfileBlock);
      BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

      final long delta = System.nanoTime() - startTimeNs;
      HFile.offerReadLatency(delta, pread);
      getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
View Full Code Here

      // Load block from filesystem.
      long startTimeNs = System.nanoTime();
      HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
          onDiskBlockSize, -1, pread);
      BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

      HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
      HFile.readOps.incrementAndGet();

      // Cache the block
View Full Code Here

    long startTimeNs = System.nanoTime();

    BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, offset);

    BlockCategory effectiveCategory = BlockCategory.META;
    if (metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_META_KEY) ||
        metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_DATA_KEY)) {
      effectiveCategory = BlockCategory.BLOOM;
    }
View Full Code Here

          // Try and get the block from the block cache.  If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock)
              cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, useLock);
          if (cachedBlock != null) {
            BlockCategory blockCategory =
                cachedBlock.getBlockType().getCategory();

            getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);

            if (cachedBlock.getBlockType() == BlockType.DATA) {
              HFile.dataBlockReadCnt.incrementAndGet();
            }

            validateBlockType(cachedBlock, expectedBlockType);

            // Validate encoding type for encoded blocks. We include encoding
            // type in the cache key, and we expect it to match on a cache hit.
            if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
                cachedBlock.getDataBlockEncoding() !=
                    dataBlockEncoder.getEncodingInCache()) {
              throw new IOException("Cached block under key " + cacheKey + " " +
                  "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
                  " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }

        // Load block from filesystem.
        long startTimeNs = System.nanoTime();
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
            onDiskBlockSize, -1, pread);
        hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
            isCompaction);
        validateBlockType(hfileBlock, expectedBlockType);
        passSchemaMetricsTo(hfileBlock);
        BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

        final long delta = System.nanoTime() - startTimeNs;
        HFile.offerReadLatency(delta, pread);
        getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
View Full Code Here

      // Check cache for block. If found return.
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock = (HFileBlock)
            cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
        if (cachedBlock != null) {
          BlockCategory blockCategory =
              cachedBlock.getBlockType().getCategory();
          cacheHits.incrementAndGet();

          getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);

          if (cachedBlock.getBlockType() == BlockType.DATA) {
            HFile.dataBlockReadCnt.incrementAndGet();
          }

          validateBlockType(cachedBlock, expectedBlockType);

          // Validate encoding type for encoded blocks. We include encoding
          // type in the cache key, and we expect it to match on a cache hit.
          if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
              cachedBlock.getDataBlockEncoding() !=
              dataBlockEncoder.getEncodingInCache()) {
            throw new IOException("Cached block under key " + cacheKey + " " +
                "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
                " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
          }
          return cachedBlock;
        }
        // Carry on, please load.
      }

      // Load block from filesystem.
      long startTimeNs = System.nanoTime();
      HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
          onDiskBlockSize, -1, pread);
      hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
          isCompaction);
      validateBlockType(hfileBlock, expectedBlockType);
      passSchemaMetricsTo(hfileBlock);
      BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

      final long delta = System.nanoTime() - startTimeNs;
      HFile.offerReadLatency(delta, pread);
      getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
View Full Code Here

    long startTimeNs = System.nanoTime();

    BlockCacheKey cacheKey = new BlockCacheKey(name, offset,
        DataBlockEncoding.NONE, BlockType.META);

    BlockCategory effectiveCategory = BlockCategory.META;
    if (metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_META_KEY) ||
        metaBlockName.equals(HFileWriterV1.BLOOM_FILTER_DATA_KEY)) {
      effectiveCategory = BlockCategory.BLOOM;
    }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact: coftware@gmail.com.