Examples of BlockCache

Usage examples of org.apache.hadoop.hbase.io.hfile.BlockCache, drawn from the HBase source tree (tools, region server metrics, and tests). Each excerpt is truncated to the lines around the BlockCache usage.

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

    try {
      processTable(fs, tableDir, log, c, majorCompact);
    } finally {
       log.close();
       // TODO: is this still right?
       BlockCache bc = new CacheConfig(c).getBlockCache();
       if (bc != null) bc.shutdown();
    }
  }
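
The cleanup idiom above, as a self-contained sketch. It assumes the 0.94-era API used throughout these excerpts (the CacheConfig(Configuration) constructor and BlockCache.shutdown()); getBlockCache() can return null when block caching is disabled.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BlockCacheShutdownSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    try {
      // ... read HFiles here; blocks are cached as a side effect ...
    } finally {
      // The block cache owns background threads (e.g. the LRU eviction
      // thread), so shut it down explicitly before the process exits.
      BlockCache bc = new CacheConfig(conf).getBlockCache();
      if (bc != null) {
        bc.shutdown();
      }
    }
  }
}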

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

      }
    } finally {
      storeFile.closeReader(true);
      exec.shutdown();

      BlockCache c = cacheConf.getBlockCache();
      if (c != null) {
        c.shutdown();
      }
    }
    LOG.info("Worker threads completed: " + numCompleted);
    LOG.info("Worker threads failed: " + numFailed);
    return true;
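
The same teardown order, factored into a helper: stop the workers, then shut down the cache they were reading through. A sketch under the same API assumptions; the awaitTermination call is an addition here, not part of the excerpt.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.io.hfile.BlockCache;

public class WorkerTeardownSketch {
  // Shut the pool down first so no worker can issue further cache reads,
  // then shut the cache down.
  static void shutdownWorkersThenCache(ExecutorService exec, BlockCache cache)
      throws InterruptedException {
    exec.shutdown();
    exec.awaitTermination(60, TimeUnit.SECONDS); // not in the excerpt above
    if (cache != null) {
      cache.shutdown();
    }
  }
}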


Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(this.testDir, "7e0102"),"twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
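
The baseline bookkeeping above generalizes to a small before/after probe. A minimal sketch using only the CacheStats getters already shown in the excerpt:

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

public class CacheDeltaSketch {
  // Capture hit/miss/eviction counts, run a workload, report the deltas.
  static void reportCacheDelta(BlockCache bc, Runnable workload) {
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    workload.run();

    System.out.println("hits:    " + (cs.getHitCount() - startHit));
    System.out.println("misses:  " + (cs.getMissCount() - startMiss));
    System.out.println("evicted: " + (cs.getEvictedCount() - startEvicted));
  }
}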

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

    readStoreFile(writer.getPath());
  }

  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    store.passSchemaMetricsTo(sf);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
            "shouldBeCached: " + shouldBeCached + "\n" +
            "isCached: " + isCached + "\n" +
            "Test description: " + testDescription + "\n" +
            "block: " + block + "\n" +
            "blockCacheKey: " + blockCacheKey);
        }
        // Remember this block and step past it to the next one.
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close(cacheConf.shouldEvictOnClose());
    }
  }
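
The cache probe inside the loop is worth isolating: a BlockCacheKey is simply the HFile name plus a block's byte offset. A minimal helper, assuming the same getBlock(key, caching, repeat) signature the test uses:

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;

public class CacheProbeSketch {
  // True if the block at 'offset' within 'hfileName' is resident in the
  // cache. (true, false) mirrors the test: a caching read, not a repeat.
  static boolean isBlockCached(BlockCache cache, String hfileName, long offset) {
    BlockCacheKey key = new BlockCacheKey(hfileName, offset);
    return cache.getBlock(key, true, false) != null;
  }
}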


Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

        tableName).getFromOnlineRegions(regionName);
    Store store = region.getStores().values().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache();

    // establish baseline stats
    long startBlockCount = cache.getBlockCount();
    long startBlockHits = cache.getStats().getHitCount();
    long startBlockMiss = cache.getStats().getMissCount();

    // wait until the baseline is stable (at least 500 ms with no change)
    for (int i = 0; i < 5; i++) {
      Thread.sleep(100);
      if (startBlockCount != cache.getBlockCount()
          || startBlockHits != cache.getStats().getHitCount()
          || startBlockMiss != cache.getStats().getMissCount()) {
        startBlockCount = cache.getBlockCount();
        startBlockHits = cache.getStats().getHitCount();
        startBlockMiss = cache.getStats().getMissCount();
        i = -1; // stats changed; restart the stability window
      }
    }

    // insert data
    Put put = new Put(ROW);
    put.add(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // data was in memstore so don't expect any changes
    assertEquals(startBlockCount, cache.getBlockCount());
    assertEquals(startBlockHits, cache.getStats().getHitCount());
    assertEquals(startBlockMiss, cache.getStats().getMissCount());
    // flush the data
    System.out.println("Flushing cache");
    region.flushcache();
    // expect one more block in cache, no change in hits/misses
    long expectedBlockCount = startBlockCount + 1;
    long expectedBlockHits = startBlockHits;
    long expectedBlockMiss = startBlockMiss;
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // read the data and expect same blocks, one new hit, no misses
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // insert a second column, read the row, no new blocks, one new hit
    byte [] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte [] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.add(FAMILY, QUALIFIER2, data2);
    table.put(put);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // flush, one new block
    System.out.println("Flushing cache");
    region.flushcache();
    assertEquals(++expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // compact, net minus two blocks, two hits, no misses
    System.out.println("Compacting");
    assertEquals(2, store.getNumberOfStoreFiles());
    store.triggerMajorCompaction();
    region.compactStores();
    waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
    assertEquals(1, store.getNumberOfStoreFiles());
    expectedBlockCount -= 2; // evicted two blocks, cached none
    assertEquals(expectedBlockCount, cache.getBlockCount());
    expectedBlockHits += 2;
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    // read the row, this should be a cache miss because we don't cache data
    // blocks on compaction
    r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    expectedBlockCount += 1; // cached one data block
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
  }
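
waitForStoreFileCount is called above but not shown in the excerpt. A plausible sketch of such a helper (hypothetical, not the actual HBase implementation): poll until compaction settles or the timeout expires.

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hbase.regionserver.Store;

public class WaitForStoreFilesSketch {
  static void waitForStoreFileCount(Store store, int count, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (store.getNumberOfStoreFiles() != count
        && System.currentTimeMillis() < deadline) {
      Thread.sleep(100);
    }
    // Fail fast if compaction never reached the expected file count.
    assertEquals(count, store.getNumberOfStoreFiles());
  }
}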

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

        .getFlushQueueSize());
    this.metrics.updatesBlockedSeconds.set(updatesBlockedMs/1000);
    final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
    this.metrics.updatesBlockedSecondsHighWater.set(updatesBlockedMsHigherWater/1000);

    BlockCache blockCache = cacheConfig.getBlockCache();
    if (blockCache != null) {
      this.metrics.blockCacheCount.set(blockCache.size());
      this.metrics.blockCacheFree.set(blockCache.getFreeSize());
      this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
      CacheStats cacheStats = blockCache.getStats();
      this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
      this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
      this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
      double ratio = blockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
      ratio = blockCache.getStats().getHitCachingRatio();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatio.set(percent);
      // past N period block cache hit / hit caching ratios
      cacheStats.rollMetricsPeriod();
      ratio = cacheStats.getHitRatioPastNPeriods();
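
A null-guarded snapshot of the same gauges as a standalone sketch; it uses only the BlockCache and CacheStats accessors that appear in the excerpt, and skips quietly when the cache is disabled.

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

public class CacheMetricsSketch {
  static void logBlockCacheMetrics(CacheConfig cacheConfig) {
    BlockCache blockCache = cacheConfig.getBlockCache();
    if (blockCache == null) {
      return; // block caching disabled
    }
    CacheStats stats = blockCache.getStats();
    System.out.println("blocks cached:  " + blockCache.size());
    System.out.println("bytes used:     " + blockCache.getCurrentSize());
    System.out.println("bytes free:     " + blockCache.getFreeSize());
    System.out.println("blocks evicted: " + blockCache.getEvictedCount());
    // Integer percentages, mirroring the metrics code above.
    System.out.println("hit ratio: " + (int) (stats.getHitRatio() * 100) + "%");
    System.out.println("caching hit ratio: "
        + (int) (stats.getHitCachingRatio() * 100) + "%");
  }
}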

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache

    new HRegionServerCommandLine(regionServerClass).doMain(args);
  }

  @Override
  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException {
    BlockCache c = new CacheConfig(this.conf).getBlockCache();
    return c.getBlockCacheColumnFamilySummaries(this.conf);
  }
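
Consuming the override above from the server side. The summary accessors named here (getTable, getColumnFamily, getBlocks, getHeapSize) are assumptions about the 0.92/0.94-era BlockCacheColumnFamilySummary, so treat this strictly as a sketch.

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class CacheSummarySketch {
  // Dump per-column-family cache usage for one region server.
  static void dumpSummaries(HRegionServer regionServer) throws IOException {
    for (BlockCacheColumnFamilySummary s :
        regionServer.getBlockCacheColumnFamilySummaries()) {
      System.out.println(s.getTable() + "/" + s.getColumnFamily()
          + ": blocks=" + s.getBlocks() + ", heapSize=" + s.getHeapSize());
    }
  }
}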