Examples of LruBlockCache

The snippets below show LruBlockCache in use in two codebases: Apache Accumulo (org.apache.accumulo.core.file.blockfile.cache.LruBlockCache) and Apache HBase (org.apache.hadoop.hbase.io.hfile.LruBlockCache).

Examples of org.apache.accumulo.core.file.blockfile.cache.LruBlockCache

   
    // Cache sizes come from tablet server configuration.
    long blockSize = acuConf.getMemoryInBytes(Property.TSERV_DEFAULT_BLOCKSIZE);
    long dCacheSize = acuConf.getMemoryInBytes(Property.TSERV_DATACACHE_SIZE);
    long iCacheSize = acuConf.getMemoryInBytes(Property.TSERV_INDEXCACHE_SIZE);

    // Separate LRU caches for RFile index blocks and data blocks.
    _iCache = new LruBlockCache(iCacheSize, blockSize);
    _dCache = new LruBlockCache(dCacheSize, blockSize);

    // Fail fast if the in-memory map plus both caches would exceed the JVM heap.
    Runtime runtime = Runtime.getRuntime();
    if (!usingNativeMap && maxMemory + dCacheSize + iCacheSize > runtime.maxMemory()) {
      throw new IllegalArgumentException(String.format(
          "Maximum tablet server map memory %,d and block cache sizes %,d is too large for this JVM configuration %,d", maxMemory, dCacheSize + iCacheSize,
          runtime.maxMemory()));
    }
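
The same cache can also be exercised directly. Below is a minimal, illustrative sketch that is not from the original listing; it assumes the Accumulo 1.x BlockCache API: cacheBlock(String, byte[]) to insert, getBlock(String) returning a CacheEntry whose getBuffer() yields the cached bytes, and shutdown() to stop the background eviction thread. The sizes and block name are made up for illustration.

    import org.apache.accumulo.core.file.blockfile.cache.CacheEntry;
    import org.apache.accumulo.core.file.blockfile.cache.LruBlockCache;

    // 8 MB total capacity, 64 KB nominal block size (illustrative values).
    LruBlockCache cache = new LruBlockCache(8 * 1024 * 1024, 64 * 1024);

    // Blocks are keyed by caller-chosen unique names.
    byte[] block = new byte[64 * 1024];
    cache.cacheBlock("rfile-0001/0", block);

    CacheEntry entry = cache.getBlock("rfile-0001/0");
    if (entry != null) {
      byte[] cached = entry.getBuffer(); // cache hit: the stored bytes
    }

    cache.shutdown(); // assumed: stops the eviction/statistics thread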


Examples of org.apache.accumulo.core.file.blockfile.cache.LruBlockCache

    public void openReader() throws IOException {
      byte[] data = baos.toByteArray();
      bais = new SeekableByteArrayInputStream(data);
      in = new FSDataInputStream(bais);

      // Deliberately oversized caches for a test: 100 MB capacity, 100 KB blocks.
      LruBlockCache indexCache = new LruBlockCache(100000000, 100000);
      LruBlockCache dataCache = new LruBlockCache(100000000, 100000);

      CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf, dataCache, indexCache);
      reader = new RFile.Reader(_cbr);
      iter = new ColumnFamilySkippingIterator(reader);
    }

Examples of org.apache.accumulo.core.file.blockfile.cache.LruBlockCache

      bais = new SeekableByteArrayInputStream(data);
      in = new FSDataInputStream(bais);
      fileLength = data.length;

      LruBlockCache indexCache = new LruBlockCache(100000000, 100000);
      LruBlockCache dataCache = new LruBlockCache(100000000, 100000);

      // Same pattern, against a newer CachableBlockFile.Reader signature that
      // also takes an AccumuloConfiguration.
      CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, fileLength, conf, dataCache, indexCache, AccumuloConfiguration.getDefaultConfiguration());
      reader = new RFile.Reader(_cbr);
      iter = new ColumnFamilySkippingIterator(reader);
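
Once the reader is open, repeated seeks over the same file should be served from the index and data caches. A rough, illustrative check follows; it is not part of the original test, uses org.apache.accumulo.core.data.Range/ByteSequence and java.util.Collections, and the commented-out assertion assumes the HBase-derived getStats() hit counter survived the port to Accumulo.

      // Scan everything once to warm the caches, then re-seek.
      iter.seek(new Range(), Collections.<ByteSequence> emptySet(), false);
      while (iter.hasTop()) {
        iter.next();
      }
      iter.seek(new Range(), Collections.<ByteSequence> emptySet(), false);

      // Hypothetical assertion, assuming a getStats() accessor as in HBase:
      // assertTrue(indexCache.getStats().getHitCount() > 0);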


Examples of org.apache.hadoop.hbase.io.hfile.LruBlockCache

    System.err.println("Testing encoded seekers for encoding : " + encoding + ", encodeOnDisk : "
        + encodeOnDisk + ", includeTags : " + includeTags + ", compressTags : " + compressTags);
    if (includeTags) {
      testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
    }
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
        setDataBlockEncoding(encoding).
        setBlocksize(BLOCK_SIZE).
        setBloomFilterType(BloomType.NONE).
        setCompressTags(compressTags);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // Write the data, but leave some in the memstore.
    doPuts(region);

    // Verify correctness when the memstore contains data.
    doGets(region);

    // Verify correctness again after compacting.
    region.compactStores();
    doGets(region);

    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
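
The cache returned by CacheConfig here is the process-wide block cache, which is why the test clears it up front. If you want to see what the reads did to it, HBase's LruBlockCache also exposes occupancy and hit statistics; the small addition below is illustrative only and not part of the original test.

    // Illustrative only: report cache occupancy and hit ratio after the gets.
    System.err.println("blocks=" + cache.getBlockCount()
        + " bytes=" + cache.getCurrentSize()
        + " hitRatio=" + cache.getStats().getHitRatio());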

Examples of org.apache.hadoop.hbase.io.hfile.LruBlockCache

  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding " + encoding);
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
        setDataBlockEncoding(encoding).
        setEncodeOnDisk(encodeOnDisk).
        setBlocksize(BLOCK_SIZE).
        setBloomFilterType(BloomType.NONE);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // Write the data, but leave some in the memstore.
    doPuts(region);

    // Verify correctness when the memstore contains data.
    doGets(region);

    // Verify correctness again after compacting.
    region.compactStores();
    doGets(region);

    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
  }

Examples of org.apache.hadoop.hbase.io.hfile.LruBlockCache

  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding " + encoding);
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();

    HRegion region = testUtil.createTestRegion(
        TABLE_NAME, new HColumnDescriptor(CF_NAME)
            .setMaxVersions(MAX_VERSIONS)
            .setDataBlockEncoding(encoding)
            .setEncodeOnDisk(encodeOnDisk)
    );

    LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(
        MIN_VALUE_SIZE, MAX_VALUE_SIZE);

    // Write NUM_ROWS rows of NUM_COLS_PER_ROW columns each, flushing
    // periodically so some data reaches HFiles while some stays in the memstore.
    for (int i = 0; i < NUM_ROWS; ++i) {
      byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
        Put put = new Put(key);
        byte[] col = Bytes.toBytes(String.valueOf(j));
        byte[] value = dataGenerator.generateRandomSizeValue(key, col);
        put.add(CF_BYTES, col, value);
        region.put(put);
      }
      if (i % NUM_ROWS_PER_FLUSH == 0) {
        region.flushcache();
      }
    }

    // Read everything back twice: once as written, once after a compaction.
    for (int doneCompaction = 0; doneCompaction <= 1; ++doneCompaction) {
      for (int i = 0; i < NUM_ROWS; ++i) {
        byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
        for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
          if (VERBOSE) {
            System.err.println("Reading row " + i + ", column " + j);
          }
          final String qualStr = String.valueOf(j);
          final byte[] qualBytes = Bytes.toBytes(qualStr);
          Get get = new Get(rowKey);
          get.addColumn(CF_BYTES, qualBytes);
          Result result = region.get(get);
          assertEquals(1, result.size());
          byte[] value = result.getValue(CF_BYTES, qualBytes);
          assertTrue(LoadTestKVGenerator.verify(value, rowKey, qualBytes));
        }
      }

      if (doneCompaction == 0) {
        // Compact, then read again on the next loop iteration.
        region.compactStores();
      }
    }

    Map<DataBlockEncoding, Integer> encodingCounts =
        cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
  }
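
In all three HBase variants above, the LruBlockCache is obtained from CacheConfig rather than constructed directly, so its capacity is derived from the JVM heap and the hfile.block.cache.size fraction. A minimal sketch of tuning that fraction before building the CacheConfig; the 0.4f value is illustrative, and the key constant lives in org.apache.hadoop.hbase.HConstants.

    Configuration conf = testUtil.getConfiguration();
    // Devote 40% of the heap to the block cache (illustrative value).
    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f);
    LruBlockCache tunedCache =
        (LruBlockCache) new CacheConfig(conf).getBlockCache();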