Examples of StripeInformationProvider


Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  @Test
  public void testNoStripesFromFlush() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, true);
    StripeCompactionPolicy policy = createPolicy(conf);
    StripeInformationProvider si = createStripesL0Only(0, 0);

    KeyValue[] input = new KeyValue[] { KV_A, KV_B, KV_C, KV_D, KV_E };
    KeyValue[][] expected = new KeyValue[][] { input };
    verifyFlush(policy, si, input, expected, null);
  }
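For context: the createStripesL0Only(...) helper is not reproduced on this page. A minimal sketch of what it plausibly does, built on the createStripesWithFiles(...) helper shown further down; createFile(long) is an assumed test-class helper that mocks a StoreFile of the given size:

  // Hypothetical sketch: a provider with no stripes at all, only L0 files.
  private static StripeInformationProvider createStripesL0Only(
      int l0Count, long l0Size) throws Exception {
    List<StoreFile> l0Files = new ArrayList<StoreFile>();
    for (int i = 0; i < l0Count; ++i) {
      l0Files.add(createFile(l0Size)); // createFile(long) is an assumption
    }
    // Empty boundary and stripe lists: everything stays in L0.
    return createStripesWithFiles(new ArrayList<byte[]>(),
        new ArrayList<List<StoreFile>>(), l0Files);
  }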

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  @Test
  public void testOldStripesFromFlush() throws Exception {
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
    StripeInformationProvider si = createStripes(0, KEY_C, KEY_D);

    KeyValue[] input = new KeyValue[] { KV_B, KV_C, KV_C, KV_D, KV_E };
    KeyValue[][] expected = new KeyValue[][] { new KeyValue[] { KV_B },
        new KeyValue[] { KV_C, KV_C }, new KeyValue[] { KV_D, KV_E } };
    verifyFlush(policy, si, input, expected, new byte[][] { OPEN_KEY, KEY_C, KEY_D, OPEN_KEY });
  }

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  @Test
  public void testNewStripesFromFlush() throws Exception {
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
    StripeInformationProvider si = createStripesL0Only(0, 0);
    KeyValue[] input = new KeyValue[] { KV_B, KV_C, KV_C, KV_D, KV_E };
    // Starts with one stripe; unlike the L0 flush results above, it must carry boundary metadata
    KeyValue[][] expected = new KeyValue[][] { input };
    verifyFlush(policy, si, input, expected, new byte[][] { OPEN_KEY, OPEN_KEY });
  }
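The KEY_* and KV_* fixtures used by these flush tests are defined elsewhere in the test class; a plausible shape, with guessed values, is sketched below. OPEN_KEY marks an unbounded stripe edge (an empty byte array in the stripe compaction code).

  // Hypothetical reconstruction of the fixture constants (values are guesses).
  private static final byte[] KEY_C = Bytes.toBytes("ccc");
  private static final byte[] KEY_D = Bytes.toBytes("ddd");
  private static final KeyValue KV_B = new KeyValue(Bytes.toBytes("bbb"), 0L);
  private static final KeyValue KV_C = new KeyValue(KEY_C, 0L);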

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  // Fragment of testSingleStripeCompaction: "policy" is an anonymous
  // StripeCompactionPolicy subclass whose needsCompactions(...) delegates
  // to needsSingleStripeCompaction(si), as shown in this tail end:
        return needsSingleStripeCompaction(si);
      }
    };

    // No compaction due to min files or ratio
    StripeInformationProvider si = createStripesWithSizes(0, 0,
        new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 1L });
    verifyNoCompaction(policy, si);
    // Again nothing to compact due to min files or ratio; needsCompactions() reports true, but selectCompaction() selects nothing.
    si = createStripesWithSizes(0, 0,
        new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 1L, 1L });
    assertNull(policy.selectCompaction(si, al(), false));
    assertTrue(policy.needsCompactions(si, al()));
    // One stripe has possible compaction
    si = createStripesWithSizes(0, 0,
        new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 4L, 3L });
    verifySingleStripeCompaction(policy, si, 2, null);
    // Several stripes have possible compactions; choose best quality (removes most files)
    si = createStripesWithSizes(0, 0,
        new Long[] { 3L, 2L, 2L }, new Long[] { 2L, 2L, 1L }, new Long[] { 3L, 2L, 2L, 1L });
    verifySingleStripeCompaction(policy, si, 2, null);
    si = createStripesWithSizes(0, 0,
        new Long[] { 5L }, new Long[] { 3L, 2L, 2L, 1L }, new Long[] { 3L, 2L, 2L });
    verifySingleStripeCompaction(policy, si, 1, null);
    // Or with smallest files, if the count is the same
    si = createStripesWithSizes(0, 0,
        new Long[] { 3L, 3L, 3L }, new Long[] { 3L, 1L, 2L }, new Long[] { 3L, 2L, 2L });
    verifySingleStripeCompaction(policy, si, 1, null);
    // Verify max count is respected.
    si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 5L, 4L, 4L, 4L, 4L });
    List<StoreFile> sfs = si.getStripes().get(1).subList(1, 5);
    verifyCompaction(policy, si, sfs, null, 1, null, si.getStartRow(1), si.getEndRow(1), true);
    // Verify ratio is applied.
    si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 50L, 4L, 4L, 4L, 4L });
    sfs = si.getStripes().get(1).subList(1, 5);
    verifyCompaction(policy, si, sfs, null, 1, null, si.getStartRow(1), si.getEndRow(1), true);
  }
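createStripesWithSizes(...) is another helper not reproduced here. A plausible sketch in terms of the createStripesWithFiles(...) helper shown further down; the boundary keys and createFile(long) are assumptions:

  // Hypothetical sketch: one Long[] per stripe; each value is the size of one
  // mocked file. The first two arguments describe the L0 files.
  private static StripeInformationProvider createStripesWithSizes(
      int l0Count, long l0Size, Long[]... stripeSizes) throws Exception {
    List<byte[]> boundaries = new ArrayList<byte[]>();
    List<List<StoreFile>> stripeFiles = new ArrayList<List<StoreFile>>();
    for (int i = 0; i < stripeSizes.length; ++i) {
      if (i > 0) {
        boundaries.add(Bytes.toBytes("key" + i)); // placeholder boundary keys
      }
      List<StoreFile> files = new ArrayList<StoreFile>();
      for (Long size : stripeSizes[i]) {
        files.add(createFile(size)); // createFile(long) is an assumption
      }
      stripeFiles.add(files);
    }
    List<StoreFile> l0Files = new ArrayList<StoreFile>();
    for (int i = 0; i < l0Count; ++i) {
      l0Files.add(createFile(l0Size));
    }
    return createStripesWithFiles(boundaries, stripeFiles, l0Files);
  }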

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  @Test
  public void testWithReferences() throws Exception {
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
    StripeCompactor sc = mock(StripeCompactor.class);
    StoreFile ref = createFile();
    when(ref.isReference()).thenReturn(true);
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    Collection<StoreFile> sfs = al(ref, createFile());
    when(si.getStorefiles()).thenReturn(sfs);

    assertTrue(policy.needsCompactions(si, al()));
    StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
    assertEquals(si.getStorefiles(), scr.getRequest().getFiles());
    scr.execute(sc);
    verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(),
        aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY));
  }
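The al(...) calls are a small varargs convenience; a minimal sketch matching how it is used in these snippets (hypothetical):

  // Hypothetical sketch of the al(...) helper: varargs to a mutable ArrayList.
  private static <T> ArrayList<T> al(T... items) {
    return new ArrayList<T>(Arrays.asList(items));
  }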

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  @Test
  public void testSingleStripeDropDeletes() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    StripeCompactionPolicy policy = createPolicy(conf);
    // Verify the deletes can be dropped if there are no L0 files.
    Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
    StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
    verifySingleStripeCompaction(policy, si, 0, true);
    // But cannot be dropped if there are.
    si = createStripesWithSizes(2, 2, stripes);
    verifySingleStripeCompaction(policy, si, 0, false);
    // Unless there are enough to cause L0 compaction.
    si = createStripesWithSizes(6, 2, stripes);
    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
    sfs.addSublist(si.getLevel0Files());
    sfs.addSublist(si.getStripes().get(0));
    verifyCompaction(
        policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
    // If we cannot actually compact all files in some stripe, L0 is chosen.
    si = createStripesWithSizes(6, 2,
        new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
    verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  }

Examples of org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider

  /** Creates a mock StripeInformationProvider from explicit per-stripe file lists. */
  private static StripeInformationProvider createStripesWithFiles(List<byte[]> boundaries,
      List<List<StoreFile>> stripeFiles, List<StoreFile> l0Files) throws Exception {
    ArrayList<ImmutableList<StoreFile>> stripes = new ArrayList<ImmutableList<StoreFile>>();
    ArrayList<byte[]> boundariesList = new ArrayList<byte[]>();
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    if (!stripeFiles.isEmpty()) {
      assert stripeFiles.size() == (boundaries.size() + 1);
      boundariesList.add(OPEN_KEY);
      for (int i = 0; i <= boundaries.size(); ++i) {
        byte[] startKey = ((i == 0) ? OPEN_KEY : boundaries.get(i - 1));
        byte[] endKey = ((i == boundaries.size()) ? OPEN_KEY : boundaries.get(i));
        boundariesList.add(endKey);
        for (StoreFile sf : stripeFiles.get(i)) {
          setFileStripe(sf, startKey, endKey);
        }
        stripes.add(ImmutableList.copyOf(stripeFiles.get(i)));
        when(si.getStartRow(eq(i))).thenReturn(startKey);
        when(si.getEndRow(eq(i))).thenReturn(endKey);
      }
    }
    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
    sfs.addAllSublists(stripes);
    sfs.addSublist(l0Files);
    when(si.getStorefiles()).thenReturn(sfs);
    when(si.getStripes()).thenReturn(stripes);
    when(si.getStripeBoundaries()).thenReturn(boundariesList);
    when(si.getStripeCount()).thenReturn(stripes.size());
    when(si.getLevel0Files()).thenReturn(l0Files);
    return si;
  }
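A minimal usage sketch for this helper, reusing the KEY_C constant and createFile() mock seen in the other snippets (values are illustrative):

  // Two stripes split at KEY_C, plus a single L0 file.
  List<byte[]> boundaries = Arrays.asList(KEY_C);
  List<List<StoreFile>> stripeFiles = Arrays.asList(
      Arrays.asList(createFile()),  // stripe [OPEN_KEY, KEY_C)
      Arrays.asList(createFile())); // stripe [KEY_C, OPEN_KEY)
  List<StoreFile> l0Files = Arrays.asList(createFile());
  StripeInformationProvider si =
      createStripesWithFiles(boundaries, stripeFiles, l0Files);
  assertEquals(2, si.getStripeCount());
  assertEquals(3, si.getStorefiles().size());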
