// NOTE(review): prefer parameterized logging (logger.debug("... {}", x)) to avoid
// string concatenation when debug is disabled — left as-is in this doc-only pass.
logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);
SSTableWriter writer;
// Controller owns compaction policy decisions (e.g. tombstone GC via gcBefore).
// NOTE(review): final 'false' arg is presumably a force-deserialize flag — confirm
// against the CompactionController constructor.
CompactionController controller = new CompactionController(cfs, sstables, major, gcBefore, false);
CompactionIterator ci = new CompactionIterator(type, sstables, controller); // retain a handle so we can call close()
// Wrap the compaction iterator to skip null rows (rows fully removed by compaction).
Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
// Positions of written rows, collected so the key cache can be preheated after the
// new sstable is opened (populated only when getPreheatKeyCache() is on — see below).
Map<DecoratedKey, Long> cachedKeys = new HashMap<DecoratedKey, Long>();
// Register this compaction with the executor so it is visible/trackable while running.
executor.beginCompaction(ci);
try
{
if (!nni.hasNext())
{
// don't mark compacted in the finally block, since if there _is_ nondeleted data,
// we need to sync it (via closeAndOpen) first, so there is no period during which
// a crash could cause data loss.
cfs.markCompacted(sstables);
return 0;
}
// Non-empty output: create the destination sstable writer sized for the expected keys.
writer = cfs.createCompactionWriter(expectedBloomFilterSize, compactionFileLocation);
// Stream every surviving (non-null) compacted row into the new sstable.
while (nni.hasNext())
{
AbstractCompactedRow row = nni.next();
// append() returns the row's position in the new file; recorded for cache preheat.
long position = writer.append(row);
totalkeysWritten++;
if (DatabaseDescriptor.getPreheatKeyCache())
{