Examples of LazilyCompactedRow


Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

    }

    @Override
    public AbstractCompactedRow getCompactedRowWriter(CompactionController controller, ImmutableList<OnDiskAtomIterator> onDiskAtomIterators)
    {
        return new LazilyCompactedRow(controller, onDiskAtomIterators);
    }
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

            ICompactionScanner scanner = scanners.scanners.get(0);
            CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
            writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
            while(scanner.hasNext())
            {
                AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
                writer.append(row);
            }
        }
        cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, writer.finish(), OperationType.COMPACTION);
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
View Full Code Here

Examples of org.apache.cassandra.db.compaction.LazilyCompactedRow

        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000)
                {
                    assertEquals(1, cfs.getSSTables().size()); // we dont open small files early ...
                    assertEquals(origFirst, cfs.getSSTables().iterator().next().first); // ... and the first key should stay the same
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact: software@gmail.com (address reconstructed from anti-spam obfuscation — verify).