Package org.apache.cassandra.io.sstable.format

Examples of org.apache.cassandra.io.sstable.format.SSTableReader
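
The fragments below are drawn from Cassandra's test suite and share one basic pattern: write or import an sstable, open it with SSTableReader, and read it back. A minimal sketch of that pattern, assuming the identifiers (KEYSPACE1, tempSS, and the Util/QueryFilter helpers) provided by the surrounding test fixtures:

        // Open a reader directly from an sstable file on disk.
        SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
        // Read one partition back through an identity filter over the whole row.
        QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("rowA"), "AsciiKeys", System.currentTimeMillis());
        OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
        assert iter.hasNext(); // the imported partition is visible through the reader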


        String jsonUrl = resourcePath("SimpleCF.json");
        File tempSS = tempSSTableFile(KEYSPACE1, "AsciiKeys");
        new SSTableImport(true).importJson(jsonUrl, KEYSPACE1, "AsciiKeys", tempSS.getPath());

        // Verify results
        SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
        // check that keys are treated as ascii
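        // "726f7741" is the hex encoding of the ascii string "rowA"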
        QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("726f7741", AsciiType.instance), "AsciiKeys", System.currentTimeMillis());
        OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
        assert iter.hasNext(); // "ascii" key exists
        QueryFilter qf2 = QueryFilter.getIdentityFilter(Util.dk("726f7741", BytesType.instance), "AsciiKeys", System.currentTimeMillis());
        iter = qf2.getSSTableColumnIterator(reader);
        assert !iter.hasNext(); // the same key interpreted as raw bytes ("rowA") does not exist


        // To ignore current key validator
        System.setProperty("skip.key.validator", "true");
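        // with the validator skipped, keys from the JSON are parsed as raw bytes instead of through the table's ascii key validator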
        new SSTableImport(true).importJson(jsonUrl, KEYSPACE1, "AsciiKeys", tempSS.getPath());

        // Verify results
        SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
        // check that keys are treated as bytes
        QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("rowA"), "AsciiKeys", System.currentTimeMillis());
        OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
        assert iter.hasNext(); // "bytes" key exists
    }

    public void shouldImportCqlTable() throws IOException, URISyntaxException
    {
        String jsonUrl = resourcePath("CQLTable.json");
        File tempSS = tempSSTableFile(KEYSPACE1, CQL_TABLE);
        new SSTableImport(true).importJson(jsonUrl, KEYSPACE1, CQL_TABLE, tempSS.getPath());
        SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
        Keyspace.open(KEYSPACE1).getColumnFamilyStore(CQL_TABLE).addSSTable(reader);
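        // registering the imported sstable with the ColumnFamilyStore makes it visible to the CQL query below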
       
        UntypedResultSet result = QueryProcessor.executeOnceInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE1, CQL_TABLE));
        assertThat(result.size(), is(2));
        assertThat(result, hasItem(withElements(1, "NY", 1980)));
        // ...

            cf.addColumn(Util.cellname(i), ByteBuffer.allocate(1000), 1);
        File dir = cfs.directories.getDirectoryForNewSSTables();
        SSTableWriter writer = getWriter(cfs, dir);
        for (int i = 0; i < 500; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
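        // openEarly exposes a reader over the rows appended so far, before the sstable is finished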
        SSTableReader s = writer.openEarly(1000);
        assertFileCounts(dir.list(), 2, 3);
        for (int i = 500; i < 1000; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        SSTableReader s2 = writer.openEarly(1000);
        assertTrue(s != s2);
        assertFileCounts(dir.list(), 2, 3);
        s.markObsolete();
        s.releaseReference();
        Thread.sleep(1000);
        // ...

        File dir = cfs.directories.getDirectoryForNewSSTables();
        SSTableWriter writer = getWriter(cfs, dir);

        for (int i = 0; i < 500; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        SSTableReader s = writer.openEarly(1000);
        //assertFileCounts(dir.list(), 2, 3);
        for (int i = 500; i < 1000; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
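        // closeAndOpenReader finishes the sstable; its return value (the final reader) is ignored here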
        writer.closeAndOpenReader();
        s.markObsolete();
        s.releaseReference();
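        // with the early-open reader obsolete and released, its files can be removed; the sleep gives that cleanup time to finish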
        Thread.sleep(1000);
        assertFileCounts(dir.list(), 0, 0);
        validateCFS(cfs);
    }

    {
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
        cfs.truncateBlocking();

        SSTableReader s = writeFile(cfs, 1000);
        cfs.addSSTable(s);
        long startStorageMetricsLoad = StorageMetrics.load.count();
        Set<SSTableReader> compacting = Sets.newHashSet(s);
        SSTableRewriter.overrideOpenInterval(10000000);
        SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));

        int files = 1;
        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
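                // roll over to a fresh writer once the current one has written roughly 25 MB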
                if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                {
                    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                    files++;
                    assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                    assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.count());
                    assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.count());

                }
            }
        }
        List<SSTableReader> sstables = rewriter.finish();
        cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
        long sum = 0;
        for (SSTableReader x : cfs.getSSTables())
            sum += x.bytesOnDisk();
        assertEquals(sum, cfs.metric.liveDiskSpaceUsed.count());
        assertEquals(startStorageMetricsLoad - s.bytesOnDisk() + sum, StorageMetrics.load.count());
        assertEquals(files, sstables.size());
        assertEquals(files, cfs.getSSTables().size());
        Thread.sleep(1000);
        // tmplink and tmp files should be gone:
        assertEquals(sum, cfs.metric.totalDiskSpaceUsed.count());
        // ...

    {
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
        cfs.truncateBlocking();

        SSTableReader s = writeFile(cfs, 1000);
        cfs.addSSTable(s);

        Set<SSTableReader> compacting = Sets.newHashSet(s);
        SSTableRewriter.overrideOpenInterval(10000000);
        SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));

        int files = 1;
        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                // ...

    {
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
        cfs.truncateBlocking();

        SSTableReader s = writeFile(cfs, 1000);
        cfs.addSSTable(s);
        long startSize = cfs.metric.liveDiskSpaceUsed.count();
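        // capture the original sstable's first and last partition keys before rewriting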
        DecoratedKey origFirst = s.first;
        DecoratedKey origLast = s.last;
        Set<SSTableReader> compacting = Sets.newHashSet(s);
        SSTableRewriter.overrideOpenInterval(10000000);
        SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));

        int files = 1;
        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                // ...

    {
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
        cfs.truncateBlocking();

        SSTableReader s = writeFile(cfs, 1000);
        cfs.addSSTable(s);

        DecoratedKey origFirst = s.first;
        DecoratedKey origLast = s.last;
        Set<SSTableReader> compacting = Sets.newHashSet(s);
        SSTableRewriter.overrideOpenInterval(10000000);
        SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));

        int files = 1;
        try (ICompactionScanner scanner = s.getScanner();
             CompactionController controller = new CompactionController(cfs, compacting, 0))
        {
            while(scanner.hasNext())
            {
                rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
                // ...

