Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.ColumnFamilyStore
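All of the snippets below obtain a ColumnFamilyStore the same way: open the keyspace with Table.open and look the store up by column family name. A minimal sketch of that pattern (the ColumnFamilyStoreLookup class name is ours; it assumes an initialized node and the Keyspace1/StandardLeveled schema used by the tests further down):

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.Table;

    public class ColumnFamilyStoreLookup
    {
        // Table.open returns the already-initialized keyspace instance for this
        // node; the store is then looked up by column family name.
        public static ColumnFamilyStore open(String ksname, String cfname)
        {
            Table table = Table.open(ksname);
            return table.getColumnFamilyStore(cfname);
        }

        public static void main(String[] args)
        {
            ColumnFamilyStore store = open("Keyspace1", "StandardLeveled");
            System.out.println("estimated keys: " + store.estimateKeys());
        }
    }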


                // Each entry has the form "<keyspace>-<columnfamily>"; split it and
                // look up the corresponding ColumnFamilyStore.
                String[] pieces = FBUtilities.strip(distinctEntry, "-");
                String tableName = pieces[0];
                Table table = Table.open(tableName);

                ColumnFamilyStore cfStore = table.getColumnFamilyStore(pieces[1]);
                if (logger.isDebugEnabled())
                    logger.debug("Generating file name for " + distinctEntry + " ...");
                // Map the entry to a fresh temporary SSTable file name for that store.
                fileNames.put(distinctEntry, cfStore.getTempSSTableFileName());
            }

            return fileNames;
        }
View Full Code Here


        session.closeIfFinished();
    }

    private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException
    {
        ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
        DecoratedKey key;
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = null;

        try
        {
            // BytesReadTracker counts how many bytes each row consumes from the stream.
            BytesReadTracker in = new BytesReadTracker(input);

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0); // reset the per-row byte counter before reading the next row
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    remoteFile.progress += in.getBytesRead();
                }
View Full Code Here
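The branch in the loop above decides which rows are rebuilt through PrecompactedRow so the row cache can be refreshed: only rows that are already cached, arrive over an AES (repair) stream, and fit under the in-memory compaction limit; everything else goes through appendFromStream and has its cached entry invalidated. Here is the same condition pulled out as a standalone predicate, purely as a reading aid (RowCacheStreamCheck is not part of the original class, and the import paths are assumed for the 1.x streaming API):

    import org.apache.cassandra.config.DatabaseDescriptor;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.streaming.OperationType;
    import org.apache.cassandra.streaming.PendingFile;

    final class RowCacheStreamCheck
    {
        // Mirrors the condition in streamIn above: rebuild (and re-cache) the row
        // only when it is already cached, came in via an AES/repair stream, and
        // fits under the in-memory compaction limit.
        static boolean shouldRebuildForRowCache(ColumnFamilyStore cfs, DecoratedKey key,
                                                PendingFile remoteFile, long dataSize)
        {
            return cfs.getRawCachedRow(key) != null
                && remoteFile.type == OperationType.AES
                && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit();
        }
    }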

    public void testValidationMultipleSSTablePerLevel() throws Exception
    {
        String ksname = "Keyspace1";
        String cfname = "StandardLeveled";
        Table table = Table.open(ksname);
        ColumnFamilyStore store = table.getColumnFamilyStore(cfname);

        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files

        // Enough data to have a level 1 and 2
        int rows = 20;
        int columns = 10;

        // Add enough data to trigger multiple sstables per level
        for (int r = 0; r < rows; r++)
        {
            DecoratedKey key = Util.dk(String.valueOf(r));
            RowMutation rm = new RowMutation(ksname, key.key);
            for (int c = 0; c < columns; c++)
            {
                rm.add(new QueryPath(cfname, null, ByteBufferUtil.bytes("column" + c)), value, 0);
            }
            rm.apply();
            store.forceBlockingFlush();
        }

        LeveledCompactionStrategy strat = (LeveledCompactionStrategy)store.getCompactionStrategy();

        while (strat.getLevelSize(0) > 1)
        {
            store.forceMajorCompaction();
            Thread.sleep(200);
        }
        // Checking we're not completely bad at math
        assert strat.getLevelSize(1) > 0;
        assert strat.getLevelSize(2) > 0;
View Full Code Here
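The loop above forces major compactions until level 0 holds at most one SSTable, then asserts that levels 1 and 2 are populated. A small companion sketch that reports those level sizes through the same getCompactionStrategy()/getLevelSize() accessors (LevelSizeReport is our own helper name, not part of the test):

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;

    final class LevelSizeReport
    {
        // Prints how many SSTables currently sit in levels 0 through 2 (the
        // levels the test above asserts on).
        static void print(ColumnFamilyStore store)
        {
            LeveledCompactionStrategy strat = (LeveledCompactionStrategy) store.getCompactionStrategy();
            for (int level = 0; level <= 2; level++)
                System.out.println("L" + level + ": " + strat.getLevelSize(level) + " sstables");
        }
    }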

    public void testCompactionProgress() throws Exception
    {
        String ksname = "Keyspace1";
        String cfname = "StandardLeveled";
        Table table = Table.open(ksname);
        ColumnFamilyStore store = table.getColumnFamilyStore(cfname);

        // make sure we have SSTables in L1
        ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
        int rows = 2;
        int columns = 10;
        for (int r = 0; r < rows; r++)
        {
            DecoratedKey key = Util.dk(String.valueOf(r));
            RowMutation rm = new RowMutation(ksname, key.key);
            for (int c = 0; c < columns; c++)
            {
                rm.add(new QueryPath(cfname, null, ByteBufferUtil.bytes("column" + c)), value, 0);
            }
            rm.apply();
            store.forceBlockingFlush();
        }
        store.forceMajorCompaction();

        LeveledCompactionStrategy strat = (LeveledCompactionStrategy)store.getCompactionStrategy();
        assert strat.getLevelSize(1) > 0;

        // get LeveledScanner for level 1 sstables
        Collection<SSTableReader> sstables = strat.manifest.getLevel(1);
        ICompactionScanner scanner = strat.getScanners(sstables).get(0);
View Full Code Here

            throw new UnsupportedOperationException(String.format("SSTable %s is not compatible with current version %s",
                                                                  remote.getFilename(), Descriptor.Version.CURRENT));

        // new local sstable
        Table table = Table.open(remotedesc.ksname);
        ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
        Directories.DataDirectory localDir = Directories.getLocationCapableOfSize(remote.size);
        if (localDir == null)
            throw new RuntimeException("Insufficient disk space to store " + remote.size + " bytes");
        Descriptor localdesc = Descriptor.fromFilename(cfStore.getTempSSTablePath(cfStore.directories.getLocationForDisk(localDir.location)));

        return new PendingFile(localdesc, remote);
    }
View Full Code Here
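The last few lines above are a reusable pattern: pick a data directory with enough free space, fail loudly if none exists, and derive a temporary SSTable descriptor inside it. The same steps as a standalone helper, a sketch only (TempSSTableLocation and its method name are ours):

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.Directories;
    import org.apache.cassandra.io.sstable.Descriptor;

    final class TempSSTableLocation
    {
        // Chooses a disk with at least bytesNeeded free and returns a descriptor
        // for a new temporary SSTable belonging to the given store.
        static Descriptor tempDescriptorFor(ColumnFamilyStore cfStore, long bytesNeeded)
        {
            Directories.DataDirectory localDir = Directories.getLocationCapableOfSize(bytesNeeded);
            if (localDir == null)
                throw new RuntimeException("Insufficient disk space to store " + bytesNeeded + " bytes");
            return Descriptor.fromFilename(cfStore.getTempSSTablePath(cfStore.directories.getLocationForDisk(localDir.location)));
        }
    }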

    /**
     * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
     */
    private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException
    {
        ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
        DecoratedKey key;
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE);

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);
            long totalBytesRead = 0;

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                // skip to beginning of section inside chunk
                if (remoteFile.compressionInfo != null)
                    ((CompressedInputStream) underliningStream).position(section.left);
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    if (cfs.containsCachedRow(key) && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, localFile.getFilename(), key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.maybeUpdateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    // when compressed, report total bytes of decompressed chunks since remoteFile.size is the sum of chunks transferred
                    remoteFile.progress += remoteFile.compressionInfo != null
View Full Code Here

    }

    /** load keyspace (table) definitions, but do not initialize the table instances. */
    public static void loadSchemas() throws IOException
    {
        ColumnFamilyStore schemaCFS = SystemTable.schemaCFS(SystemTable.SCHEMA_KEYSPACES_CF);

        // if the table with definitions is empty, try loading the old way
        if (schemaCFS.estimateKeys() == 0)
        {
            // we can load tables from local storage if a version is set in the system table and that actually maps to
            // real data in the definitions table.  If we do end up loading from xml, store the definitions so that we
            // don't load from xml anymore.
            UUID uuid = MigrationManager.getLastMigrationId();
View Full Code Here


            {
                TreeRequest remotereq = this.deserialize(buffer, message.getVersion());
                TreeRequest request = new TreeRequest(remotereq.sessionid, message.getFrom(), remotereq.range, remotereq.cf);

                // trigger readonly-compaction
                ColumnFamilyStore store = Table.open(request.cf.left).getColumnFamilyStore(request.cf.right);
                Validator validator = new Validator(request);
                logger.debug("Queueing validation compaction for " + request);
                CompactionManager.instance.submitValidation(store, validator);
            }
            catch (IOException e)
View Full Code Here

