Package org.apache.cassandra.io.sstable

Examples of org.apache.cassandra.io.sstable.SSTableWriter$IndexWriter
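
The fragments below are drawn from the Apache Cassandra code base and its tests. They show the main ways an SSTableWriter (and its inner IndexWriter) is created and driven: writing compaction output, flushing a memtable to disk, receiving a streamed sstable, and building fixtures for compaction tests.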


    // Signature head reconstructed from the uses in the body below; the first two
    // parameter names are inferred, not copied from the original source.
    public static SSTableWriter createWriter(ColumnFamilyStore cfs,
                                             File compactionFileLocation,
                                             int expectedBloomFilterSize,
                                             long repairedAt,
                                             SSTableReader sstable)
    {
        FileUtils.createDirectory(compactionFileLocation);
        return new SSTableWriter(cfs.getTempSSTablePath(compactionFileLocation),
                                 expectedBloomFilterSize,
                                 repairedAt,
                                 cfs.metadata,
                                 cfs.partitioner,
                                 new MetadataCollector(Collections.singleton(sstable), cfs.metadata.comparator, sstable.getSSTableLevel()));
    }
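This factory always targets a temporary path (cfs.getTempSSTablePath(...)), so a half-written sstable can be discarded cleanly with abort(); the MetadataCollector seeded with the source sstable carries its compaction level over into the rewritten file. A minimal lifecycle sketch follows; key and columnFamily are hypothetical stand-ins for real row data, and every writer call appears in the fragments on this page:

    SSTableWriter writer = createWriter(cfs, compactionFileLocation, expectedBloomFilterSize, repairedAt, sstable);
    try
    {
        writer.append(key, columnFamily);                    // rows must arrive in partitioner order
        SSTableReader reader = writer.closeAndOpenReader();  // finalizes the temp files and opens the result
    }
    catch (Throwable t)
    {
        writer.abort();                                      // deletes the temporary sstable files
        throw Throwables.propagate(t);
    }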


    // Method head reconstructed from the body: reads streamed rows from the channel into a new sstable.
    public SSTableWriter read(ReadableByteChannel channel) throws IOException
    {
        long totalSize = totalSize();

        Pair<String, String> kscf = Schema.instance.getCF(cfId);
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
        BytesReadTracker in = new BytesReadTracker(dis);
        try
        {
            while (in.getBytesRead() < totalSize)
            {
                writeRow(writer, in, cfs);
                // TODO move this to BytesReadTracker
                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(dis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);
        }
    }
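This is the receiving side of sstable streaming: the socket channel is wrapped in an LZFInputStream for decompression and a BytesReadTracker so progress can be reported against the expected totalSize. On any failure the writer is aborted (removing its temp files) and the remaining bytes are drained, presumably so the connection stays usable for the rest of the session; IOExceptions are rethrown as-is, everything else is propagated unchecked.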

    // Method head reconstructed from the body; desc and estimatedKeys appear to be fields of the enclosing class.
    protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt) throws IOException
    {
        Directories.DataDirectory localDir = cfs.directories.getWriteableLocation();
        if (localDir == null)
            throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
        desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));

        return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys, repairedAt);
    }
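The writer factory picks a data directory through Directories.getWriteableLocation(), which returns null when no disk has room for totalSize bytes; that case is converted into an IOException up front rather than letting the write fail midway. The Descriptor derived from the temp path then supplies the Data component filename for the SSTableWriter constructor.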

                // a crash could cause data loss.
                cfs.markCompacted(toCompact, compactionType);
                return;
            }

            SSTableWriter writer = cfs.createCompactionWriter(keysPerSSTable, cfs.directories.getLocationForDisk(dataDirectory), toCompact);
            writers.add(writer);
            while (nni.hasNext())
            {
                if (ci.isStopRequested())
                    throw new CompactionInterruptedException(ci.getCompactionInfo());

                AbstractCompactedRow row = nni.next();
                if (row.isEmpty())
                    continue;

                RowIndexEntry indexEntry = writer.append(row);
                totalkeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : toCompact)
                    {
                        if (sstable.getCachedPosition(row.key, false) != null)
                        {
                            cachedKeys.put(row.key, indexEntry);
                            break;
                        }
                    }
                }
                if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer))
                {
                    SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                    cachedKeyMap.put(toIndex, cachedKeys);
                    sstables.add(toIndex);
                    if (nni.hasNext())
                    {
                        writer = cfs.createCompactionWriter(keysPerSSTable, cfs.directories.getLocationForDisk(dataDirectory), toCompact);
                        writers.add(writer);
                        cachedKeys = new HashMap<DecoratedKey, RowIndexEntry>();
                    }
                }
            }
        }
        catch (Throwable t)
        {
            for (SSTableWriter writer : writers)
                writer.abort();
            throw Throwables.propagate(t);
        }
        finally
        {
            controller.close();
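The compaction loop above shows the multi-writer pattern: rows are appended until newSSTableSegmentThresholdReached(writer) fires, the current writer becomes a live reader via closeAndOpenReader(getMaxDataAge(toCompact)), and a fresh writer is opened for the remaining rows. Every writer is remembered in writers so a failure anywhere aborts them all. Distilled to its shape (newWriter() and thresholdReached() are hypothetical stand-ins; rows is the compacted-row iterator):

    List<SSTableWriter> writers = new ArrayList<SSTableWriter>();
    List<SSTableReader> sstables = new ArrayList<SSTableReader>();
    SSTableWriter writer = newWriter();          // hypothetical: wraps cfs.createCompactionWriter(...)
    writers.add(writer);
    try
    {
        while (rows.hasNext())
        {
            writer.append(rows.next());
            if (!rows.hasNext() || thresholdReached(writer))   // hypothetical size check
            {
                sstables.add(writer.closeAndOpenReader());
                if (rows.hasNext())
                {
                    writer = newWriter();
                    writers.add(writer);
                }
            }
        }
    }
    catch (Throwable t)
    {
        for (SSTableWriter w : writers)
            w.abort();                           // mirrors the catch block above
        throw Throwables.propagate(t);
    }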

        // Method head reconstructed from the body; context supplies the commit log replay position.
        private SSTableReader writeSortedContents(Future<ReplayPosition> context, File dataDirectory)
        {
            logger.info("Writing " + Memtable.this.toString());

            SSTableReader ssTable;
            // errors when creating the writer that may leave empty temp files.
            SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(dataDirectory)));
            try
            {
                // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
                for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
                {
                    ColumnFamily cf = entry.getValue();
                    if (cf.isMarkedForDelete())
                    {
                        // When every node is up, there's no reason to write batchlog data out to sstables
                        // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                        // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                        // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                        // See CASSANDRA-4667.
                        if (cfs.columnFamily.equals(SystemTable.BATCHLOG_CF) && cfs.table.name.equals(Table.SYSTEM_KS) && !cf.isEmpty())
                            continue;

                        // Pedantically, you could purge column-level tombstones that are past gc grace when writing to the SSTable.
                        // But it can result in unexpected behaviour where deletes never make it to disk,
                        // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                        // is a CF level tombstone to ensure the delete makes it into an SSTable.
                        ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                    }
                    writer.append((DecoratedKey)entry.getKey(), cf);
                }

                if (writer.getFilePointer() > 0)
                {
                    ssTable = writer.closeAndOpenReader();
                    logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                              ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
                }
                else
                {
                    writer.abort();
                    ssTable = null;
                    logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                                context.get());
                }
                return ssTable;
            }
            catch (Throwable e)
            {
                writer.abort();
                throw Throwables.propagate(e);
            }
        }
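The getFilePointer() > 0 guard matters: a memtable can reduce entirely to nothing (the batchlog special case above skips its rows with continue), and in that case abort() deletes the empty temp files instead of closeAndOpenReader() publishing an empty sstable. Either way the writer never leaks temporary files, because the catch block also aborts on any error.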

        }

        public SSTableWriter createFlushWriter(String filename) throws ExecutionException, InterruptedException
        {
            SSTableMetadata.Collector sstableMetadataCollector = SSTableMetadata.createCollector().replayPosition(context.get());
            return new SSTableWriter(filename,
                                     columnFamilies.size(),
                                     cfs.metadata,
                                     cfs.partitioner,
                                     sstableMetadataCollector);
        }
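createFlushWriter records the commit log replay position (context.get()) in the sstable's metadata through the SSTableMetadata.Collector; after a restart, that position tells log replay which mutations are already persisted in this sstable and can be skipped. The expected key count (columnFamilies.size()) sizes the bloom filter.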

        // Loop head reconstructed from context: sum the serialized size of every partition key.
        long keySize = 0;
        for (DecoratedKey key : columnFamilies.keySet())
            keySize += key.key.remaining();
        long estimatedSize = (long) ((keySize // index entries
                                      + keySize // keys in data file
                                      + currentThroughput.get()) // data
                                     * 1.2); // bloom filter and row index overhead
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize);

        // (we can't clear out the map as-we-go to free up memory,
        //  since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<DecoratedKey, ColumnFamily> entry : columnFamilies.entrySet())
            writer.append(entry.getKey(), entry.getValue());

        SSTableReader ssTable = writer.closeAndOpenReader();
        logger.info(String.format("Completed flushing %s (%d bytes)",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length()));
        return ssTable;
    }
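The size estimate counts keySize twice because every partition key is written both to the index and to the data file, adds the live data bytes tracked in currentThroughput, and pads the sum by 20% for bloom filter and row index overhead. For example, 10 MB of keys and 100 MB of column data estimate to (10 + 10 + 100) × 1.2 = 144 MB.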

            ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfmeta);
            cf.addColumn(Util.column("01", "a", 1)); // this must not resurrect
            cf.addColumn(Util.column("a", "a", 3));
            cf.deletionInfo().add(new RangeTombstone(Util.cellname("0"), Util.cellname("b"), 2, (int) (System.currentTimeMillis() / 1000)), cfmeta.comparator);

            SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
                                                     0,
                                                     0,
                                                     cfs.metadata,
                                                     StorageService.getPartitioner(),
                                                     new MetadataCollector(cfs.metadata.comparator));


            writer.append(Util.dk("0"), cf);
            writer.append(Util.dk("1"), cf);
            writer.append(Util.dk("3"), cf);

            cfs.addSSTable(writer.closeAndOpenReader());
            writer = new SSTableWriter(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
                                       0,
                                       0,
                                       cfs.metadata,
                                       StorageService.getPartitioner(),
                                       new MetadataCollector(cfs.metadata.comparator));

            writer.append(Util.dk("0"), cf);
            writer.append(Util.dk("1"), cf);
            writer.append(Util.dk("2"), cf);
            writer.append(Util.dk("3"), cf);
            cfs.addSSTable(writer.closeAndOpenReader());

            Collection<SSTableReader> toCompact = cfs.getSSTables();
            assert toCompact.size() == 2;

            // forcing lazy compaction
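This test builds two overlapping sstables by hand: the same ColumnFamily (columns "01" and "a" plus a range tombstone covering names "0" through "b" at timestamp 2) is appended under several keys, and closeAndOpenReader() plus cfs.addSSTable(...) makes each writer's output live. With exactly two sstables in the store, the test can then force a compaction and verify that column "01" (written at timestamp 1, inside the tombstone range) does not resurrect, while column "a" (timestamp 3) survives.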
