Examples of BufferedRandomAccessFile
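Before the individual call sites, a minimal orientation sketch: opening a file read-only and scanning length-prefixed records. The path is hypothetical, but every constructor and method used here (the (File, mode, bufferSize, skipCache) constructor, DEFAULT_BUFFER_SIZE, seek, isEOF, readLong, readFully, close) appears in the snippets below.

import java.io.File;
import java.io.IOException;
import org.apache.cassandra.io.util.BufferedRandomAccessFile;

public class BrafReadSketch
{
    public static void main(String[] args) throws IOException
    {
        // hypothetical path; "r" mode, default buffer size, skip the OS page cache
        BufferedRandomAccessFile reader =
            new BufferedRandomAccessFile(new File("/tmp/example.db"), "r",
                                         BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE, true);
        try
        {
            reader.seek(0);
            while (!reader.isEOF())
            {
                long length = reader.readLong();     // length-prefixed records, as in the commit log example below
                byte[] payload = new byte[(int) length];
                reader.readFully(payload);
                // ... process payload ...
            }
        }
        finally
        {
            reader.close();
        }
    }
}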


Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

    SSTableScanner(SSTableReader sstable, int bufferSize, boolean skipCache)
    {
        try
        {
            this.file = new BufferedRandomAccessFile(new File(sstable.getFilename()), "r", bufferSize, skipCache);
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

    SSTableScanner(SSTableReader sstable, QueryFilter filter, int bufferSize)
    {
        try
        {
            this.file = new BufferedRandomAccessFile(sstable.getFilename(), "r", bufferSize);
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
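The two scanner constructors above exercise both reader-side overloads seen on this page: BufferedRandomAccessFile(File, String mode, int bufferSize, boolean skipCache), which, judging by the parameter name and the getUncachingReader factory further down, asks to bypass the OS page cache, and the three-argument BufferedRandomAccessFile(String path, String mode, int bufferSize), which leaves caching alone.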

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

              partitioner,
              SSTable.defaultRowHistogram(),
              SSTable.defaultColumnHistogram());
        iwriter = new IndexWriter(descriptor, partitioner, keyCount);
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = new BufferedRandomAccessFile(new File(getFilename()), "rw", BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE, true);
    }

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

        return sstable;
    }

    private static void writeStatistics(Descriptor desc, EstimatedHistogram rowSizes, EstimatedHistogram columnCounts) throws IOException
    {
        BufferedRandomAccessFile out = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_STATS)),
                                                                     "rw",
                                                                     BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE,
                                                                     true);
        EstimatedHistogram.serializer.serialize(rowSizes, out);
        EstimatedHistogram.serializer.serialize(columnCounts, out);
        out.close();
    }
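The statistics component is simply the two histograms serialized back to back, so a matching read path is straightforward. A sketch, assuming EstimatedHistogram.serializer also exposes a deserialize counterpart accepting this reader (that API is not shown on this page):

    private static EstimatedHistogram[] readStatistics(Descriptor desc) throws IOException
    {
        // open the statistics component read-only, mirroring the writer above
        BufferedRandomAccessFile in = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_STATS)),
                                                                   "r",
                                                                   BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE,
                                                                   true);
        try
        {
            // must be read in write order: row sizes first, then column counts
            EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(in);      // assumed API
            EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);  // assumed API
            return new EstimatedHistogram[]{ rowSizes, columnCounts };
        }
        finally
        {
            in.close();
        }
    }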

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

        Set<Table> tablesRecovered = new HashSet<Table>();
        List<Future<?>> futures = new ArrayList<Future<?>>();
        for (File file : clogs)
        {
            int bufferSize = (int)Math.min(file.length(), 32 * 1024 * 1024);
            BufferedRandomAccessFile reader = new BufferedRandomAccessFile(file.getAbsolutePath(), "r", bufferSize);

            final CommitLogHeader clHeader;
            try
            {
                clHeader = CommitLogHeader.readCommitLogHeader(reader);
            }
            catch (EOFException eofe)
            {
                logger.info("Attempted to recover an incomplete CommitLogHeader.  Everything is ok, don't panic.");
                continue;
            }

            /* seek to the lowest position where any CF has non-flushed data */
            int lowPos = CommitLogHeader.getLowestPosition(clHeader);
            if (lowPos == 0)
                continue;

            reader.seek(lowPos);
            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + lowPos);

            /* read the log entries, populate RowMutations, and apply them */
            while (!reader.isEOF())
            {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                byte[] bytes;
                try
                {
                    bytes = new byte[(int) reader.readLong()]; // readLong can throw EOFException too
                    reader.readFully(bytes);
                    claimedCRC32 = reader.readLong();
                }
                catch (EOFException e)
                {
                    // last CL entry didn't get completely written.  that's ok.
                    break;
                }

                ByteArrayInputStream bufIn = new ByteArrayInputStream(bytes);
                Checksum checksum = new CRC32();
                checksum.update(bytes, 0, bytes.length);
                if (claimedCRC32 != checksum.getValue())
                {
                    // this part of the log must not have been fsynced. the rest is probably
                    // bad too, but there is no harm in trying the remaining entries just in case.
                    continue;
                }

                /* deserialize the commit log entry */
                final RowMutation rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn));
                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s",
                                                rm.getTable(),
                                                rm.key(),
                                                "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));
                final Table table = Table.open(rm.getTable());
                tablesRecovered.add(table);
                final Collection<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>(rm.getColumnFamilies());
                final long entryLocation = reader.getFilePointer();
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
                        /* remove column families that have already been flushed before applying the rest */
                        for (ColumnFamily columnFamily : columnFamilies)
                        {
                            int id = table.getColumnFamilyId(columnFamily.name());
                            if (!clHeader.isDirty(id) || entryLocation < clHeader.getPosition(id))
                            {
                                rm.removeColumnFamily(columnFamily);
                            }
                        }
                        if (!rm.isEmpty())
                        {
                            Table.open(rm.getTable()).apply(rm, null, false);
                        }
                    }
                };
                futures.add(StageManager.getStage(StageManager.MUTATION_STAGE).submit(runnable));
                if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT)
                {
                    FBUtilities.waitOnFutures(futures);
                    futures.clear();
                }
            }
            reader.close();
            logger.info("Finished reading " + file);
        }

        // wait for all the writes to finish on the mutation stage
        FBUtilities.waitOnFutures(futures);
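The replay loop above fixes the on-disk entry format: an 8-byte length, the serialized RowMutation bytes, then an 8-byte CRC32 of those bytes. A standalone sketch of the writer side of that framing, using plain JDK classes only (an illustration, not Cassandra's actual commit log writer):

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

final class LogFrameSketch
{
    // frames one entry exactly the way the replay loop expects to read it back
    static void appendEntry(DataOutputStream out, byte[] serializedMutation) throws IOException
    {
        Checksum checksum = new CRC32();
        checksum.update(serializedMutation, 0, serializedMutation.length);

        out.writeLong(serializedMutation.length);   // read back via reader.readLong()
        out.write(serializedMutation);              // read back via reader.readFully(bytes)
        out.writeLong(checksum.getValue());         // compared against the recomputed CRC32
    }
}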

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

            // loop through each row, deserializing to check for damage.
            // we'll also loop through the index at the same time, using the position from the index to recover if the
            // row header (key or data size) is corrupt. (This means our position in the index file will be one row
            // "ahead" of the data file.)
            final BufferedRandomAccessFile dataFile = BufferedRandomAccessFile.getUncachingReader(sstable.getFilename());
            String indexFilename = sstable.descriptor.filenameFor(Component.PRIMARY_INDEX);
            BufferedRandomAccessFile indexFile = BufferedRandomAccessFile.getUncachingReader(indexFilename);
            ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
            {
                // throw away variable so we don't have a side effect in the assert
                long firstRowPositionFromIndex = indexFile.readLong();
                assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
            }

            SSTableWriter writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null);
            executor.beginCompaction(cfs.columnFamily, new ScrubInfo(dataFile, sstable));
            int goodRows = 0, badRows = 0, emptyRows = 0;

            while (!dataFile.isEOF())
            {
                long rowStart = dataFile.getFilePointer();
                if (logger.isDebugEnabled())
                    logger.debug("Reading row at " + rowStart);

                DecoratedKey key = null;
                long dataSize = -1;
                try
                {
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(dataFile));
                    dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                    if (logger.isDebugEnabled())
                        logger.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
                }
                catch (Throwable th)
                {
                    throwIfFatal(th);
                    // check for null key below
                }

                ByteBuffer currentIndexKey = nextIndexKey;
                long nextRowPositionFromIndex;
                try
                {
                    nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                    nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
                }
                catch (Throwable th)
                {
                    logger.warn("Error reading index file", th);
                    nextIndexKey = null;
                    nextRowPositionFromIndex = dataFile.length();
                }

                long dataStart = dataFile.getFilePointer();
                long dataStartFromIndex = currentIndexKey == null
                                        ? -1
                                        : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
                long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
                assert currentIndexKey != null || indexFile.isEOF();
                if (logger.isDebugEnabled() && currentIndexKey != null)
                    logger.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey),  dataSizeFromIndex));

                writer.mark();
                try
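The dataStartFromIndex arithmetic above is worth unpacking: each row begins with a 2-byte key length, the key bytes, and then the row size as a 4-byte int or an 8-byte long depending on the descriptor version. Restated as a standalone helper (the same expression as in the loop, just named):

    // Row header layout assumed by the scrubber's offset arithmetic:
    //   [2-byte key length][key bytes][4- or 8-byte row size][row data ...]
    //   ^ rowStart
    static long dataStartFromIndex(long rowStart, java.nio.ByteBuffer indexKey, boolean hasIntRowSize)
    {
        int keyLengthPrefix = 2;                   // short written by the short-length key encoding
        int rowSizeField = hasIntRowSize ? 4 : 8;  // matches the readInt()/readLong() branch above
        return rowStart + keyLengthPrefix + indexKey.remaining() + rowSizeField;
    }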

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

              partitioner,
              SSTable.defaultRowHistogram(),
              SSTable.defaultColumnHistogram());
        iwriter = new IndexWriter(descriptor, partitioner, keyCount);
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = new BufferedRandomAccessFile(new File(getFilename()), "rw", DatabaseDescriptor.getInMemoryCompactionLimit(), true);
    }

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

        {
            if (dfile != null)
                return;
            try
            {
                dfile = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_DATA)), "r", 8 * 1024 * 1024, true);
            }
            catch (IOException e)
            {
                throw new IOError(e);
            }

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

        IndexWriter(Descriptor desc, IPartitioner part, long keyCount) throws IOException
        {
            this.desc = desc;
            this.partitioner = part;
            indexFile = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_INDEX)), "rw", 8 * 1024 * 1024, true);
            builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
            summary = new IndexSummary(keyCount);
            bf = BloomFilter.getFilter(keyCount, 15);
        }

Examples of org.apache.cassandra.io.util.BufferedRandomAccessFile

    private BloomFilter bf;

    public SSTableWriter(String filename, long keyCount, IPartitioner partitioner) throws IOException
    {
        super(filename, partitioner);
        dataFile = new BufferedRandomAccessFile(path, "rw", (int)(DatabaseDescriptor.getFlushDataBufferSizeInMB() * 1024 * 1024));
        indexFile = new BufferedRandomAccessFile(indexFilename(), "rw", (int)(DatabaseDescriptor.getFlushIndexBufferSizeInMB() * 1024 * 1024));
        bf = BloomFilter.getFilter(keyCount, 15);
    }
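One detail in this last constructor: the configured buffer sizes arrive in megabytes and are narrowed to an int byte count for the constructor. In isolation (the getter is the one shown above; the size limit is a property of the arithmetic, not a claim about Cassandra's validation):

    // converted to bytes and narrowed to int, so the configured value must
    // stay under 2 GiB (Integer.MAX_VALUE bytes) to be representable
    int dataBufferBytes = (int) (DatabaseDescriptor.getFlushDataBufferSizeInMB() * 1024 * 1024);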