Package org.apache.cassandra.io.util

Examples of org.apache.cassandra.io.util.RandomAccessReader

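The snippets below are collected from the Cassandra codebase and show the common RandomAccessReader patterns: sequential record reads, pooled readers, compressed readers with checksummed chunks, and primary-index scans. As a starting point, here is a minimal usage sketch (not taken from any snippet below; the file path and record format are hypothetical) of the open/seek/read/close pattern the examples share:

    import java.io.File;
    import org.apache.cassandra.io.util.RandomAccessReader;

    public class RandomAccessReaderSketch
    {
        public static void main(String[] args) throws Exception
        {
            RandomAccessReader reader = RandomAccessReader.open(new File("/tmp/example.db"));
            try
            {
                reader.seek(0);                          // position at the start
                while (!reader.isEOF())                  // stop at end of file
                {
                    int size = reader.readInt();         // length prefix (assumed format)
                    byte[] payload = new byte[size];
                    reader.readFully(payload, 0, size);  // read the whole record
                    // ... process payload ...
                }
            }
            finally
            {
                reader.close();                          // release the file handle
            }
        }
    }

The first snippet appears to be a commit log replay loop (pre-2.0 style): it opens the segment, seeks to the replay position, and reads length-prefixed, checksummed mutations until EOF or an end-of-segment marker.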

    {
        logger.info("Replaying " + file.getPath());
        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
        final long segment = desc.id;
        int version = desc.getMessagingVersion();
        RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()), true);
        try
        {
            assert reader.length() <= Integer.MAX_VALUE;
            int replayPosition;
            if (globalPosition.segment < segment)
            {
                replayPosition = 0;
            }
            else if (globalPosition.segment == segment)
            {
                replayPosition = globalPosition.position;
            }
            else
            {
                logger.debug("skipping replay of fully-flushed {}", file);
                return;
            }

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + replayPosition);
            reader.seek(replayPosition);

            /* read the log, populate RowMutations, and apply */
            while (!reader.isEOF())
            {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                int serializedSize;
                try
                {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    if (serializedSize == CommitLog.END_OF_SEGMENT_MARKER)
                    {
                        logger.debug("Encountered end of segment marker at " + reader.getFilePointer());
                        break;
                    }

                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the
                    // 2-byte length from writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.reset();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully; that's ok.

                    if (serializedSize > buffer.length)
                        buffer = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(buffer, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                }
                catch (EOFException eof)
                {
                    break; // last CL entry didn't get completely written. that's ok.
                }

                checksum.update(buffer, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue())
                {
                    // this entry must not have been fsynced. The rest is probably bad too,
                    // but there is no harm in trying them just in case (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
                RowMutation rm;
                try
                {
                    // Assuming version here. We've gone to lengths to make sure what gets written
                    // to the CL is in the current version, so make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer.deserialize(new DataInputStream(bufIn), version, IColumnSerializer.Flag.LOCAL);
                }
                catch (UnknownColumnFamilyException ex)
                {
                    if (ex.cfId == null)
                        continue;
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null)
                    {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    }
                    else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(), ByteBufferUtil.bytesToHex(rm.key()), "{" + StringUtils.join(rm.getColumnFamilies().iterator(), ", ")
                            + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
                        // ... (snippet truncated)


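A later version of the same replay loop (the VERSION_20 branch suggests Cassandra 2.0): the size checksum is computed differently depending on the commit log version, and the reader is opened without the extra boolean option used above.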
    {
        logger.info("Replaying " + file.getPath());
        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
        final long segment = desc.id;
        int version = desc.getMessagingVersion();
        RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()));
        try
        {
            assert reader.length() <= Integer.MAX_VALUE;
            int replayPosition;
            if (globalPosition.segment < segment)
            {
                replayPosition = 0;
            }
            else if (globalPosition.segment == segment)
            {
                replayPosition = globalPosition.position;
            }
            else
            {
                logger.debug("skipping replay of fully-flushed {}", file);
                return;
            }

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + replayPosition);
            reader.seek(replayPosition);

            /* read the log, populate RowMutations, and apply */
            while (!reader.isEOF())
            {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                int serializedSize;
                try
                {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    if (serializedSize == CommitLog.END_OF_SEGMENT_MARKER)
                    {
                        logger.debug("Encountered end of segment marker at " + reader.getFilePointer());
                        break;
                    }

                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Keyspace and Key (including the
                    // 2-byte length from writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;

                    long claimedSizeChecksum = reader.readLong();
                    checksum.reset();
                    if (version < CommitLogDescriptor.VERSION_20)
                        checksum.update(serializedSize);
                    else
                        FBUtilities.updateChecksumInt(checksum, serializedSize);

                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully; that's ok.

                    if (serializedSize > buffer.length)
                        buffer = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(buffer, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                }
                catch (EOFException eof)
                {
                    break; // last CL entry didn't get completely written. that's ok.
                }

                checksum.update(buffer, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue())
                {
                    // this entry must not have been fsynced. The rest is probably bad too,
                    // but there is no harm in trying them just in case (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
                RowMutation rm;
                try
                {
                    // Assuming version here. We've gone to lengths to make sure what gets written
                    // to the CL is in the current version, so make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer.deserialize(new DataInputStream(bufIn), version, ColumnSerializer.Flag.LOCAL);
                }
                catch (UnknownColumnFamilyException ex)
                {
                    if (ex.cfId == null)
                        continue;
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null)
                    {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    }
                    else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getKeyspaceName(), ByteBufferUtil.bytesToHex(rm.key()), "{" + StringUtils.join(rm.getColumnFamilies().iterator(), ", ")
                            + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
                        // ... (snippet truncated)

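Polling a pooled reader for a given path and recording a cache hit when one is available; this appears to be part of a file-handle cache such as FileCacheService.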
        Queue<RandomAccessReader> instances = getCacheFor(path);

        if (instances == null)
            return null;

        RandomAccessReader result = instances.poll();

        if (result != null)
            metrics.hits.mark();

        return result;
        // ... (snippet truncated)
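
The pool returns null on a miss so the caller can fall back to opening a fresh reader; recycling avoids repeated open/close system calls on hot files. A minimal sketch of the matching return path, assuming a getCacheFor() like the one above (the method name and the close-on-overflow fallback are assumptions, not taken from the snippet):

    public void recycle(String path, RandomAccessReader reader)
    {
        Queue<RandomAccessReader> instances = getCacheFor(path);

        // no pool for this path, or the pool refused the reader: really close it
        if (instances == null || !instances.offer(reader))
            reader.close();
    }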

            writer.resetAndTruncate(mark);
            writer.write("brown fox jumps over the lazy dog".getBytes());
            writer.close();

            assert f.exists();
            RandomAccessReader reader = compressed
                                      ? CompressedRandomAccessReader.open(filename, new CompressionMetadata(filename + ".metadata", f.length(), true))
                                      : RandomAccessReader.open(f);
            String expected = "The quick brown fox jumps over the lazy dog";
            assertEquals(expected.length(), reader.length());
            byte[] b = new byte[expected.length()];
            reader.readFully(b);
            assert new String(b).equals(expected) : "Expecting '" + expected + "', got '" + new String(b) + "'";
        }
        finally
        {
            // cleanup
            // ... (snippet truncated)

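Verifying that CompressedRandomAccessReader detects chunk corruption (apparently from a unit test): after a clean read, each byte of the chunk's checksum is corrupted in turn, and every subsequent read is expected to fail with a CorruptSSTableException caused by a CorruptBlockException.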
        // open compression metadata and get chunk information
        CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
        CompressionMetadata.Chunk chunk = meta.chunkFor(0);

        RandomAccessReader reader = CompressedRandomAccessReader.open(file.getPath(), meta);
        // read and verify compressed data
        assertEquals(CONTENT, reader.readLine());
        // close reader
        reader.close();

        Random random = new Random();
        RandomAccessFile checksumModifier = null;

        try
        {
            checksumModifier = new RandomAccessFile(file, "rw");
            byte[] checksum = new byte[4];

            // seek to the end of the compressed chunk
            checksumModifier.seek(chunk.length);
            // read checksum bytes
            checksumModifier.read(checksum);
            // seek back to the chunk end
            checksumModifier.seek(chunk.length);

            // let's modify one byte of the checksum on each iteration
            for (int i = 0; i < checksum.length; i++)
            {
                checksumModifier.write(random.nextInt());
                checksumModifier.getFD().sync(); // make sure the change was synced to disk

                final RandomAccessReader r = CompressedRandomAccessReader.open(file.getPath(), meta);

                Throwable exception = null;
                try
                {
                    r.readLine();
                }
                catch (Throwable t)
                {
                    exception = t;
                }
                assertNotNull(exception);
                assertEquals(exception.getClass(), CorruptSSTableException.class);
                assertEquals(exception.getCause().getClass(), CorruptBlockException.class);

                r.close();
            }

            // let's restore the original checksum and check that we can read the data
            updateChecksum(checksumModifier, chunk.length, checksum);
            // ... (snippet truncated)

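Exporting selected rows from an SSTable (this resembles SSTableExport): keys are decorated and checked for order, then the data file is seeked to the position found via the index.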
    /**
     * Exports the specified rows from the given SSTable as JSON to a PrintStream.
     *
     * @param desc the descriptor of the sstable to read from
     * @param outs PrintStream to write the output to
     * @param toExport the keys of the rows to export
     * @param excludes keys to exclude from the export
     * @throws IOException on failure to read/write input/output
     */
    public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes) throws IOException
    {
        SSTableReader sstable = SSTableReader.open(desc);
        RandomAccessReader dfile = sstable.openDataReader();

        IPartitioner<?> partitioner = sstable.partitioner;

        if (excludes != null)
            toExport.removeAll(Arrays.asList(excludes));

        outs.println("[");

        int i = 0;

        // last key to compare order
        DecoratedKey lastKey = null;

        for (String key : toExport)
        {
            DecoratedKey decoratedKey = partitioner.decorateKey(hexToBytes(key));

            if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

            lastKey = decoratedKey;

            RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
            if (entry == null)
                continue;

            dfile.seek(entry.position);
            ByteBufferUtil.readWithShortLength(dfile); // row key
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
            Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);

            checkStream(outs);
            // ... (snippet truncated)

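Building an index summary, and optionally a bloom filter, by scanning the primary index sequentially; the leading comment explains why a buffered reader (BRAF) is used instead of mmap.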
    /**
     * Builds the index summary (and optionally the bloom filter) by scanning the primary index.
     *
     * @throws IOException
     */
    private void buildSummary(boolean recreateBloomFilter, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, boolean summaryLoaded, int samplingLevel) throws IOException
    {
        // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
        RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));

        try
        {
            long indexSize = primaryIndex.length();
            long histogramCount = sstableMetadata.estimatedRowSize.count();
            long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                               ? histogramCount
                               : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional

            if (recreateBloomFilter)
                bf = FilterFactory.getFilter(estimatedKeys, metadata.getBloomFilterFpChance(), true);

            IndexSummaryBuilder summaryBuilder = null;
            if (!summaryLoaded)
                summaryBuilder = new IndexSummaryBuilder(estimatedKeys, metadata.getMinIndexInterval(), samplingLevel);

            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
                RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
                DecoratedKey decoratedKey = partitioner.decorateKey(key);
                if (first == null)
                    // ... (snippet truncated)

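Rebuilding the summary at a different sampling level uses the same sequential scan, but skips over each RowIndexEntry instead of deserializing it.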
    }

    private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
    {
        // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
        RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
        try
        {
            long indexSize = primaryIndex.length();
            IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel);

            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
                RowIndexEntry.Serializer.skip(primaryIndex);
            }
            // ... (snippet truncated)

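A scrub loop that validates every row in an SSTable, reading the data file and the primary index in lockstep so the index position can be used to recover from a corrupt row header.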
        // loop through each row, deserializing to check for damage.
        // we'll also loop through the index at the same time, using the position from the index to recover if the
        // row header (key or data size) is corrupt. (This means our position in the index file will be one row
        // "ahead" of the data file.)
        final RandomAccessReader dataFile = sstable.openDataReader(true);
        RandomAccessReader indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
        ScrubInfo scrubInfo = new ScrubInfo(dataFile, sstable);
        executor.beginCompaction(scrubInfo);

        SSTableWriter writer = null;
        SSTableReader newSstable = null;
        int goodRows = 0, badRows = 0, emptyRows = 0;

        try
        {
            ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
            {
                // throw away variable so we don't have a side effect in the assert
                long firstRowPositionFromIndex = indexFile.readLong();
                assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
            }

            // TODO errors when creating the writer may leave empty temp files.
            writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null, Collections.singletonList(sstable));

            while (!dataFile.isEOF())
            {
                long rowStart = dataFile.getFilePointer();
                if (logger.isDebugEnabled())
                    logger.debug("Reading row at " + rowStart);

                DecoratedKey key = null;
                long dataSize = -1;
                try
                {
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(dataFile));
                    dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                    if (logger.isDebugEnabled())
                        logger.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
                }
                catch (Throwable th)
                {
                    throwIfFatal(th);
                    // check for null key below
                }

                ByteBuffer currentIndexKey = nextIndexKey;
                long nextRowPositionFromIndex;
                try
                {
                    nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                    nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
                }
                catch (Throwable th)
                {
                    logger.warn("Error reading index file", th);
                    nextIndexKey = null;
                    nextRowPositionFromIndex = dataFile.length();
                }

                long dataStart = dataFile.getFilePointer();
                long dataStartFromIndex = currentIndexKey == null
                                        ? -1
                                        : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
                long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
                assert currentIndexKey != null || indexFile.isEOF();
                if (logger.isDebugEnabled() && currentIndexKey != null)
                    logger.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey),  dataSizeFromIndex));

                writer.mark();
                try
                // ... (snippet truncated)

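A later variant of the same scrub loop; the visible difference is the check for a requested stop (scrubInfo.isStopped()) at the top of each iteration.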
        // loop through each row, deserializing to check for damage.
        // we'll also loop through the index at the same time, using the position from the index to recover if the
        // row header (key or data size) is corrupt. (This means our position in the index file will be one row
        // "ahead" of the data file.)
        final RandomAccessReader dataFile = sstable.openDataReader(true);
        RandomAccessReader indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
        ScrubInfo scrubInfo = new ScrubInfo(dataFile, sstable);
        executor.beginCompaction(scrubInfo);

        SSTableWriter writer = null;
        SSTableReader newSstable = null;
        int goodRows = 0, badRows = 0, emptyRows = 0;

        try
        {
            ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
            {
                // throw away variable so we don't have a side effect in the assert
                long firstRowPositionFromIndex = indexFile.readLong();
                assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
            }

            // TODO errors when creating the writer may leave empty temp files.
            writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null, Collections.singletonList(sstable));

            while (!dataFile.isEOF())
            {
                if (scrubInfo.isStopped())
                    throw new CompactionInterruptedException(scrubInfo.getCompactionInfo());
                long rowStart = dataFile.getFilePointer();
                if (logger.isDebugEnabled())
                    logger.debug("Reading row at " + rowStart);

                DecoratedKey key = null;
                long dataSize = -1;
                try
                {
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(dataFile));
                    dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                    if (logger.isDebugEnabled())
                        logger.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
                }
                catch (Throwable th)
                {
                    throwIfFatal(th);
                    // check for null key below
                }

                ByteBuffer currentIndexKey = nextIndexKey;
                long nextRowPositionFromIndex;
                try
                {
                    nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                    nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
                }
                catch (Throwable th)
                {
                    logger.warn("Error reading index file", th);
                    nextIndexKey = null;
                    nextRowPositionFromIndex = dataFile.length();
                }

                long dataStart = dataFile.getFilePointer();
                long dataStartFromIndex = currentIndexKey == null
                                        ? -1
                                        : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
                long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
                assert currentIndexKey != null || indexFile.isEOF();
                if (logger.isDebugEnabled() && currentIndexKey != null)
                    logger.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey),  dataSizeFromIndex));

                writer.mark();
                try
                // ... (snippet truncated)
