Package org.apache.cassandra.io.util

Examples of org.apache.cassandra.io.util.RandomAccessReader
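
A quick orientation before the excerpts: every example below follows the same lifecycle: open a reader over a file, seek to a known offset, read through the java.io.DataInput-style methods, and release it. A minimal sketch of that pattern (the close() call is my assumption; the excerpts below release readers by other means):

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.util.RandomAccessReader;

public class RandomAccessReaderBasics
{
    public static void main(String[] args) throws IOException
    {
        RandomAccessReader reader = RandomAccessReader.open(new File(args[0]));
        try
        {
            // position the reader, then read as from a java.io.DataInput
            reader.seek(0);
            byte[] header = new byte[(int) Math.min(16, reader.length())];
            reader.readFully(header);
            System.out.println("position after read: " + reader.getFilePointer());
        }
        finally
        {
            reader.close();
        }
    }
}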


    /**
     * @throws IOException on any I/O error
     */
    public void write(WritableByteChannel channel) throws IOException
    {
        long totalSize = totalSize();
        RandomAccessReader file = sstable.openDataReader();
        ChecksumValidator validator = null;
        if (new File(sstable.descriptor.filenameFor(Component.CRC)).exists())
            validator = DataIntegrityMetadata.checksumValidator(sstable.descriptor);

        transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

        // setting up data compression stream
        compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
        long progress = 0L;

        try
        {
            // stream each of the required sections of the file
            for (Pair<Long, Long> section : sections)
            {
                long start = validator == null ? section.left : validator.chunkStart(section.left);
                int skipBytes = (int) (section.left - start);
                // seek to the beginning of the section
                file.seek(start);
                if (validator != null)
                    validator.seek(start);

                // length of the section to read
                long length = section.right - start;
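
When a CRC component exists, the excerpt above cannot start reading mid-chunk: validator.chunkStart(section.left) rounds the section start down to a checksum-chunk boundary, and skipBytes records how far into that chunk the requested data begins. A standalone sketch of that rounding (chunkStart here is a hypothetical helper mirroring the validator's):

public final class ChunkAlignment
{
    // hypothetical helper mirroring validator.chunkStart: checksums cover
    // fixed-size chunks, so reads must begin on a chunk boundary
    static long chunkStart(long offset, int chunkSize)
    {
        return (offset / chunkSize) * chunkSize;
    }

    public static void main(String[] args)
    {
        int chunkSize = 65536;      // illustrative chunk size
        long sectionLeft = 100000;  // requested section start
        long start = chunkStart(sectionLeft, chunkSize);
        int skipBytes = (int) (sectionLeft - start);
        // prints "start=65536, skipBytes=34464": read from the boundary so the
        // chunk can be validated, then discard the first skipBytes bytes
        System.out.println("start=" + start + ", skipBytes=" + skipBytes);
    }
}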


    /**
     * @throws IOException on failure to read/write input/output
     */
    public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes) throws IOException
    {
        SSTableReader sstable = SSTableReader.open(desc);
        RandomAccessReader dfile = sstable.openDataReader();

        IPartitioner<?> partitioner = sstable.partitioner;

        if (excludes != null)
            toExport.removeAll(Arrays.asList(excludes));

        outs.println("[");

        int i = 0;

        // remember the last key, to check that exported keys are in order
        DecoratedKey lastKey = null;

        for (String key : toExport)
        {
            DecoratedKey decoratedKey = partitioner.decorateKey(hexToBytes(key));

            if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

            lastKey = decoratedKey;

            RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
            if (entry == null)
                continue;

            dfile.seek(entry.position);
            ByteBufferUtil.readWithShortLength(dfile); // row key
            if (sstable.descriptor.version.hasRowSizeAndColumnCount)
                dfile.readLong(); // row size
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
            int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt() : Integer.MAX_VALUE;

            Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, columnCount, sstable.descriptor.version);

            checkStream(outs);

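
Several excerpts call ByteBufferUtil.readWithShortLength(dfile) to pull a row key off disk. The framing is simple: an unsigned 16-bit length followed by that many bytes. A self-contained restatement of that read (my paraphrase, not Cassandra's implementation):

import java.io.DataInput;
import java.io.IOException;
import java.nio.ByteBuffer;

public final class ShortLengthFraming
{
    // reads the "short length"-prefixed value that writeWithShortLength produces
    public static ByteBuffer readWithShortLength(DataInput in) throws IOException
    {
        int length = in.readUnsignedShort(); // 2-byte unsigned length prefix
        byte[] bytes = new byte[length];
        in.readFully(bytes);                 // the value itself, e.g. a row key
        return ByteBuffer.wrap(bytes);
    }
}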

    {
        logger.info("Replaying " + file.getPath());
        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
        final long segment = desc.id;
        int version = desc.getMessagingVersion();
        RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()));
        try
        {
            assert reader.length() <= Integer.MAX_VALUE;
            int replayPosition;
            if (globalPosition.segment < segment)
            {
                replayPosition = 0;
            }
            else if (globalPosition.segment == segment)
            {
                replayPosition = globalPosition.position;
            }
            else
            {
                logger.debug("skipping replay of fully-flushed {}", file);
                return;
            }

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + replayPosition);
            reader.seek(replayPosition);

            /* read the log, populate RowMutations, and apply them */
            while (!reader.isEOF())
            {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                int serializedSize;
                try
                {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    if (serializedSize == CommitLog.END_OF_SEGMENT_MARKER)
                    {
                        logger.debug("Encountered end of segment marker at " + reader.getFilePointer());
                        break;
                    }

                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Keyspace and Key (including the
                    // 2-byte length from writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;

                    long claimedSizeChecksum = reader.readLong();
                    checksum.reset();
                    if (version < CommitLogDescriptor.VERSION_20)
                        checksum.update(serializedSize);
                    else
                        FBUtilities.updateChecksumInt(checksum, serializedSize);

                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully; that's ok

                    if (serializedSize > buffer.length)
                        buffer = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(buffer, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                }
                catch (EOFException eof)
                {
                    break; // last CL entry didn't get completely written. that's ok.
                }

                checksum.update(buffer, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue())
                {
                    // this entry must not have been fsynced; the rest is probably bad too,
                    // but there's no harm in trying since we still read on an entry boundary
                    continue;
                }

                /* deserialize the commit log entry */
                FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
                RowMutation rm;
                try
                {
                    // We assume the current version here; what gets written to the CL is
                    // always the current version, so drain the CL before upgrading a node.
                    rm = RowMutation.serializer.deserialize(new DataInputStream(bufIn), version, ColumnSerializer.Flag.LOCAL);
                }
                catch (UnknownColumnFamilyException ex)
                {
                    if (ex.cfId == null)
                        continue;
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null)
                    {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    }
                    else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getKeyspaceName(), ByteBufferUtil.bytesToHex(rm.key()), "{" + StringUtils.join(rm.getColumnFamilies().iterator(), ", ")
                            + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
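
The replay loop above consumes records framed as an int size, a checksum over that size, the payload, and a checksum over the payload; a mismatch on either checksum means the record was never fully synced, and the loop skips it or stops. A sketch of one record read, using java.util.zip.CRC32 in place of Cassandra's checksum; feeding the size bytes high-first is my assumption about what FBUtilities.updateChecksumInt does:

import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public final class CommitLogRecordSketch
{
    // returns the payload, or null when a checksum disagrees and the
    // record should be treated as unsynced, as the replay loop above does
    static byte[] readRecord(DataInputStream in) throws IOException
    {
        CRC32 checksum = new CRC32();
        int serializedSize = in.readInt();
        long claimedSizeChecksum = in.readLong();
        // assumed byte order: all four size bytes, most significant first
        checksum.update((serializedSize >>> 24) & 0xFF);
        checksum.update((serializedSize >>> 16) & 0xFF);
        checksum.update((serializedSize >>> 8) & 0xFF);
        checksum.update(serializedSize & 0xFF);
        if (checksum.getValue() != claimedSizeChecksum)
            return null; // size prefix not synced fully

        byte[] payload = new byte[serializedSize];
        in.readFully(payload);
        long claimedCRC32 = in.readLong();
        checksum.reset();
        checksum.update(payload, 0, serializedSize);
        return checksum.getValue() == claimedCRC32 ? payload : null;
    }
}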

    {
        logger.info("Replaying " + file.getPath());
        CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName());
        final long segment = desc.id;
        int version = desc.getMessagingVersion();
        RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()), true);
        try
        {
            assert reader.length() <= Integer.MAX_VALUE;
            int replayPosition;
            if (globalPosition.segment < segment)
            {
                replayPosition = 0;
            }
            else if (globalPosition.segment == segment)
            {
                replayPosition = globalPosition.position;
            }
            else
            {
                logger.debug("skipping replay of fully-flushed {}", file);
                return;
            }

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + replayPosition);
            reader.seek(replayPosition);

            /* read the log, populate RowMutations, and apply them */
            while (!reader.isEOF())
            {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                int serializedSize;
                try
                {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    if (serializedSize == CommitLog.END_OF_SEGMENT_MARKER)
                    {
                        logger.debug("Encountered end of segment marker at " + reader.getFilePointer());
                        break;
                    }

                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the
                    // 2-byte length from writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.reset();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully; that's ok

                    if (serializedSize > buffer.length)
                        buffer = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(buffer, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                }
                catch (EOFException eof)
                {
                    break; // last CL entry didn't get completely written. that's ok.
                }

                checksum.update(buffer, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue())
                {
                    // this entry must not have been fsynced; the rest is probably bad too,
                    // but there's no harm in trying since we still read on an entry boundary
                    continue;
                }

                /* deserialize the commit log entry */
                FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
                RowMutation rm;
                try
                {
                    // We assume the current version here; what gets written to the CL is
                    // always the current version, so drain the CL before upgrading a node.
                    rm = RowMutation.serializer.deserialize(new DataInputStream(bufIn), version, IColumnSerializer.Flag.LOCAL);
                    // double-check that what we read is [still] valid for the current schema
                    for (ColumnFamily cf : rm.getColumnFamilies())
                        for (IColumn cell : cf)
                            cf.getComparator().validate(cell.name());
                }
                catch (UnknownColumnFamilyException ex)
                {
                    if (ex.cfId == null)
                        continue;
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null)
                    {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    }
                    else
                        i.incrementAndGet();
                    continue;
                }
                catch (Throwable t)
                {
                    File f = File.createTempFile("mutation", "dat");
                    DataOutputStream out = new DataOutputStream(new FileOutputStream(f));
                    try
                    {
                        out.write(buffer, 0, serializedSize);
                    }
                    finally
                    {
                        out.close();
                    }
                    String st = String.format("Unexpected error deserializing mutation; saved to %s and ignored.  This may be caused by replaying a mutation against a table with the same name but incompatible schema.  Exception follows: ",
                                              f.getAbsolutePath());
                    logger.error(st, t);
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(), ByteBufferUtil.bytesToHex(rm.key()), "{" + StringUtils.join(rm.getColumnFamilies().iterator(), ", ")
                            + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {

        // write header
        ByteBuffer headerBuffer = MessagingService.instance().constructStreamHeader(header, false, MessagingService.instance().getVersion(to));
        socket.getOutputStream().write(ByteBufferUtil.getArray(headerBuffer));

        RandomAccessReader file = RandomAccessReader.open(new File(header.file.getFilename()), true);
        FileChannel fc = file.getChannel();

        StreamingMetrics.activeStreamsOutbound.inc();
        // calculate the chunks to transfer; we want to send contiguous chunks together
        List<Pair<Long, Long>> sections = getTransferSections(header.file.compressionInfo.chunks);
        try
        {
            long totalBytesTransferred = 0;
            // stream each of the required sections of the file
            for (Pair<Long, Long> section : sections)
            {
                // seek to the beginning of the section when socket channel is not available
                if (sc == null)
                    file.seek(section.left);
                // length of the section to stream
                long length = section.right - section.left;
                // tracks write progress
                long bytesTransferred = 0;
                while (bytesTransferred < length)
                {
                    int toTransfer = (int) Math.min(CHUNK_SIZE, length - bytesTransferred);
                    long lastWrite;
                    if (sc != null)
                    {
                        lastWrite = fc.transferTo(section.left + bytesTransferred, toTransfer, sc);
                        throttle.throttleDelta(lastWrite);
                    }
                    else
                    {
                        // NIO is not available. Fall back to normal streaming.
                        // This happens when inter-node encryption is turned on.
                        if (transferBuffer == null)
                            transferBuffer = new byte[CHUNK_SIZE];
                        file.readFully(transferBuffer, 0, toTransfer);
                        socket.getOutputStream().write(transferBuffer, 0, toTransfer);
                        throttle.throttleDelta(toTransfer);
                        lastWrite = toTransfer;
                    }
                    totalBytesTransferred += lastWrite;
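
The excerpt above uses FileChannel.transferTo for zero-copy writes when a plain socket channel is available. transferTo may move fewer bytes than requested, which is why the excerpt accumulates bytesTransferred rather than assuming one call suffices. A minimal standalone version of that loop (the 64 KB chunk size stands in for the excerpt's CHUNK_SIZE):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

public final class SectionTransfer
{
    static void transferSection(File file, long start, long end, WritableByteChannel out) throws IOException
    {
        FileChannel fc = new FileInputStream(file).getChannel();
        try
        {
            long length = end - start; // section is [start, end)
            long bytesTransferred = 0;
            while (bytesTransferred < length)
            {
                long toTransfer = Math.min(64 * 1024, length - bytesTransferred);
                // transferTo may write less than requested; advance by the
                // amount actually written, as the excerpt above does
                bytesTransferred += fc.transferTo(start + bytesTransferred, toTransfer, out);
            }
        }
        finally
        {
            fc.close();
        }
    }
}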

        if (header.file == null)
            return;

        // try to skip the kernel page cache if possible
        RandomAccessReader file = RandomAccessReader.open(new File(header.file.getFilename()), true);

        // setting up data compression stream
        compressedoutput = new LZFOutputStream(output);

        StreamingMetrics.activeStreamsOutbound.inc();
        try
        {
            long totalBytesTransferred = 0;
            // stream each of the required sections of the file
            for (Pair<Long, Long> section : header.file.sections)
            {
                // seek to the beginning of the section
                file.seek(section.left);

                // length of the section to stream
                long length = section.right - section.left;
                // tracks write progress
                long bytesTransferred = 0;
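
When NIO transfer isn't possible, the excerpts fall back to reading sections into a buffer and writing them through an LZFOutputStream. A sketch of one section copied that way, assuming the com.ning.compress LZF stream (the library these excerpts appear to use) and an illustrative 64 KB transfer buffer:

import java.io.IOException;

import com.ning.compress.lzf.LZFOutputStream;

import org.apache.cassandra.io.util.RandomAccessReader;

public final class CompressedSectionStream
{
    static void streamSection(RandomAccessReader file, long left, long right, LZFOutputStream compressed) throws IOException
    {
        file.seek(left);                     // beginning of the section
        long remaining = right - left;
        byte[] buffer = new byte[64 * 1024]; // illustrative transfer buffer
        while (remaining > 0)
        {
            int toRead = (int) Math.min(buffer.length, remaining);
            file.readFully(buffer, 0, toRead);
            compressed.write(buffer, 0, toRead);
            remaining -= toRead;
        }
        compressed.flush(); // flush, but don't close: more sections may follow
    }
}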

            CompactionManager.instance.performMaximal(cfStore);
        }
        // verify that we do indeed have multiple index entries
        SSTableReader sstable = cfStore.getSSTables().iterator().next();
        long position = sstable.getPosition(key, SSTableReader.Operator.EQ);
        RandomAccessReader file = sstable.openDataReader(false);
        file.seek(position);
        assert ByteBufferUtil.readWithShortLength(file).equals(key.key);
        SSTableReader.readRowSize(file, sstable.descriptor);
        IndexHelper.skipBloomFilter(file);
        ArrayList<IndexHelper.IndexInfo> indexes = IndexHelper.deserializeIndex(file);
        assert indexes.size() > 2;

        verifySingle(ssTable, bytes, key);
    }

    private void verifySingle(SSTableReader sstable, ByteBuffer bytes, ByteBuffer key) throws IOException
    {
        RandomAccessReader file = sstable.openDataReader(false);
        file.seek(sstable.getPosition(sstable.partitioner.decorateKey(key), SSTableReader.Operator.EQ));
        assert key.equals(ByteBufferUtil.readWithShortLength(file));
        int size = (int)SSTableReader.readRowSize(file, sstable.descriptor);
        byte[] bytes2 = new byte[size];
        file.readFully(bytes2);
        assert ByteBuffer.wrap(bytes2).equals(bytes);
    }
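
verifySingle walks the row's on-disk prelude by hand: a short-length-prefixed key, the row size, then rowSize bytes of row data. The same walk as a standalone sketch, assuming a plain 8-byte row size (readRowSize is version-dependent in the real code):

import java.io.DataInput;
import java.io.IOException;
import java.nio.ByteBuffer;

public final class RowPreludeSketch
{
    static ByteBuffer readRow(DataInput file, ByteBuffer expectedKey) throws IOException
    {
        // short-length-prefixed key, as in readWithShortLength above
        byte[] key = new byte[file.readUnsignedShort()];
        file.readFully(key);
        if (!expectedKey.equals(ByteBuffer.wrap(key)))
            throw new IOException("key mismatch at row position");
        long rowSize = file.readLong(); // assumption: plain 8-byte row size
        byte[] row = new byte[(int) rowSize];
        file.readFully(row);
        return ByteBuffer.wrap(row);
    }
}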

    private void verifyMany(SSTableReader sstable, Map<ByteBuffer, ByteBuffer> map) throws IOException
    {
        List<ByteBuffer> keys = new ArrayList<ByteBuffer>(map.keySet());
        Collections.shuffle(keys);
        RandomAccessReader file = sstable.openDataReader(false);
        for (ByteBuffer key : keys)
        {
            file.seek(sstable.getPosition(sstable.partitioner.decorateKey(key), SSTableReader.Operator.EQ));
            assert key.equals(ByteBufferUtil.readWithShortLength(file));
            int size = (int)SSTableReader.readRowSize(file, sstable.descriptor);
            byte[] bytes2 = new byte[size];
            file.readFully(bytes2);
            assert Arrays.equals(bytes2, map.get(key).array());
        }
    }

    public RandomAccessReader get(String path)
    {
        metrics.requests.mark();

        Queue<RandomAccessReader> instances = getCacheFor(path);
        RandomAccessReader result = instances.poll();
        if (result != null)
        {
            metrics.hits.mark();
            memoryUsage.addAndGet(-result.getTotalBufferSize());
        }

        return result;
    }
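
The last excerpt is the hit path of a reader pool: a recycled reader is popped from a per-path queue, and the pool's memory accounting shrinks by that reader's buffer size. A sketch of the pool shape around it (ReaderPool, Buffered, and release are illustrative names, not Cassandra's API):

import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public final class ReaderPool<R extends ReaderPool.Buffered>
{
    // anything with a measurable buffer, standing in for getTotalBufferSize()
    public interface Buffered { long bufferSize(); }

    private final ConcurrentMap<String, Queue<R>> cache = new ConcurrentHashMap<String, Queue<R>>();
    private final AtomicLong memoryUsage = new AtomicLong();

    // mirrors get(String path) above: a hit hands back a recycled reader
    // and the pool stops accounting for its buffer
    public R get(String path)
    {
        Queue<R> q = cache.get(path);
        R reader = q == null ? null : q.poll();
        if (reader != null)
            memoryUsage.addAndGet(-reader.bufferSize());
        return reader; // null means the caller opens a fresh reader
    }

    // hypothetical counterpart: recycle a reader once the caller is done
    public void release(String path, R reader)
    {
        Queue<R> q = cache.get(path);
        if (q == null)
        {
            Queue<R> created = new ConcurrentLinkedQueue<R>();
            Queue<R> raced = cache.putIfAbsent(path, created);
            q = raced == null ? created : raced;
        }
        q.add(reader);
        memoryUsage.addAndGet(reader.bufferSize());
    }
}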
