Package org.apache.cassandra.io.util

Examples of org.apache.cassandra.io.util.RandomAccessReader
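
The snippets below are drawn from the Cassandra codebase and share one basic pattern: open a reader, seek to a known position, read, and close. As a quick orientation, here is a minimal sketch of that pattern, assuming only methods that appear in the examples below; the file path is a hypothetical placeholder.

    import java.io.File;
    import java.io.IOException;

    import org.apache.cassandra.io.util.RandomAccessReader;

    public class ReaderSketch
    {
        public static void main(String[] args) throws IOException
        {
            // the path is a placeholder; any readable local file works for this sketch
            RandomAccessReader reader = RandomAccessReader.open(new File("/tmp/example.db"));
            try
            {
                reader.seek(0);                // absolute positioning, as in RandomAccessFile
                byte[] header = new byte[8];
                if (reader.length() >= header.length)
                    reader.readFully(header);  // DataInput-style bulk read
                System.out.println("position=" + reader.getFilePointer() + " eof=" + reader.isEOF());
            }
            finally
            {
                reader.close();                // release the underlying file handle
            }
        }
    }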


    public String getPath()
    {
        // if the input comes from a file, return its path; otherwise it came in over streaming
        if (input instanceof RandomAccessReader)
        {
            RandomAccessReader file = (RandomAccessReader) input;
            return file.getPath();
        }
        else
        {
            throw new UnsupportedOperationException();
        }
    }


    public void reset()
    {
        if (!(input instanceof RandomAccessReader))
            throw new UnsupportedOperationException();

        RandomAccessReader file = (RandomAccessReader) input;
        try
        {
            file.seek(columnPosition);
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

        verifySingle(ssTable, bytes, key);
    }

    private void verifySingle(SSTableReader sstable, ByteBuffer bytes, ByteBuffer key) throws IOException
    {
        RandomAccessReader file = sstable.openDataReader(false);
        file.seek(sstable.getPosition(sstable.partitioner.decorateKey(key), SSTableReader.Operator.EQ));
        assert key.equals(ByteBufferUtil.readWithShortLength(file));
        int size = (int)SSTableReader.readRowSize(file, sstable.descriptor);
        byte[] bytes2 = new byte[size];
        file.readFully(bytes2);
        assert ByteBuffer.wrap(bytes2).equals(bytes);
    }
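
verifySingle above decodes the legacy row framing directly: a 2-byte key length, the key bytes, then the row size (readRowSize), then the row payload. A hedged sketch of that framing using plain java.io.DataInput calls, assuming the 8-byte (long) row-size variant:

    // Sketch of the row framing decoded by verifySingle; assumes the long
    // (8-byte) row-size variant implied by readRowSize above.
    static java.nio.ByteBuffer readRow(java.io.DataInput in) throws java.io.IOException
    {
        int keyLength = in.readUnsignedShort();  // 2-byte length, as in readWithShortLength
        byte[] key = new byte[keyLength];
        in.readFully(key);                       // key bytes
        long rowSize = in.readLong();            // row size
        byte[] row = new byte[(int) rowSize];
        in.readFully(row);                       // row payload
        return java.nio.ByteBuffer.wrap(row);
    }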

    private void verifyMany(SSTableReader sstable, Map<ByteBuffer, ByteBuffer> map) throws IOException
    {
        List<ByteBuffer> keys = new ArrayList<ByteBuffer>(map.keySet());
        Collections.shuffle(keys);
        RandomAccessReader file = sstable.openDataReader(false);
        for (ByteBuffer key : keys)
        {
            file.seek(sstable.getPosition(sstable.partitioner.decorateKey(key), SSTableReader.Operator.EQ));
            assert key.equals(ByteBufferUtil.readWithShortLength(file));
            int size = (int)SSTableReader.readRowSize(file, sstable.descriptor);
            byte[] bytes2 = new byte[size];
            file.readFully(bytes2);
            assert Arrays.equals(bytes2, map.get(key).array());
        }
    }

            CompactionManager.instance.performMaximal(cfStore);
        }
        // verify that we do indeed have multiple index entries
        SSTableReader sstable = cfStore.getSSTables().iterator().next();
        long position = sstable.getPosition(key, SSTableReader.Operator.EQ);
        RandomAccessReader file = sstable.openDataReader(false);
        file.seek(position);
        assert ByteBufferUtil.readWithShortLength(file).equals(key.key);
        SSTableReader.readRowSize(file, sstable.descriptor);
        IndexHelper.skipBloomFilter(file);
        ArrayList<IndexHelper.IndexInfo> indexes = IndexHelper.deserializeIndex(file);
        assert indexes.size() > 2;

        // loop through each row, deserializing to check for damage.
        // we'll also loop through the index at the same time, using the position from the index to recover if the
        // row header (key or data size) is corrupt. (This means our position in the index file will be one row
        // "ahead" of the data file.)
        final RandomAccessReader dataFile = sstable.openDataReader(true);
        RandomAccessReader indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
        ScrubInfo scrubInfo = new ScrubInfo(dataFile, sstable);
        executor.beginCompaction(scrubInfo);

        SSTableWriter writer = null;
        SSTableReader newSstable = null;
        int goodRows = 0, badRows = 0, emptyRows = 0;

        try
        {
            ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
            {
                // throw away variable so we don't have a side effect in the assert
                long firstRowPositionFromIndex = indexFile.readLong();
                assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
            }

            // TODO errors when creating the writer may leave empty temp files.
            writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null, Collections.singletonList(sstable));

            while (!dataFile.isEOF())
            {
                long rowStart = dataFile.getFilePointer();
                if (logger.isDebugEnabled())
                    logger.debug("Reading row at " + rowStart);

                DecoratedKey key = null;
                long dataSize = -1;
                try
                {
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(dataFile));
                    dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                    if (logger.isDebugEnabled())
                        logger.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
                }
                catch (Throwable th)
                {
                    throwIfFatal(th);
                    // check for null key below
                }

                ByteBuffer currentIndexKey = nextIndexKey;
                long nextRowPositionFromIndex;
                try
                {
                    nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                    nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
                }
                catch (Throwable th)
                {
                    logger.warn("Error reading index file", th);
                    nextIndexKey = null;
                    nextRowPositionFromIndex = dataFile.length();
                }

                long dataStart = dataFile.getFilePointer();
                long dataStartFromIndex = currentIndexKey == null
                                        ? -1
                                        : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
                long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
                assert currentIndexKey != null || indexFile.isEOF();
                if (logger.isDebugEnabled() && currentIndexKey != null)
                    logger.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey),  dataSizeFromIndex));

                writer.mark();
                try
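
The dataStartFromIndex arithmetic above is easy to misread, so here is a worked example with a hypothetical 10-byte key and the long (8-byte) row-size variant:

    // Worked example of the row-header arithmetic from the scrub loop above.
    long rowStart = 1000L;            // hypothetical position of the row start
    int keyLength = 10;               // hypothetical key length in bytes
    boolean hasIntRowSize = false;    // long row size -> 8 bytes
    long dataStartFromIndex = rowStart + 2 + keyLength + (hasIntRowSize ? 4 : 8);
    // 2-byte key length + 10 key bytes + 8-byte row size = 20-byte header
    assert dataStartFromIndex == 1020L;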

        if (header.file == null)
            return;

        // TODO just use a raw RandomAccessFile since we're managing our own buffer here
        RandomAccessReader file = (header.file.sstable.compression) // try to skip kernel page cache if possible
                                ? CompressedRandomAccessReader.open(header.file.getFilename(), header.file.sstable.getCompressionMetadata(), true)
                                : RandomAccessReader.open(new File(header.file.getFilename()), true);

        // setting up data compression stream
        output = new LZFOutputStream(output);

        try
        {
            // stream each of the required sections of the file
            for (Pair<Long, Long> section : header.file.sections)
            {
                // seek to the beginning of the section
                file.seek(section.left);

                // length of the section to stream
                long length = section.right - section.left;
                // tracks write progress
                long bytesTransferred = 0;
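
The snippet is cut off before the transfer loop itself. As a minimal sketch, a loop like the following could copy one section in fixed-size chunks; CHUNK_SIZE and the buffer are assumptions for illustration, not the original code:

    // Hedged sketch of a chunked copy for one section; not the original loop.
    final int CHUNK_SIZE = 64 * 1024;
    byte[] transferBuffer = new byte[CHUNK_SIZE];
    while (bytesTransferred < length)
    {
        int toRead = (int) Math.min(CHUNK_SIZE, length - bytesTransferred);
        file.readFully(transferBuffer, 0, toRead);  // next chunk of the section
        output.write(transferBuffer, 0, toRead);    // LZF-compressed by the wrapper above
        bytesTransferred += toRead;
    }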

    public String getPath()
    {
        // if the input comes from a file, return its path; otherwise it came in over streaming
        if (in instanceof RandomAccessReader)
        {
            RandomAccessReader file = (RandomAccessReader) in;
            return file.getPath();
        }
        else
        {
            throw new UnsupportedOperationException();
        }
    }

    /**
     * @throws IOException on failure to read/write input/output
     */
    public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes, CFMetaData metadata) throws IOException
    {
        SSTableReader sstable = SSTableReader.open(desc);
        RandomAccessReader dfile = sstable.openDataReader();
        try
        {
            IPartitioner partitioner = sstable.partitioner;

            if (excludes != null)
                toExport.removeAll(Arrays.asList(excludes));

            outs.println("[");

            int i = 0;

            // last key to compare order
            DecoratedKey lastKey = null;

            for (String key : toExport)
            {
                DecoratedKey decoratedKey = partitioner.decorateKey(metadata.getKeyValidator().fromString(key));

                if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                    throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

                lastKey = decoratedKey;

                RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
                if (entry == null)
                    continue;

                dfile.seek(entry.position);
                ByteBufferUtil.readWithShortLength(dfile); // row key
                DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));

                Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);
                checkStream(outs);

                if (i != 0)
                    outs.println(",");
                i++;
                serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
            }

            outs.println("\n]");
            outs.flush();
        }
        finally
        {
            dfile.close();
        }
    }
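
A hypothetical call site for export() above; the data file path, keyspace/table names, and keys are placeholders for illustration (note that toExport must be mutable, since export() may call removeAll on it):

    // Hypothetical invocation of the export() method above; all names are placeholders.
    Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks/cf/ks-cf-ic-1-Data.db");
    CFMetaData metadata = Schema.instance.getCFMetaData("ks", "cf");
    List<String> toExport = new ArrayList<String>(Arrays.asList("key1", "key2"));
    export(desc, System.out, toExport, new String[0], metadata);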

        final long segmentId = desc.id;
        logger.info("Replaying {} (CL version {}, messaging version {})",
                    file.getPath(),
                    desc.version,
                    desc.getMessagingVersion());
        RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()));

        try
        {
            assert reader.length() <= Integer.MAX_VALUE;
            int offset = getStartOffset(segmentId, desc.version);
            if (offset < 0)
            {
                logger.debug("skipping replay of fully-flushed {}", file);
                return;
            }

            int prevEnd = CommitLogDescriptor.HEADER_SIZE;
            main: while (true)
            {

                int end = prevEnd;
                if (desc.version < CommitLogDescriptor.VERSION_21)
                    end = Integer.MAX_VALUE;
                else
                {
                    do { end = readSyncMarker(desc, end, reader); }
                    while (end < offset && end > prevEnd);
                }

                if (end < prevEnd)
                    break;

                if (logger.isDebugEnabled())
                    logger.debug("Replaying {} between {} and {}", file, offset, end);

                reader.seek(offset);

                /* read the log entries, rebuild each Mutation, and apply it */
                while (reader.getPosition() < end && !reader.isEOF())
                {
                    if (logger.isDebugEnabled())
                        logger.debug("Reading mutation at {}", reader.getFilePointer());

                    long claimedCRC32;
                    int serializedSize;
                    try
                    {
                        // any of the reads may hit EOF
                        serializedSize = reader.readInt();
                        if (serializedSize == LEGACY_END_OF_SEGMENT_MARKER)
                        {
                            logger.debug("Encountered end of segment marker at {}", reader.getFilePointer());
                            break main;
                        }

                        // Mutation must be at LEAST 10 bytes:
                        // 3 each for a non-empty Keyspace and Key (including the
                        // 2-byte length from writeUTF/writeWithShortLength) and 4 bytes for column count.
                        // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                        if (serializedSize < 10)
                            break main;

                        long claimedSizeChecksum;
                        if (desc.version < CommitLogDescriptor.VERSION_21)
                            claimedSizeChecksum = reader.readLong();
                        else
                            claimedSizeChecksum = reader.readInt() & 0xffffffffL;
                        checksum.reset();
                        if (desc.version < CommitLogDescriptor.VERSION_20)
                            checksum.update(serializedSize);
                        else
                            checksum.updateInt(serializedSize);

                        if (checksum.getValue() != claimedSizeChecksum)
                            break main; // entry wasn't synced correctly/fully; that's ok

                        if (serializedSize > buffer.length)
                            buffer = new byte[(int) (1.2 * serializedSize)];
                        reader.readFully(buffer, 0, serializedSize);
                        if (desc.version < CommitLogDescriptor.VERSION_21)
                            claimedCRC32 = reader.readLong();
                        else
                            claimedCRC32 = reader.readInt() & 0xffffffffL;
                    }
                    catch (EOFException eof)
                    {
                        break main; // last CL entry didn't get completely written. that's ok.
                    }

                    checksum.update(buffer, 0, serializedSize);
                    if (claimedCRC32 != checksum.getValue())
                    {
                        // this entry must not have been fsynced. probably the rest is bad too,
                        // but just in case there is no harm in trying them (since we still read on an entry boundary)
                        continue;
                    }

                    /* deserialize the commit log entry */
                    FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
                    final Mutation mutation;
                    try
                    {
                        mutation = Mutation.serializer.deserialize(new DataInputStream(bufIn),
                                                                   desc.getMessagingVersion(),
                                                                   ColumnSerializer.Flag.LOCAL);
                        // doublecheck that what we read is [still] valid for the current schema
                        for (ColumnFamily cf : mutation.getColumnFamilies())
                            for (Cell cell : cf)
                                cf.getComparator().validate(cell.name());
                    }
                    catch (UnknownColumnFamilyException ex)
                    {
                        if (ex.cfId == null)
                            continue;
                        AtomicInteger i = invalidMutations.get(ex.cfId);
                        if (i == null)
                        {
                            i = new AtomicInteger(1);
                            invalidMutations.put(ex.cfId, i);
                        }
                        else
                            i.incrementAndGet();
                        continue;
                    }
                    catch (Throwable t)
                    {
                        JVMStabilityInspector.inspectThrowable(t);
                        File f = File.createTempFile("mutation", "dat");
                        DataOutputStream out = new DataOutputStream(new FileOutputStream(f));
                        try
                        {
                            out.write(buffer, 0, serializedSize);
                        }
                        finally
                        {
                            out.close();
                        }
                        String st = String.format("Unexpected error deserializing mutation; saved to %s and ignored.  This may be caused by replaying a mutation against a table with the same name but incompatible schema.  Exception follows: ",
                                                  f.getAbsolutePath());
                        logger.error(st, t);
                        continue;
                    }

                    if (logger.isDebugEnabled())
                        logger.debug("replaying mutation for {}.{}: {}", mutation.getKeyspaceName(), ByteBufferUtil.bytesToHex(mutation.key()), "{" + StringUtils.join(mutation.getColumnFamilies().iterator(), ", ") + "}");

                    final long entryLocation = reader.getFilePointer();
                    Runnable runnable = new WrappedRunnable()
                    {
                        public void runMayThrow() throws IOException
                        {
                            if (Schema.instance.getKSMetaData(mutation.getKeyspaceName()) == null)
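
One detail worth pausing on in the replay loop: on newer segment versions the 4-byte checksum is read as reader.readInt() & 0xffffffffL, which widens the signed int to a non-negative long so it can be compared with Checksum.getValue(). A standalone illustration of the idiom:

    // Illustration of the unsigned-int widening used for the CRC reads above.
    int raw = 0xDEADBEEF;               // negative when read as a signed int
    long unsigned = raw & 0xffffffffL;  // widens to 3735928559L, the unsigned value
    assert raw < 0 && unsigned == 3735928559L;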
