Package org.elasticsearch.common.io.stream

Examples of org.elasticsearch.common.io.stream.StreamInput, Elasticsearch's stream abstraction for reading primitives and serialized objects from an underlying byte source. Each excerpt below is taken from a larger source file.


                                logger.warn("failed to receive packet", e);
                            }
                            continue;
                        }
                        try {
                            StreamInput input = CachedStreamInput.cachedHandles(new BytesStreamInput(datagramPacketReceive.getData(), datagramPacketReceive.getOffset(), datagramPacketReceive.getLength()));
                            id = input.readInt();
                            clusterName = ClusterName.readClusterName(input);
                            requestingNodeX = readNode(input);
                        } catch (Exception e) {
                            logger.warn("failed to read requesting node from {}", e, datagramPacketReceive.getSocketAddress());
                            continue;
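The snippet above wraps only the valid region of the received datagram in a BytesStreamInput before decoding the ping header. Here is a minimal sketch of that wrap-and-read step in isolation, using only the constructor and read calls shown above; the four-byte payload is a hypothetical stand-in for a real packet:

    // Hypothetical payload: four big-endian bytes encoding the int 7.
    byte[] packet = new byte[] { 0, 0, 0, 7 };
    StreamInput input = CachedStreamInput.cachedHandles(
            new BytesStreamInput(packet, 0, packet.length));
    int id = input.readInt(); // 7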


        int size = buffer.getInt(buffer.readerIndex() - 4); // the 4-byte size prefix sits just before the current reader index

        int markedReaderIndex = buffer.readerIndex();
        int expectedIndexReader = markedReaderIndex + size;

        StreamInput streamIn = new ChannelBufferStreamInput(buffer, size);

        long requestId = buffer.readLong();
        byte status = buffer.readByte();
        boolean isRequest = TransportStreams.statusIsRequest(status);

    ThreadPool threadPool() {
        return this.threadPool;
    }

    void messageReceived(byte[] data, String action, LocalTransport sourceTransport, @Nullable final Long sendRequestId) {
        StreamInput stream = new BytesStreamInput(data);
        stream = CachedStreamInput.cachedHandles(stream);

        try {
            long requestId = stream.readLong();
            byte status = stream.readByte();
            boolean isRequest = TransportStreams.statusIsRequest(status);

            if (isRequest) {
                handleRequest(stream, requestId, sourceTransport);
            } else {
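Both the local and the Netty transport begin every message with the same header: a long request id followed by a status byte. A hedged round-trip sketch of just that header, built from BytesStreamOutput and the bytes().streamInput() accessor that appear elsewhere on this page; the values are illustrative:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeLong(42L);      // requestId
    out.writeByte((byte) 1); // status byte

    StreamInput in = out.bytes().streamInput();
    long requestId = in.readLong(); // 42
    byte status = in.readByte();    // 1
    in.close();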

        int markedReaderIndex = buffer.readerIndex();
        int expectedIndexReader = markedReaderIndex + size;

        // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh
        // buffer, or in the cumulation buffer, which is cleaned each time
        StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size);

        long requestId = buffer.readLong();
        byte status = buffer.readByte();
        Version version = Version.fromId(buffer.readInt());

        // !!! compression handling removed !!!
        StreamInput wrappedStream = CachedStreamInput.cachedHandles(streamIn);

        if (TransportStatus.isRequest(status)) {
            String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
            if (buffer.readerIndex() != expectedIndexReader) {
                if (buffer.readerIndex() < expectedIndexReader) {
                    logger.warn("Message not fully read (request) for [{}] and action [{}], resetting", requestId, action);
                } else {
                    logger.warn("Message read past expected size (request) for [{}] and action [{}], resetting", requestId, action);
                }
                buffer.readerIndex(expectedIndexReader);
            }
        } else {
            TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
            // ignore if it's null, the adapter logs it
            if (handler != null) {
                if (TransportStatus.isError(status)) {
                    handlerResponseError(wrappedStream, handler);
                } else {
                    handleResponse(wrappedStream, handler);
                }
            } else {
                // if it's null, skip those bytes
                buffer.readerIndex(markedReaderIndex + size);
            }
            if (buffer.readerIndex() != expectedIndexReader) {
                if (buffer.readerIndex() < expectedIndexReader) {
                    logger.warn("Message not fully read (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
                } else {
                    logger.warn("Message read past expected size (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
                }
                buffer.readerIndex(expectedIndexReader);
            }
        }
        wrappedStream.close();
    }
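The status byte read above multiplexes independent flags that TransportStatus.isRequest and TransportStatus.isError test. A sketch of how such bit-flag checks typically work; the mask values below are illustrative assumptions, not necessarily the library's actual constants:

    // Illustrative masks; the real TransportStatus layout may differ.
    final byte STATUS_REQUEST = 1 << 0;
    final byte STATUS_ERROR   = 1 << 1;

    byte status = 0;
    status |= STATUS_REQUEST; // mark the message as a request

    boolean isRequest = (status & STATUS_REQUEST) != 0; // true
    boolean isError   = (status & STATUS_ERROR) != 0;   // false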

        Compressor compressor = compressor(bytes);
        if (compressor != null) {
            if (bytes.hasArray()) {
                return new BytesArray(compressor.uncompress(bytes.array(), bytes.arrayOffset(), bytes.length()));
            }
            StreamInput compressed = compressor.streamInput(bytes.streamInput());
            BytesStreamOutput bStream = new BytesStreamOutput();
            Streams.copy(compressed, bStream);
            compressed.close();
            return bStream.bytes();
        }
        return bytes;
    }
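The method above is the decompress side of the compression support. A hedged sketch of producing input for it, assembled only from calls that appear in the snippets on this page (defaultCompressor, streamOutput, write, bytes); the payload is hypothetical:

    byte[] payload = "hello stream".getBytes(StandardCharsets.UTF_8); // hypothetical payload
    Compressor c = CompressorFactory.defaultCompressor();
    BytesStreamOutput raw = new BytesStreamOutput();
    StreamOutput compressedOut = c.streamOutput(raw); // wrap with a compressing stream
    compressedOut.write(payload, 0, payload.length);
    compressedOut.close();                            // flush the compressor's framing
    BytesReference compressedBytes = raw.bytes();     // suitable input for the uncompress path above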

        }
    }
      
    private void doTest(byte bytes[]) throws IOException {
        ByteBuffer bb = ByteBuffer.wrap(bytes);
        StreamInput rawIn = new ByteBufferStreamInput(bb);
        Compressor c = CompressorFactory.defaultCompressor();
       
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        OutputStreamStreamOutput rawOs = new OutputStreamStreamOutput(bos);
        StreamOutput os = c.streamOutput(rawOs);
       
        Random r = getRandom();
        int bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
        int prepadding = r.nextInt(70000);
        int postpadding = r.nextInt(70000);
        byte buffer[] = new byte[prepadding + bufferSize + postpadding];
        r.nextBytes(buffer); // fill block completely with junk
        int len;
        while ((len = rawIn.read(buffer, prepadding, bufferSize)) != -1) {
            os.write(buffer, prepadding, len);
        }
        os.close();
        rawIn.close();
       
        // now we have compressed byte array
       
        byte compressed[] = bos.toByteArray();
        ByteBuffer bb2 = ByteBuffer.wrap(compressed);
        StreamInput compressedIn = new ByteBufferStreamInput(bb2);
        StreamInput in = c.streamInput(compressedIn);
       
        // randomize constants again
        bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
        prepadding = r.nextInt(70000);
        postpadding = r.nextInt(70000);
        buffer = new byte[prepadding + bufferSize + postpadding];
        r.nextBytes(buffer); // fill block completely with junk
       
        ByteArrayOutputStream uncompressedOut = new ByteArrayOutputStream();
        while ((len = in.read(buffer, prepadding, bufferSize)) != -1) {
            uncompressedOut.write(buffer, prepadding, len);
        }
        uncompressedOut.close();
       
        assertArrayEquals(bytes, uncompressedOut.toByteArray());

        int markedReaderIndex = buffer.readerIndex();
        int expectedIndexReader = markedReaderIndex + size;

        // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh
        // buffer, or in the cumulation buffer, which is cleaned each time
        StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size);

        long requestId = buffer.readLong();
        byte status = buffer.readByte();
        Version version = Version.fromId(buffer.readInt());

        StreamInput wrappedStream;
        if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
            Compressor compressor = CompressorFactory.compressor(buffer);
            if (compressor == null) {
                int maxToRead = Math.min(buffer.readableBytes(), 10);
                int offset = buffer.readerIndex();
                StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [")
                        .append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes())
                        .append("] readable bytes with message size [").append(size).append("] are [");
                for (int i = 0; i < maxToRead; i++) {
                    sb.append(buffer.getByte(offset + i)).append(",");
                }
                sb.append("]");
                throw new ElasticsearchIllegalStateException(sb.toString());
            }
            wrappedStream = CachedStreamInput.cachedHandlesCompressed(compressor, streamIn);
        } else {
            wrappedStream = CachedStreamInput.cachedHandles(streamIn);
        }
        wrappedStream.setVersion(version);

        if (TransportStatus.isRequest(status)) {
            String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
            if (buffer.readerIndex() != expectedIndexReader) {
                if (buffer.readerIndex() < expectedIndexReader) {
                    logger.warn("Message not fully read (request) for [{}] and action [{}], resetting", requestId, action);
                } else {
                    logger.warn("Message read past expected size (request) for [{}] and action [{}], resetting", requestId, action);
                }
                buffer.readerIndex(expectedIndexReader);
            }
        } else {
            TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
            // ignore if it's null, the adapter logs it
            if (handler != null) {
                if (TransportStatus.isError(status)) {
                    handlerResponseError(wrappedStream, handler);
                } else {
                    handleResponse(ctx.getChannel(), wrappedStream, handler);
                }
            } else {
                // if it's null, skip those bytes
                buffer.readerIndex(markedReaderIndex + size);
            }
            if (buffer.readerIndex() != expectedIndexReader) {
                if (buffer.readerIndex() < expectedIndexReader) {
                    logger.warn("Message not fully read (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
                } else {
                    logger.warn("Message read past expected size (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
                }
                buffer.readerIndex(expectedIndexReader);
            }
        }
        wrappedStream.close();
    }

            recoveryState.getStart().time(System.currentTimeMillis() - recoveryState.getStart().startTime());
            recoveryState.getStart().checkIndexTime(indexShard.checkIndexTook());

            recoveryState.getTranslog().startTime(System.currentTimeMillis());
            recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
            StreamInput in = null;

            final Set<String> typesToUpdate = Sets.newHashSet();
            try {
                TranslogStream stream = TranslogStreams.translogStreamFor(recoveringTranslogFile);
                try {
                    in = stream.openInput(recoveringTranslogFile);
                } catch (TruncatedTranslogException e) {
                    // file is empty or header has been half-written and should be ignored
                    logger.trace("ignoring truncation exception, the translog is either empty or half-written ([{}])", e.getMessage());
                }
                while (true) {
                    if (in == null) {
                        break;
                    }
                    Translog.Operation operation;
                    try {
                        if (stream instanceof LegacyTranslogStream) {
                            in.readInt(); // ignored opSize
                        }
                        operation = stream.read(in);
                    } catch (EOFException e) {
                        // ignore; the last op was not properly written
                        logger.trace("ignoring translog EOF exception, the last operation was not properly written ([{}])", e.getMessage());
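Distilled from the loop above: legacy streams prefix each operation with a size that is read and thrown away, and an EOFException on the final, partially written operation simply ends the replay. A sketch of the complete pattern, with the truncated tail of the excerpt filled in as a plain break:

    while (true) {
        Translog.Operation operation;
        try {
            if (stream instanceof LegacyTranslogStream) {
                in.readInt(); // legacy framing: skip the per-op size prefix
            }
            operation = stream.read(in);
        } catch (EOFException e) {
            break; // the last op was cut off mid-write; stop replaying here
        }
        // ... apply the operation and collect types to update ...
    }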

    }

    public void testStreamInput() throws IOException {
        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
        BytesReference pbr = getRandomizedPagedBytesReference(length);
        StreamInput si = pbr.streamInput();
        assertNotNull(si);

        // read single bytes one by one
        assertEquals(pbr.get(0), si.readByte());
        assertEquals(pbr.get(1), si.readByte());
        assertEquals(pbr.get(2), si.readByte());

        // reset the stream for bulk reading
        si.reset();

        // buffer for bulk reads
        byte[] origBuf = new byte[length];
        getRandom().nextBytes(origBuf);
        byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length);

        // bulk-read 0 bytes: must not modify buffer
        si.readBytes(targetBuf, 0, 0);
        assertEquals(origBuf[0], targetBuf[0]);
        si.reset();

        // read a few bytes as ints
        int bytesToRead = randomIntBetween(1, length / 2);
        for (int i = 0; i < bytesToRead; i++) {
            int b = si.read();
            assertEquals(pbr.get(i), b);
        }
        si.reset();

        // bulk-read all
        si.readFully(targetBuf);
        assertArrayEquals(pbr.toBytes(), targetBuf);

        // continuing to read should now fail with EOFException
        try {
            si.readByte();
            fail("expected EOF");
        } catch (EOFException eof) {
            // yay
        }

        // try to read more than the stream contains
        si.reset();
        try {
            si.readBytes(targetBuf, 0, length * 2);
            fail("expected IndexOutOfBoundsException: le > stream.length");
        } catch (IndexOutOfBoundsException ioob) {
            // expected
        }
    }
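The test relies on two different end-of-stream contracts: read() follows the java.io.InputStream convention and returns -1, while readByte() follows the DataInput convention and throws EOFException. A small sketch of the distinction, assuming the BytesStreamOutput round trip shown elsewhere on this page:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeByte((byte) 5);
    StreamInput in = out.bytes().streamInput();

    assertEquals(5, in.read());  // consumes the only byte
    assertEquals(-1, in.read()); // InputStream-style EOF: returns -1
    try {
        in.readByte();           // DataInput-style EOF: throws instead
        fail("expected EOF");
    } catch (EOFException expected) {
    }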

    }

    public void testStreamInputBulkReadWithOffset() throws IOException {
        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
        BytesReference pbr = getRandomizedPagedBytesReference(length);
        StreamInput si = pbr.streamInput();
        assertNotNull(si);

        // read a bunch of single bytes one by one
        int offset = randomIntBetween(1, length / 2);
        for (int i = 0; i < offset; i++) {
            assertEquals(pbr.get(i), si.readByte());
        }

        // now do NOT reset the stream - keep the stream's offset!

        // buffer to compare remaining bytes against bulk read
        byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length);
        // randomized target buffer to ensure no stale slots
        byte[] targetBytes = new byte[pbrBytesWithOffset.length];
        getRandom().nextBytes(targetBytes);

        // bulk-read all
        si.readFully(targetBytes);
        assertArrayEquals(pbrBytesWithOffset, targetBytes);
    }
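To close, a hedged end-to-end sketch of the usual write/read pairing for StreamInput: values written through a BytesStreamOutput come back in the same order. writeString/readString are assumed counterparts in this generation of the API; every other call appears in the snippets above:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeInt(2014);
    out.writeLong(7L);
    out.writeString("stream-input"); // assumed: paired with readString below

    StreamInput in = out.bytes().streamInput();
    assertEquals(2014, in.readInt());
    assertEquals(7L, in.readLong());
    assertEquals("stream-input", in.readString());
    in.close();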
