Examples of StoreFileMetaData
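The excerpts below are collected from the Elasticsearch store and recovery code across several release lines. A StoreFileMetaData describes one file in a shard's Lucene store: its name, length, and checksum, plus a last-modified time in the pre-1.x excerpts or, from 1.3.0 on, the Lucene version that wrote the file. As a minimal sketch of how the accessors used below fit together (the four-argument constructor name/length/lastModified/checksum is inferred from the excerpts, not from current API docs):

    // Sketch only: constructor signature and accessors inferred from the excerpts below.
    import org.elasticsearch.index.store.StoreFileMetaData;

    public class StoreFileMetaDataSketch {
        public static void main(String[] args) {
            StoreFileMetaData local  = new StoreFileMetaData("_0.cfs", 1024, 0, "1abc2d");
            StoreFileMetaData remote = new StoreFileMetaData("_0.cfs", 1024, 0, "1abc2d");

            // The recovery excerpts call isSame(...) to decide whether a file already
            // present on the target node can be reused instead of being re-sent.
            if (local.isSame(remote)) {
                System.out.println("reuse " + local.name() + ", " + local.length()
                        + " bytes, checksum " + local.checksum());
            }
        }
    }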


Examples of org.elasticsearch.index.store.StoreFileMetaData

        // Directory wrapper: these overrides answer from the cached per-file metadata map
        // and fall back to the wrapped (delegate) directory when a value is not yet known.
        @Override public boolean fileExists(String name) throws IOException {
            return filesMetadata.containsKey(name);
        }

        @Override public long fileModified(String name) throws IOException {
            StoreFileMetaData metaData = filesMetadata.get(name);
            if (metaData == null) {
                throw new FileNotFoundException(name);
            }
            // not set yet (IndexOutput not closed)
            if (metaData.lastModified() != -1) {
                return metaData.lastModified();
            }
            return delegate.fileModified(name);
        }

Examples of org.elasticsearch.index.store.StoreFileMetaData

        }

        @Override public void touchFile(String name) throws IOException {
            delegate.touchFile(name);
            synchronized (mutex) {
                StoreFileMetaData metaData = filesMetadata.get(name);
                if (metaData != null) {
                    metaData = new StoreFileMetaData(metaData.name(), metaData.length(), delegate.fileModified(name), metaData.checksum());
                    filesMetadata = MapBuilder.newMapBuilder(filesMetadata).put(name, metaData).immutableMap();
                }
            }
        }

Examples of org.elasticsearch.index.store.StoreFileMetaData

                files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
            }
        }

        @Override public long fileLength(String name) throws IOException {
            StoreFileMetaData metaData = filesMetadata.get(name);
            if (metaData == null) {
                throw new FileNotFoundException(name);
            }
            // not set yet (IndexOutput not closed)
            if (metaData.length() != -1) {
                return metaData.length();
            }
            return delegate.fileLength(name);
        }

Examples of org.elasticsearch.index.store.StoreFileMetaData

        }

        public IndexOutput createOutput(String name, boolean computeChecksum) throws IOException {
            IndexOutput out = delegate.createOutput(name);
            synchronized (mutex) {
                StoreFileMetaData metaData = new StoreFileMetaData(name, -1, -1, null);
                filesMetadata = MapBuilder.newMapBuilder(filesMetadata).put(name, metaData).immutableMap();
                files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
            }
            return new StoreIndexOutput(out, name, computeChecksum);
        }
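The createOutput override above registers a placeholder entry: length and last-modified time are set to -1 and the checksum to null, because nothing is known until the IndexOutput is closed. The fileLength and fileModified overrides in the earlier excerpts treat -1 as "not set yet" and fall back to the delegate directory, and the next excerpt shows the placeholder being replaced with the real length, modification time, and checksum once the output is closed.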

Examples of org.elasticsearch.index.store.StoreFileMetaData

            String checksum = null;
            if (digest != null) {
                checksum = Long.toString(digest.getValue(), Character.MAX_RADIX);
            }
            synchronized (mutex) {
                StoreFileMetaData md = new StoreFileMetaData(name, directory().fileLength(name), directory().fileModified(name), checksum);
                filesMetadata = MapBuilder.newMapBuilder(filesMetadata).put(name, md).immutableMap();
                files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
            }
        }
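The checksum string stored above is simply the digest value rendered in base Character.MAX_RADIX (36). A standalone sketch of that encoding; the CRC32 digest is an assumption, since the excerpt only relies on Checksum.getValue():

    import java.util.zip.CRC32;

    public class ChecksumStringSketch {
        public static void main(String[] args) {
            // Assumption: CRC32 as the digest; the excerpt only calls getValue().
            CRC32 digest = new CRC32();
            digest.update("example file contents".getBytes(java.nio.charset.StandardCharsets.UTF_8));
            // Same encoding as the excerpt: the checksum long rendered in base 36.
            String checksum = Long.toString(digest.getValue(), Character.MAX_RADIX);
            System.out.println("checksum = " + checksum);
        }
    }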

Examples of org.elasticsearch.index.store.StoreFileMetaData

                long existingTotalSize = 0;
                try {
                    StopWatch stopWatch = new StopWatch().start();

                    for (String name : snapshot.getFiles()) {
                        StoreFileMetaData md = shard.store().metaData(name);
                        boolean useExisting = false;
                        if (request.existingFiles().containsKey(name)) {
                            // we don't compute checksum for segments, so always recover them
                            if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                                response.phase1ExistingFileNames.add(name);
                                response.phase1ExistingFileSizes.add(md.length());
                                existingTotalSize += md.length();
                                useExisting = true;
                                if (logger.isTraceEnabled()) {
                                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
                                }
                            }
                        }
                        if (!useExisting) {
                            if (request.existingFiles().containsKey(name)) {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
                            } else {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
                            }
                            response.phase1FileNames.add(name);
                            response.phase1FileSizes.add(md.length());
                        }
                        totalSize += md.length();
                    }
                    response.phase1TotalSize = totalSize;
                    response.phase1ExistingTotalSize = existingTotalSize;

                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                            response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                    final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                    for (final String name : response.phase1FileNames) {
                        concurrentStreamPool.execute(new Runnable() {
                            @Override public void run() {
                                IndexInput indexInput = null;
                                try {
                                    final int BUFFER_SIZE = (int) fileChunkSize.bytes();
                                    byte[] buf = new byte[BUFFER_SIZE];
                                    StoreFileMetaData md = shard.store().metaData(name);
                                    indexInput = snapshot.getDirectory().openInput(name);
                                    long len = indexInput.length();
                                    long readCount = 0;
                                    while (readCount < len) {
                                        if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                            throw new IndexShardClosedException(shard.shardId());
                                        }
                                        int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                        long position = indexInput.getFilePointer();
                                        indexInput.readBytes(buf, 0, toRead, false);
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.shardId(), name, position, len, md.checksum(), buf, toRead),
                                                TransportRequestOptions.options().withCompress(compress).withLowType(), VoidTransportResponseHandler.INSTANCE_SAME).txGet();
                                        readCount += toRead;
                                    }
                                    indexInput.close();
                                } catch (Exception e) {
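The inner loop above streams each file to the target in fileChunkSize pieces, tracking the read position so every chunk carries its offset within the file. A plain-Java sketch of just the chunking arithmetic, with made-up buffer and file sizes standing in for fileChunkSize and indexInput.length(), and a print standing in for the transport call:

    public class ChunkingSketch {
        public static void main(String[] args) {
            final int BUFFER_SIZE = 512 * 1024; // stand-in for fileChunkSize
            long len = 1_300_000L;              // stand-in for indexInput.length()
            long readCount = 0;
            while (readCount < len) {
                // the last chunk may be shorter than the buffer
                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                long position = readCount;      // the excerpt uses indexInput.getFilePointer()
                System.out.println("send chunk at offset " + position + ", length " + toRead);
                readCount += toRead;
            }
        }
    }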

Examples of org.elasticsearch.index.store.StoreFileMetaData

        targetNode = DiscoveryNode.readNode(in);
        markAsRelocated = in.readBoolean();
        int size = in.readVInt();
        // files the recovery target already holds, keyed by file name
        existingFiles = Maps.newHashMapWithExpectedSize(size);
        for (int i = 0; i < size; i++) {
            StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
            existingFiles.put(md.name(), md);
        }
    }

Examples of org.elasticsearch.index.store.StoreFileMetaData

                        blobRecoveryHandler.phase1();
                    }
                    StopWatch stopWatch = new StopWatch().start();

                    for (String name : snapshot.getFiles()) {
                        StoreFileMetaData md = shard.store().getMetadata().get(name);
                        boolean useExisting = false;
                        if (request.existingFiles().containsKey(name)) {
                            // we don't compute checksum for segments, so always recover them
                            if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                                response.phase1ExistingFileNames.add(name);
                                response.phase1ExistingFileSizes.add(md.length());
                                existingTotalSize += md.length();
                                useExisting = true;
                                if (logger.isTraceEnabled()) {
                                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
                                }
                            }
                        }
                        if (!useExisting) {
                            if (request.existingFiles().containsKey(name)) {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
                            } else {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
                            }
                            response.phase1FileNames.add(name);
                            response.phase1FileSizes.add(md.length());
                        }
                        totalSize += md.length();
                    }
                    response.phase1TotalSize = totalSize;
                    response.phase1ExistingTotalSize = existingTotalSize;

                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                            response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                    final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                    for (final String name : response.phase1FileNames) {
                        recoverySettings.concurrentStreamPool().execute(new Runnable() {
                            @Override
                            public void run() {
                                IndexInput indexInput = null;
                                try {
                                    final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                                    byte[] buf = new byte[BUFFER_SIZE];
                                    StoreFileMetaData md = shard.store().getMetadata().get(name);
                                    // TODO: maybe use IOContext.READONCE?
                                    indexInput = shard.store().directory().openInput(name, IOContext.READ);
                                    boolean shouldCompressRequest = recoverySettings.compress();
                                    if (CompressorFactory.isCompressed(indexInput)) {
                                        shouldCompressRequest = false;
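This later variant of phase1 (from the 1.x line) differs from the earlier one in a few details: file metadata comes from shard.store().getMetadata().get(name) rather than shard.store().metaData(name), the file is opened with an explicit IOContext, and request compression is disabled for files that are already compressed (the CompressorFactory.isCompressed check).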

Examples of org.elasticsearch.index.store.StoreFileMetaData

        Version writtenBy = null;
        if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_0)) {
            String versionString = in.readOptionalString();
            writtenBy = Lucene.parseVersionLenient(versionString, null);
        }
        metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
        if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_4_0_Beta1)) {
            lastChunk = in.readBoolean();
        } else {
            lastChunk = false;
        }
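This read path also documents the wire-format evolution: from 1.3.0 the stream carries the Lucene version that wrote the file (writtenBy), and from 1.4.0.Beta1 a lastChunk flag, which defaults to false for older peers. Note as well that the constructor used here, (name, length, checksum, writtenBy), differs from the pre-1.x form (name, length, lastModified, checksum) seen in the earlier excerpts.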

Examples of org.elasticsearch.index.store.StoreFileMetaData

            allocated = in.readBoolean();
            shardId = ShardId.readShardId(in);
            int size = in.readVInt();
            files = Maps.newHashMapWithExpectedSize(size);
            for (int i = 0; i < size; i++) {
                StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
                files.put(md.name(), md);
            }
        }