Package org.elasticsearch.index.shard.service

Examples of org.elasticsearch.index.shard.service.InternalIndexShard


        @Override public String executor() {
            return ThreadPool.Names.CACHED;
        }

        @Override public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
            InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
            RecoveryStatus onGoingRecovery = onGoingRecoveries.get(shard.shardId());
            if (onGoingRecovery == null) {
                // shard is getting closed on us
                throw new IndexShardClosedException(shard.shardId());
            }

            // first, move files that were created with the recovery-id suffix over to their
            // actual names; it's ok if the index is corrupted at this point, since we have replicas
            // to recover from if a full cluster shutdown happens just as this code executes...
            String suffix = "." + onGoingRecovery.startTime;
            Set<String> filesToRename = Sets.newHashSet();
            for (String existingFile : shard.store().directory().listAll()) {
                if (existingFile.endsWith(suffix)) {
                    filesToRename.add(existingFile.substring(0, existingFile.length() - suffix.length()));
                }
            }
            Exception failureToRename = null;
            if (!filesToRename.isEmpty()) {
                // first, go and delete the existing ones
                for (String fileToRename : filesToRename) {
                    shard.store().directory().deleteFile(fileToRename);
                }
                for (String fileToRename : filesToRename) {
                    // now, rename the files...
                    try {
                        shard.store().renameFile(fileToRename + suffix, fileToRename);
                    } catch (Exception e) {
                        failureToRename = e;
                        break;
                    }
                }
            }
            if (failureToRename != null) {
                throw failureToRename;
            }
            // now write checksums
            shard.store().writeChecksums(onGoingRecovery.checksums);

            for (String existingFile : shard.store().directory().listAll()) {
                if (!request.snapshotFiles().contains(existingFile)) {
                    try {
                        shard.store().directory().deleteFile(existingFile);
                    } catch (Exception e) {
                        // ignore, we don't really care, will get deleted later on
                    }
                }
            }
            // ... (snippet truncated)
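
The clean-files handler above relies on a rename-with-suffix trick: files that would clash with existing index files are first written as "name.<recoveryStartTime>" and only swapped into place once everything has arrived, so the index is never left half moved. Below is a minimal, self-contained sketch of that swap using plain java.nio.file; the directory path and suffix are placeholders, not the Lucene Directory API the handler uses.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.stream.Stream;

    public class SuffixSwap {
        // Moves every "name<suffix>" file in dir over "name", deleting the old
        // "name" first -- the same two-pass order as the handler above.
        static void swapSuffixedFiles(Path dir, String suffix) throws IOException {
            Set<String> toRename = new HashSet<String>();
            try (Stream<Path> files = Files.list(dir)) {
                files.map(p -> p.getFileName().toString())
                     .filter(n -> n.endsWith(suffix))
                     .forEach(n -> toRename.add(n.substring(0, n.length() - suffix.length())));
            }
            // first pass: delete the existing targets
            for (String name : toRename) {
                Files.deleteIfExists(dir.resolve(name));
            }
            // second pass: rename the suffixed copies into place
            for (String name : toRename) {
                Files.move(dir.resolve(name + suffix), dir.resolve(name));
            }
        }
    }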


        @Override public String executor() {
            return ThreadPool.Names.CACHED;
        }

        @Override public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
            InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
            RecoveryStatus onGoingRecovery = onGoingRecoveries.get(shard.shardId());
            if (onGoingRecovery == null) {
                // shard is getting closed on us
                throw new IndexShardClosedException(shard.shardId());
            }
            IndexOutput indexOutput;
            if (request.position() == 0) {
                // first request
                onGoingRecovery.checksums.remove(request.name());
                indexOutput = onGoingRecovery.openIndexOutputs.remove(request.name());
                if (indexOutput != null) {
                    try {
                        indexOutput.close();
                    } catch (IOException e) {
                        // ignore
                    }
                }
                // we create an output with no checksum, because the raw binary data being written
                // is not the checksum itself (because of seek); we will create the checksum file
                // once copying is done

                // also, we check whether the file already exists; if it does, we create a file name
                // based on the current recovery "id" and make the switch later. The reason is that we
                // only want to overwrite the index files once everything has been copied over, and
                // never leave the index half moved

                String name = request.name();
                if (shard.store().directory().fileExists(name)) {
                    name = name + "." + onGoingRecovery.startTime;
                }

                indexOutput = shard.store().createOutputWithNoChecksum(name);

                onGoingRecovery.openIndexOutputs.put(request.name(), indexOutput);
            } else {
                indexOutput = onGoingRecovery.openIndexOutputs.get(request.name());
            }
            if (indexOutput == null) {
                // shard is getting closed on us
                throw new IndexShardClosedException(shard.shardId());
            }
            synchronized (indexOutput) {
                try {
                    indexOutput.writeBytes(request.content(), request.contentLength());
                    onGoingRecovery.currentFilesSize.addAndGet(request.contentLength());
                    if (indexOutput.getFilePointer() == request.length()) {
                        // we are done
                        indexOutput.close();
                        // write the checksum
                        if (request.checksum() != null) {
                            onGoingRecovery.checksums.put(request.name(), request.checksum());
                        }
                        shard.store().directory().sync(Collections.singleton(request.name()));
                        onGoingRecovery.openIndexOutputs.remove(request.name());
                    }
                } catch (IOException e) {
                    onGoingRecovery.openIndexOutputs.remove(request.name());
                    try {
                        // ... (snippet truncated)
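
The chunk handler keeps one open output per file name: a chunk at position 0 opens (or replaces) the output, later chunks are appended, and once the file pointer reaches the expected total length the output is closed, its checksum recorded, and the file synced. A rough stand-alone equivalent of that state machine, written against java.nio with illustrative names (ChunkReceiver is not an Elasticsearch class):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class ChunkReceiver {
        private final Map<String, FileChannel> open = new ConcurrentHashMap<String, FileChannel>();
        private final Path dir;

        public ChunkReceiver(Path dir) {
            this.dir = dir;
        }

        // Applies one chunk; closes and fsyncs the file once totalLength bytes are in.
        public void receive(String name, long position, byte[] content, long totalLength) throws IOException {
            FileChannel channel;
            if (position == 0) {
                // first chunk: drop any stale output from an earlier attempt and start fresh
                FileChannel stale = open.remove(name);
                if (stale != null) {
                    stale.close();
                }
                channel = FileChannel.open(dir.resolve(name),
                        StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                        StandardOpenOption.TRUNCATE_EXISTING);
                open.put(name, channel);
            } else {
                channel = open.get(name);
                if (channel == null) {
                    throw new IOException("no open output for " + name);
                }
            }
            synchronized (channel) {
                channel.write(ByteBuffer.wrap(content), position);
                if (channel.size() == totalLength) { // last chunk arrived
                    channel.force(true);             // fsync, like directory().sync(...)
                    channel.close();
                    open.remove(name);
                }
            }
        }
    }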

    public void close() {
        concurrentStreamPool.shutdown();
    }

    private RecoveryResponse recover(final StartRecoveryRequest request) {
        final InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
        logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
        final RecoveryResponse response = new RecoveryResponse();
        shard.recover(new Engine.RecoveryHandler() {
            @Override public void phase1(final SnapshotIndexCommit snapshot) throws ElasticSearchException {
                long totalSize = 0;
                long existingTotalSize = 0;
                try {
                    StopWatch stopWatch = new StopWatch().start();

                    for (String name : snapshot.getFiles()) {
                        StoreFileMetaData md = shard.store().metaData(name);
                        boolean useExisting = false;
                        if (request.existingFiles().containsKey(name)) {
                            // we don't compute checksum for segments, so always recover them
                            if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                                response.phase1ExistingFileNames.add(name);
                                response.phase1ExistingFileSizes.add(md.length());
                                existingTotalSize += md.length();
                                useExisting = true;
                                if (logger.isTraceEnabled()) {
                                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
                                }
                            }
                        }
                        if (!useExisting) {
                            if (request.existingFiles().containsKey(name)) {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
                            } else {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exist in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
                            }
                            response.phase1FileNames.add(name);
                            response.phase1FileSizes.add(md.length());
                        }
                        totalSize += md.length();
                    }
                    response.phase1TotalSize = totalSize;
                    response.phase1ExistingTotalSize = existingTotalSize;

                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                            response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                    final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                    for (final String name : response.phase1FileNames) {
                        concurrentStreamPool.execute(new Runnable() {
                            @Override public void run() {
                                IndexInput indexInput = null;
                                try {
                                    final int BUFFER_SIZE = (int) fileChunkSize.bytes();
                                    byte[] buf = new byte[BUFFER_SIZE];
                                    StoreFileMetaData md = shard.store().metaData(name);
                                    indexInput = snapshot.getDirectory().openInput(name);
                                    long len = indexInput.length();
                                    long readCount = 0;
                                    while (readCount < len) {
                                        if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                            throw new IndexShardClosedException(shard.shardId());
                                        }
                                        int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                        long position = indexInput.getFilePointer();
                                        indexInput.readBytes(buf, 0, toRead, false);
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.shardId(), name, position, len, md.checksum(), buf, toRead),
                                                TransportRequestOptions.options().withCompress(compress).withLowType(), VoidTransportResponseHandler.INSTANCE_SAME).txGet();
                                        readCount += toRead;
                                    }
                                    indexInput.close();
                                } catch (Exception e) {
                                    lastException.set(e);
                                } finally {
                                    if (indexInput != null) {
                                        try {
                                            indexInput.close();
                                        } catch (IOException e) {
                                            // ignore
                                        }
                                    }
                                    latch.countDown();
                                }
                            }
                        });
                    }

                    latch.await();

                    if (lastException.get() != null) {
                        throw lastException.get();
                    }

                    // now, send the clean-files request
                    Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(shard.shardId(), snapshotFiles), VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                    stopWatch.stop();
                    logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                    response.phase1Time = stopWatch.totalTime().millis();
                } catch (Throwable e) {
                    throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
                }
            }

            @Override public void phase2(Translog.Snapshot snapshot) throws ElasticSearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();

                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(request.shardId()), VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                int totalOperations = sendSnapshot(snapshot);

                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase2Time = stopWatch.totalTime().millis();
                response.phase2Operations = totalOperations;
            }

            @Override public void phase3(Translog.Snapshot snapshot) throws ElasticSearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();
                int totalOperations = sendSnapshot(snapshot);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(request.shardId()), VoidTransportResponseHandler.INSTANCE_SAME).txGet();
                if (request.markAsRelocated()) {
                    // TODO: what happens if the recovery process fails afterwards? we need to mark this back to started
                    try {
                        shard.relocated("to " + request.targetNode());
                    } catch (IllegalIndexShardStateException e) {
                        // we can ignore this exception, since on the other node, when it moved to phase3,
                        // it will also send "shard started", which might cause the index shard we are working
                        // against to be closed by the time we get to the relocated method
                    }
                }
                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase3Time = stopWatch.totalTime().millis();
                response.phase3Operations = totalOperations;
            }

            private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticSearchException {
                int ops = 0;
                long size = 0;
                int totalOperations = 0;
                List<Translog.Operation> operations = Lists.newArrayList();
                while (snapshot.hasNext()) {
                    if (shard.state() == IndexShardState.CLOSED) {
                        throw new IndexShardClosedException(request.shardId());
                    }
                    Translog.Operation operation = snapshot.next();
                    operations.add(operation);
                    ops += 1;
                    // ... (snippet truncated)
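
The phase1 sender fans one task per file out to a thread pool and joins with a CountDownLatch, remembering one failure in an AtomicReference and rethrowing it after the latch releases. That coordination pattern in isolation, as a small hedged sketch (runAll is an illustrative name, not part of the Elasticsearch API):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.atomic.AtomicReference;

    public class FanOut {
        // Runs one task per item on the pool, waits for all of them,
        // then rethrows the last failure (earlier failures are overwritten).
        static void runAll(ExecutorService pool, List<Runnable> tasks) throws Exception {
            final CountDownLatch latch = new CountDownLatch(tasks.size());
            final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
            for (final Runnable task : tasks) {
                pool.execute(new Runnable() {
                    @Override public void run() {
                        try {
                            task.run();
                        } catch (Exception e) {
                            lastException.set(e); // remember a failure for the caller
                        } finally {
                            latch.countDown();    // always count down, even on failure
                        }
                    }
                });
            }
            latch.await(); // block until every task has finished or failed
            if (lastException.get() != null) {
                throw lastException.get();
            }
        }
    }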

        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
    }

    @Override
    protected ShardTermlistResponse shardOperation(ShardTermlistRequest request) throws ElasticsearchException {
        InternalIndexShard indexShard = (InternalIndexShard) indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
        Engine.Searcher searcher = indexShard.engine().acquireSearcher("termlist");
        try {
            Map<String, TermInfo> map = new CompactHashMap<String, TermInfo>();
            IndexReader reader = searcher.reader();
            Fields fields = MultiFields.getFields(reader);
            if (fields != null) {
                // ... (snippet truncated)
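
The termlist snippet cuts off right after obtaining Fields. In the Lucene 4.x API this code is written against, the usual continuation is to walk each field's TermsEnum; a sketch of that walk follows, assuming a Lucene 4-era IndexReader (the TermInfo bookkeeping from the snippet is omitted, and terms.iterator(null) is the 4.x reuse-accepting signature):

    import java.io.IOException;
    import org.apache.lucene.index.Fields;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public class TermWalker {
        // Prints every field/term pair with its document frequency.
        static void walkTerms(IndexReader reader) throws IOException {
            Fields fields = MultiFields.getFields(reader); // null for an empty reader
            if (fields == null) {
                return;
            }
            for (String field : fields) {
                Terms terms = fields.terms(field);
                if (terms == null) {
                    continue;
                }
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef term;
                while ((term = termsEnum.next()) != null) {
                    System.out.println(field + ":" + term.utf8ToString() + " df=" + termsEnum.docFreq());
                }
            }
        }
    }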

    public void registerHandler(){
        transportService.registerHandler(Actions.START_RECOVERY, new StartRecoveryTransportRequestHandler());
    }

    private RecoveryResponse recover(final StartRecoveryRequest request) {
        final InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());

        // verify that our (the source node's) cluster state also marks the target shard as being
        // in recovery mode; otherwise the index operations will not be routed to it properly
        RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
        if (node == null) {
            throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");
        }
        ShardRouting targetShardRouting = null;
        for (ShardRouting shardRouting : node) {
            if (shardRouting.shardId().equals(request.shardId())) {
                targetShardRouting = shardRouting;
                break;
            }
        }
        if (targetShardRouting == null) {
            throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node");
        }
        if (!targetShardRouting.initializing()) {
            throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
        }

        logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
        final RecoveryResponse response = new RecoveryResponse();

        final BlobRecoveryHandler blobRecoveryHandler;

        if (BlobIndices.isBlobIndex(shard.shardId().getIndex())) {
            blobRecoveryHandler = new BlobRecoveryHandler(
                transportService, recoverySettings, blobTransferTarget, blobIndices, shard, request);
        } else {
            blobRecoveryHandler = null;
        }

        shard.recover(new Engine.RecoveryHandler() {
            @Override
            public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
                long totalSize = 0;
                long existingTotalSize = 0;
                try {
                    if (blobRecoveryHandler != null) {
                        blobRecoveryHandler.phase1();
                    }
                    StopWatch stopWatch = new StopWatch().start();

                    for (String name : snapshot.getFiles()) {
                        StoreFileMetaData md = shard.store().getMetadata().get(name);
                        boolean useExisting = false;
                        if (request.existingFiles().containsKey(name)) {
                            // we don't compute checksum for segments, so always recover them
                            if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                                response.phase1ExistingFileNames.add(name);
                                response.phase1ExistingFileSizes.add(md.length());
                                existingTotalSize += md.length();
                                useExisting = true;
                                if (logger.isTraceEnabled()) {
                                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
                                }
                            }
                        }
                        if (!useExisting) {
                            if (request.existingFiles().containsKey(name)) {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
                            } else {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exist in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
                            }
                            response.phase1FileNames.add(name);
                            response.phase1FileSizes.add(md.length());
                        }
                        totalSize += md.length();
                    }
                    response.phase1TotalSize = totalSize;
                    response.phase1ExistingTotalSize = existingTotalSize;

                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                            response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                    final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                    for (final String name : response.phase1FileNames) {
                        recoverySettings.concurrentStreamPool().execute(new Runnable() {
                            @Override
                            public void run() {
                                IndexInput indexInput = null;
                                try {
                                    final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                                    byte[] buf = new byte[BUFFER_SIZE];
                                    StoreFileMetaData md = shard.store().getMetadata().get(name);
                                    // TODO: maybe use IOContext.READONCE?
                                    indexInput = shard.store().directory().openInput(name, IOContext.READ);
                                    boolean shouldCompressRequest = recoverySettings.compress();
                                    if (CompressorFactory.isCompressed(indexInput)) {
                                        shouldCompressRequest = false;
                                    }

                                    long len = indexInput.length();
                                    long readCount = 0;
                                    while (readCount < len) {
                                        if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                            throw new IndexShardClosedException(shard.shardId());
                                        }
                                        int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                        long position = indexInput.getFilePointer();

                                        if (recoverySettings.rateLimiter() != null) {
                                            recoverySettings.rateLimiter().pause(toRead);
                                        }

                                        indexInput.readBytes(buf, 0, toRead, false);
                                        BytesArray content = new BytesArray(buf, 0, toRead);
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content),
                                                TransportRequestOptions.options().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                        readCount += toRead;
                                    }
                                } catch (Exception e) {
                                    lastException.set(e);
                                } finally {
                                    if (indexInput != null) {
                                        try {
                                            indexInput.close();
                                        } catch (IOException e) {
                                            // ignore
                                        }
                                    }
                                    latch.countDown();
                                }
                            }
                        });
                    }

                    latch.await();

                    if (lastException.get() != null) {
                        throw lastException.get();
                    }

                    // now, send the clean-files request
                    Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                    stopWatch.stop();
                    logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                    response.phase1Time = stopWatch.totalTime().millis();
                } catch (Throwable e) {
                    throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
                }
            }

            @Override
            public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                stopWatch.stop();
                response.startTime = stopWatch.totalTime().millis();
                logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());

                logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                stopWatch = new StopWatch().start();
                int totalOperations = sendSnapshot(snapshot);
                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase2Time = stopWatch.totalTime().millis();
                response.phase2Operations = totalOperations;
            }

            @Override
            public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();
                int totalOperations = sendSnapshot(snapshot);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                if (request.markAsRelocated()) {
                    // TODO: what happens if the recovery process fails afterwards? we need to mark this back to started
                    try {
                        shard.relocated("to " + request.targetNode());
                    } catch (IllegalIndexShardStateException e) {
                        // we can ignore this exception, since on the other node, when it moved to phase3,
                        // it will also send "shard started", which might cause the index shard we are working
                        // against to be closed by the time we get to the relocated method
                    }
                }
                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase3Time = stopWatch.totalTime().millis();
                response.phase3Operations = totalOperations;
            }

            private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
                int ops = 0;
                long size = 0;
                int totalOperations = 0;
                List<Translog.Operation> operations = Lists.newArrayList();
                while (snapshot.hasNext()) {
                    if (shard.state() == IndexShardState.CLOSED) {
                        throw new IndexShardClosedException(request.shardId());
                    }
                    Translog.Operation operation = snapshot.next();
                    operations.add(operation);
                    ops += 1;
                    // ... (snippet truncated)
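
Compared to the earlier recover() above, this version throttles each chunk with recoverySettings.rateLimiter().pause(toRead) before reading, and picks the chunk size as min(remaining, BUFFER_SIZE). A transport-free sketch of the same throttled loop, using Guava's RateLimiter as a stand-in for the limiter (ChunkSink and ThrottledChunkSender are illustrative names; InputStream.readNBytes needs Java 9+):

    import com.google.common.util.concurrent.RateLimiter;
    import java.io.IOException;
    import java.io.InputStream;

    public class ThrottledChunkSender {
        interface ChunkSink {
            // stands in for the FILE_CHUNK transport request in the snippet
            void send(long position, byte[] buf, int length) throws IOException;
        }

        // Streams len bytes from in to sink in bufferSize chunks,
        // capped at roughly maxBytesPerSec.
        static void copy(InputStream in, long len, int bufferSize,
                         double maxBytesPerSec, ChunkSink sink) throws IOException {
            RateLimiter limiter = RateLimiter.create(maxBytesPerSec); // one permit = one byte
            byte[] buf = new byte[bufferSize];
            long readCount = 0;
            while (readCount < len) {
                // same arithmetic as the snippet: min(remaining, bufferSize)
                int toRead = (int) Math.min(len - readCount, bufferSize);
                limiter.acquire(toRead); // blocks, like rateLimiter().pause(toRead)
                int n = in.readNBytes(buf, 0, toRead);
                if (n < toRead) {
                    throw new IOException("unexpected end of stream");
                }
                sink.send(readCount, buf, n);
                readCount += n;
            }
        }
    }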

    }

    private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
        IndexService indexService = indicesService.indexService(shardId.index().name());
        if (indexService != null) {
            InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId.id());
            if (indexShard != null) {
                final Store store = indexShard.store();
                store.incRef();
                try {
                    return new StoreFilesMetaData(true, shardId, store.getMetadataOrEmpty().asMap());
                } finally {
                    store.decRef();
                    // ... (snippet truncated)
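
listStoreMetaData wraps the Store access in incRef()/try/finally/decRef() so the store cannot be released while the metadata snapshot is being read. The same reference-counting discipline in a generic form, modeled on those calls (RefCountedResource is a hypothetical class, not the Elasticsearch Store type):

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    public class RefCountedResource {
        private final AtomicInteger refs = new AtomicInteger(1); // the owner holds one reference

        public void incRef() {
            int current;
            do {
                current = refs.get();
                if (current <= 0) {
                    throw new IllegalStateException("already closed");
                }
            } while (!refs.compareAndSet(current, current + 1));
        }

        public void decRef() {
            if (refs.decrementAndGet() == 0) {
                close(); // last reference gone: actually release the resource
            }
        }

        protected void close() {
            // release files, buffers, ...
        }

        // Typical usage mirroring the try/finally in the snippet above.
        public <T> T withRef(Supplier<T> body) {
            incRef();
            try {
                return body.get();
            } finally {
                decRef();
            }
        }
    }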

        this.internalActionLongTimeout = new TimeValue(internalActionTimeout.millis() * 2);
    }

    private RecoveryResponse recover(final StartRecoveryRequest request) {
        final IndexService indexService = indicesService.indexServiceSafe(request.shardId().index().name());
        final InternalIndexShard shard = (InternalIndexShard) indexService.shardSafe(request.shardId().id());

        // verify that our (the source node's) cluster state also marks the target shard as being
        // in recovery mode; otherwise the index operations will not be routed to it properly
        RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
        if (node == null) {
            logger.debug("delaying recovery of {} as source node {} is unknown", request.shardId(), request.targetNode());
            throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");
        }
        ShardRouting targetShardRouting = null;
        for (ShardRouting shardRouting : node) {
            if (shardRouting.shardId().equals(request.shardId())) {
                targetShardRouting = shardRouting;
                break;
            }
        }
        if (targetShardRouting == null) {
            logger.debug("delaying recovery of {} as it is not listed as assigned to target node {}", request.shardId(), request.targetNode());
            throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node");
        }
        if (!targetShardRouting.initializing()) {
            logger.debug("delaying recovery of {} as it is not listed as initializing on the target node {}. known shards state is [{}]",
                    request.shardId(), request.targetNode(), targetShardRouting.state());
            throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
        }

        logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(), request.shardId().id(), request.targetNode(), request.markAsRelocated());

        ShardRecoveryHandler handler = new ShardRecoveryHandler(shard, request, recoverySettings, transportService, internalActionTimeout,
                internalActionLongTimeout, clusterService, indicesService, mappingUpdatedAction, logger);

        shard.recover(handler);
        return handler.getResponse();
    }

    }

    @Override
    protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException {
        InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.shardId().getIndex());
        InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId().id());
        // if we don't have the routing entry yet (stats need it), we treat the shard as not ready yet
        if (indexShard.routingEntry() == null) {
            throw new IndexShardMissingException(indexShard.shardId());
        }

        CommonStatsFlags flags = new CommonStatsFlags().clear();

        if (request.request.docs()) {
            flags.set(CommonStatsFlags.Flag.Docs);
        }
        if (request.request.store()) {
            flags.set(CommonStatsFlags.Flag.Store);
        }
        if (request.request.indexing()) {
            flags.set(CommonStatsFlags.Flag.Indexing);
            flags.types(request.request.types());
        }
        if (request.request.get()) {
            flags.set(CommonStatsFlags.Flag.Get);
        }
        if (request.request.search()) {
            flags.set(CommonStatsFlags.Flag.Search);
            flags.groups(request.request.groups());
        }
        if (request.request.merge()) {
            flags.set(CommonStatsFlags.Flag.Merge);
        }
        if (request.request.refresh()) {
            flags.set(CommonStatsFlags.Flag.Refresh);
        }
        if (request.request.flush()) {
            flags.set(CommonStatsFlags.Flag.Flush);
        }
        if (request.request.warmer()) {
            flags.set(CommonStatsFlags.Flag.Warmer);
        }
        if (request.request.filterCache()) {
            flags.set(CommonStatsFlags.Flag.FilterCache);
        }
        if (request.request.idCache()) {
            flags.set(CommonStatsFlags.Flag.IdCache);
        }
        if (request.request.fieldData()) {
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.request.fieldDataFields());
        }
        if (request.request.percolate()) {
            flags.set(CommonStatsFlags.Flag.Percolate);
        }
        if (request.request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
        }
        if (request.request.completion()) {
            flags.set(CommonStatsFlags.Flag.Completion);
            flags.completionDataFields(request.request.completionFields());
        }
        if (request.request.translog()) {
            flags.set(CommonStatsFlags.Flag.Translog);
        }
        if (request.request.suggest()) {
            flags.set(CommonStatsFlags.Flag.Suggest);
        }
        if (request.request.queryCache()) {
            flags.set(CommonStatsFlags.Flag.QueryCache);
        }

        return new ShardStats(indexShard, indexShard.routingEntry(), flags);
    }
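
The long if-chain above simply translates boolean fields on the stats request into entries of a CommonStatsFlags set. The same pattern in miniature with a plain EnumSet (Flag and StatsRequest here are cut-down illustrations, not the Elasticsearch types):

    import java.util.EnumSet;

    public class FlagMapping {
        enum Flag { DOCS, STORE, INDEXING, GET, SEARCH }

        static class StatsRequest {
            boolean docs, store, indexing, get, search;
        }

        // Builds the flag set from the request, like the cleared
        // CommonStatsFlags plus the if-chain above.
        static EnumSet<Flag> toFlags(StatsRequest r) {
            EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
            if (r.docs)     flags.add(Flag.DOCS);
            if (r.store)    flags.add(Flag.STORE);
            if (r.indexing) flags.add(Flag.INDEXING);
            if (r.get)      flags.add(Flag.GET);
            if (r.search)   flags.add(Flag.SEARCH);
            return flags;
        }
    }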

    @Override
    protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) throws ElasticsearchException {

        InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.shardId().getIndex());
        InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId().id());
        ShardRecoveryResponse shardRecoveryResponse = new ShardRecoveryResponse(request.shardId());

        RecoveryState state = indexShard.recoveryState();

        if (state == null) {
            state = recoveryTarget.recoveryState(indexShard);
        }
        // ... (snippet truncated)

    }

    @Override
    protected ShardSegments shardOperation(IndexShardSegmentRequest request) throws ElasticsearchException {
        InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.shardId().getIndex());
        InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId().id());
        return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments());
    }
