Package org.elasticsearch.common

Examples of org.elasticsearch.common.StopWatch$TaskInfo
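
The snippets below are drawn from the Elasticsearch code base and its benchmarks. As a minimal, self-contained sketch of the class itself (the class name, task names, and sleep calls are illustrative, not taken from the sources below), a StopWatch can be given named tasks; prettyPrint() then reports the per-task breakdown that the watch records for each completed task (the TaskInfo this page is about):

import org.elasticsearch.common.StopWatch;

public class StopWatchTaskInfoExample {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch("example");

        stopWatch.start("load");            // first named task
        Thread.sleep(50);                   // stand-in for real work
        stopWatch.stop().start("index");    // close "load", open "index"
        Thread.sleep(100);
        stopWatch.stop();

        System.out.println("last task took " + stopWatch.lastTaskTime());
        System.out.println("total time " + stopWatch.totalTime());
        // per-task breakdown of the recorded tasks
        System.out.println(stopWatch.prettyPrint());
    }
}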


            client = node.client();
        }

        client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();

        StopWatch stopWatch = new StopWatch().start();
        for (long i = 0; i < OPERATIONS; i++) {
            client.prepareGet("test", "type1", "1").execute().actionGet();
        }
        stopWatch.stop();

        System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));

        node.close();
    }


            clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().execute().actionGet();
            if (clusterHealthResponse.isTimedOut()) {
                System.err.println("--> Timed out waiting for cluster health");
            }

            StopWatch stopWatch = new StopWatch().start();

            System.out.println("--> Indexing [" + rootDocs + "] root documents and [" + (rootDocs * nestedCount) + "] nested objects");
            long ITERS = rootDocs / batch;
            long i = 1;
            int counter = 0;
            for (; i <= ITERS; i++) {
                BulkRequestBuilder request = client.prepareBulk();
                for (int j = 0; j < batch; j++) {
                    counter++;
                    XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
                            .field("field1", counter)
                            .startArray("field2");
                    for (int k = 0; k < nestedCount; k++) {
                        doc = doc.startObject()
                                .field("field3", k)
                                .endObject();
                    }
                    doc = doc.endArray();
                    request.add(
                            Requests.indexRequest(indexName).type("type").id(Integer.toString(counter)).source(doc)
                    );
                }
                BulkResponse response = request.execute().actionGet();
                if (response.hasFailures()) {
                    System.err.println("--> failures...");
                }
                if (((i * batch) % 10000) == 0) {
                    System.out.println("--> Indexed " + (i * batch) + " took " + stopWatch.stop().lastTaskTime());
                    stopWatch.start();
                }
            }
            System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (count * (1 + nestedCount))) / stopWatch.totalTime().secondsFrac()));
        } catch (Exception e) {
            System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
            clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
            if (clusterHealthResponse.isTimedOut()) {
                System.err.println("--> Timed out waiting for cluster health");
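
Several of the indexing benchmarks on this page share the same progress-reporting idiom: every N batches the watch is stopped so that lastTaskTime() covers only the work since the previous report, and then restarted for the next lap. Below is a standalone sketch of that loop, with the bulk request replaced by a placeholder sleep (the class name, batch counts, and indexOneBatch() helper are illustrative, not part of the original code):

import org.elasticsearch.common.StopWatch;

public class LapTimingSketch {
    public static void main(String[] args) throws InterruptedException {
        final int batches = 100;
        StopWatch stopWatch = new StopWatch().start();
        for (int i = 1; i <= batches; i++) {
            indexOneBatch();               // placeholder for executing a bulk request
            if (i % 10 == 0) {
                // stop() closes the current lap, so lastTaskTime() is the time spent on the last 10 batches
                System.out.println("--> finished " + i + " batches, took " + stopWatch.stop().lastTaskTime());
                stopWatch.start();         // start timing the next lap
            }
        }
        // overall time, as printed at the end of the benchmarks on this page
        System.out.println("--> total " + stopWatch.totalTime());
    }

    private static void indexOneBatch() throws InterruptedException {
        Thread.sleep(10);                  // stand-in for real indexing work
    }
}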

            client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
                    .startObject("_parent").field("type", "parent").endObject()
                    .endObject().endObject()).execute().actionGet();
            Thread.sleep(5000);

            StopWatch stopWatch = new StopWatch().start();

            System.out.println("--> Indexing [" + PARENT_COUNT + "] parent document and some child documents");
            long ITERS = PARENT_COUNT / BATCH;
            int i = 1;
            int counter = 0;
            for (; i <= ITERS; i++) {
                BulkRequestBuilder request = client.prepareBulk();
                for (int j = 0; j < BATCH; j++) {
                    counter++;
                    request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
                            .source(parentSource(counter)));

                }
                BulkResponse response = request.execute().actionGet();
                if (response.hasFailures()) {
                    System.err.println("--> failures...");
                }
                if (((i * BATCH) % 10000) == 0) {
                    System.out.println("--> Indexed " + (i * BATCH) + "parent docs; took " + stopWatch.stop().lastTaskTime());
                    stopWatch.start();
                }
            }

            int id = 0;
            for (i = 1; i <= PARENT_COUNT; i *= 2) {
                int parentId = 1;
                for (int j = 0; j < i; j++) {
                    client.prepareIndex(indexName, "child", Integer.toString(id++))
                            .setParent(Integer.toString(parentId++))
                            .setSource(childSource(i))
                            .execute().actionGet();
                }
            }

            System.out.println("--> Indexing took " + stopWatch.totalTime());
        } catch (Exception e) {
            System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
            ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
            if (clusterHealthResponse.isTimedOut()) {
                System.err.println("--> Timed out waiting for cluster health");

        }

        ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
        logger.info("closing ...");

        StopWatch stopWatch = new StopWatch("node_close");
        stopWatch.start("tribe");
        injector.getInstance(TribeService.class).close();
        stopWatch.stop().start("http");
        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).close();
        }

        stopWatch.stop().start("rivers");
        injector.getInstance(RiversManager.class).close();

        stopWatch.stop().start("snapshot_service");
        injector.getInstance(SnapshotsService.class).close();
        stopWatch.stop().start("client");
        Releasables.close(injector.getInstance(Client.class));
        stopWatch.stop().start("indices_cluster");
        injector.getInstance(IndicesClusterStateService.class).close();
        stopWatch.stop().start("indices");
        injector.getInstance(IndicesFilterCache.class).close();
        injector.getInstance(IndicesFieldDataCache.class).close();
        injector.getInstance(IndexingMemoryController.class).close();
        injector.getInstance(IndicesTTLService.class).close();
        injector.getInstance(IndicesService.class).close();
        stopWatch.stop().start("routing");
        injector.getInstance(RoutingService.class).close();
        stopWatch.stop().start("cluster");
        injector.getInstance(ClusterService.class).close();
        stopWatch.stop().start("discovery");
        injector.getInstance(DiscoveryService.class).close();
        stopWatch.stop().start("monitor");
        injector.getInstance(MonitorService.class).close();
        stopWatch.stop().start("gateway");
        injector.getInstance(GatewayService.class).close();
        stopWatch.stop().start("search");
        injector.getInstance(SearchService.class).close();
        stopWatch.stop().start("rest");
        injector.getInstance(RestController.class).close();
        stopWatch.stop().start("transport");
        injector.getInstance(TransportService.class).close();
        stopWatch.stop().start("percolator_service");
        injector.getInstance(PercolatorService.class).close();

        for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
            stopWatch.stop().start("plugin(" + plugin.getName() + ")");
            injector.getInstance(plugin).close();
        }

        stopWatch.stop().start("script");
        injector.getInstance(ScriptService.class).close();

        stopWatch.stop().start("thread_pool");
        // TODO this should really use ThreadPool.terminate()
        injector.getInstance(ThreadPool.class).shutdown();
        try {
            injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
        stopWatch.stop().start("thread_pool_force_shutdown");
        try {
            injector.getInstance(ThreadPool.class).shutdownNow();
        } catch (Exception e) {
            // ignore
        }
        stopWatch.stop();

        if (logger.isTraceEnabled()) {
            logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
        }

        injector.getInstance(NodeEnvironment.class).close();
        injector.getInstance(PageCacheRecycler.class).close();
        Injectors.close(injector);

                false, existingFiles, recoveryStatus.state().getType(), recoveryStatus.recoveryId());

        try {
            logger.trace("[{}][{}] starting recovery from {}", request.shardId().index().name(), request.shardId().id(), request.sourceNode());

            StopWatch stopWatch = new StopWatch().start();
            recoveryStatus.setWaitingRecoveryThread(Thread.currentThread());

            RecoveryResponse recoveryResponse = transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler<RecoveryResponse>() {
                @Override
                public RecoveryResponse newInstance() {
                    return new RecoveryResponse();
                }
            }).txGet();
            recoveryStatus.clearWaitingRecoveryThread(Thread.currentThread());
            stopWatch.stop();
            if (logger.isTraceEnabled()) {
                StringBuilder sb = new StringBuilder();
                sb.append('[').append(request.shardId().index().name()).append(']').append('[').append(request.shardId().id()).append("] ");
                sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(stopWatch.totalTime()).append("]\n");
                sb.append("   phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                        .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
                        .append("\n");
                sb.append("         : reusing_files   [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
                sb.append("   phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
                sb.append("         : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations")
                        .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
                        .append("\n");
                sb.append("   phase3: recovered [").append(recoveryResponse.phase3Operations).append("]").append(" transaction log operations")
                        .append(", took [").append(timeValueMillis(recoveryResponse.phase3Time)).append("]");
                logger.trace(sb.toString());
            } else if (logger.isDebugEnabled()) {
                logger.debug("{} recovery completed from [{}], took [{}]", request.shardId(), request.sourceNode(), stopWatch.totalTime());
            }
            // do this through ongoing recoveries to remove it from the collection
            onGoingRecoveries.markRecoveryAsDone(recoveryStatus.recoveryId());
        } catch (Throwable e) {
            if (logger.isTraceEnabled()) {

            final int[] values = new int[NUM_DOCS];
            for (int i = 0; i < NUM_DOCS; ++i) {
                values[i] = d.next();
            }
            System.out.println("Indexing " + NUM_DOCS + " documents into " + d.indexName());
            StopWatch stopWatch = new StopWatch().start();
            for (int i = 0; i < NUM_DOCS; ) {
                BulkRequestBuilder request = client.prepareBulk();
                for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
                    request.add(client.prepareIndex(d.indexName(), "values", Integer.toString(i)).setSource("v", values[i]));
                    ++i;
                }
                BulkResponse response = request.execute().actionGet();
                if (response.hasFailures()) {
                    System.err.println("--> failures...");
                    System.err.println(response.buildFailureMessage());
                }
                if ((i % 100000) == 0) {
                    System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
                    stopWatch.start();
                }
            }
            Arrays.sort(values);
            XContentBuilder builder = JsonXContent.contentBuilder().startObject();
            for (double percentile : PERCENTILES) {

        Thread.sleep(10000);
        try {
            client.admin().indices().create(createIndexRequest("test")).actionGet();

            StopWatch stopWatch = new StopWatch().start();

            System.out.println("--> Indexing [" + COUNT + "] ...");
            long ITERS = COUNT / BATCH;
            long i = 1;
            int counter = 0;
            for (; i <= ITERS; i++) {
                BulkRequestBuilder request = client.prepareBulk();
                for (int j = 0; j < BATCH; j++) {
                    counter++;

                    XContentBuilder builder = jsonBuilder().startObject();
                    builder.field("id", Integer.toString(counter));
                    builder.field("l_value", lValues[ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS)]);

                    builder.endObject();

                    request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
                            .source(builder));
                }
                BulkResponse response = request.execute().actionGet();
                if (response.hasFailures()) {
                    System.err.println("--> failures...");
                }
                if (((i * BATCH) % 100000) == 0) {
                    System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
                    stopWatch.start();
                }
            }
            System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
        } catch (Exception e) {
            System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
            ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
            if (clusterHealthResponse.isTimedOut()) {
                System.err.println("--> Timed out waiting for cluster health");

        // Total size of segment files that were able to be re-used
        long existingTotalSize = 0;
        final Store store = shard.store();
        store.incRef();
        try {
            StopWatch stopWatch = new StopWatch().start();
            final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
            for (String name : snapshot.getFiles()) {
                final StoreFileMetaData md = recoverySourceMetadata.get(name);
                if (md == null) {
                    logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                    throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed; metadata has " +
                            recoverySourceMetadata.asMap().size() + " files", name);
                }
            }
            // Generate a "diff" of all the identical, different, and missing
            // segment files on the target node, using the existing files on
            // the source node
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(new Store.MetadataSnapshot(request.existingFiles()));
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                            indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) {
                if (request.existingFiles().containsKey(md.name())) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                            indexName, shardId, request.targetNode(), md.name(), request.existingFiles().get(md.name()), md);
                } else {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                            indexName, shardId, request.targetNode(), md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;

            logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                    indexName, shardId, request.targetNode(), response.phase1FileNames.size(),
                    new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

            RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(),
                    response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes,
                    response.phase1TotalSize, response.phase1ExistingTotalSize);
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest,
                    TransportRequestOptions.options().withTimeout(internalActionTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

            // This latch will be used to wait until all files have been transferred to the target node
            final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
            final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
            final AtomicReference<Throwable> corruptedEngine = new AtomicReference<>();
            int fileIndex = 0;
            ThreadPoolExecutor pool;
            for (final String name : response.phase1FileNames) {
                long fileSize = response.phase1FileSizes.get(fileIndex);

                // Files are split into two categories, files that are "small"
                // (under 5mb) and other files. Small files are transferred
                // using a separate thread pool dedicated to small files.
                //
                // The idea behind this is that while we are transferring an
                // older, large index, a user may create a new index, but that
                // index will not be able to recover until the large index
                // finishes, by using two different thread pools we can allow
                // tiny files (like segments for a brand new index) to be
                // recovered while ongoing large segment recoveries are
                // happening. It also allows these pools to be configured
                // separately.
                if (fileSize > recoverySettings.SMALL_FILE_CUTOFF_BYTES) {
                    pool = recoverySettings.concurrentStreamPool();
                } else {
                    pool = recoverySettings.concurrentSmallFileStreamPool();
                }

                pool.execute(new Runnable() {
                    @Override
                    public void run() {
                        store.incRef();
                        final StoreFileMetaData md = recoverySourceMetadata.get(name);
                        try (final IndexInput indexInput = store.directory().openInput(name, IOContext.READONCE)) {
                            final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                            final byte[] buf = new byte[BUFFER_SIZE];
                            boolean shouldCompressRequest = recoverySettings.compress();
                            if (CompressorFactory.isCompressed(indexInput)) {
                                shouldCompressRequest = false;
                            }

                            long len = indexInput.length();
                            long readCount = 0;
                            TransportRequestOptions requestOptions = TransportRequestOptions.options()
                                    .withCompress(shouldCompressRequest)
                                    .withType(TransportRequestOptions.Type.RECOVERY)
                                    .withTimeout(internalActionTimeout);

                            while (readCount < len) {
                                if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                    throw new IndexShardClosedException(shard.shardId());
                                }
                                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                long position = indexInput.getFilePointer();

                                // Pause using the rate limiter, if desired, to throttle the recovery
                                if (recoverySettings.rateLimiter() != null) {
                                    recoverySettings.rateLimiter().pause(toRead);
                                }

                                indexInput.readBytes(buf, 0, toRead, false);
                                BytesArray content = new BytesArray(buf, 0, toRead);
                                readCount += toRead;
                                // Actually send the file chunk to the target node, waiting for it to complete
                                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK,
                                        new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, readCount == len),
                                        requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                            }
                        } catch (Throwable e) {
                            final Throwable corruptIndexException;
                            if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
                                if (store.checkIntegrity(md) == false) { // we are corrupted on the primary -- fail!
                                    logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                                    if (corruptedEngine.compareAndSet(null, corruptIndexException) == false) {
                                        // if we are not the first exception, add ourselves as suppressed to the main one:
                                        corruptedEngine.get().addSuppressed(e);
                                    }
                                } else { // corruption has happened on the way to replica
                                    RemoteTransportException exception = new RemoteTransportException("File corruption occured on recovery but checksums are ok", null);
                                    exception.addSuppressed(e);
                                    exceptions.add(0, exception); // last exception first
                                    logger.warn("{} File corruption on recovery {} local checksum OK",
                                            corruptIndexException, shard.shardId(), md);
                                }
                            } else {
                                exceptions.add(0, e); // last exceptions first
                            }
                        } finally {
                            try {
                                store.decRef();
                            } finally {
                                // Signify this file has completed by decrementing the latch
                                latch.countDown();
                            }
                        }
                    }
                });
                fileIndex++;
            }

            // Wait for all files that need to be transferred to finish transferring
            latch.await();
            if (corruptedEngine.get() != null) {
                throw corruptedEngine.get();
            } else {
                ExceptionsHelper.rethrowAndSuppress(exceptions);
            }

            Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
            // Send the CLEAN_FILES request, which takes all of the files that
            // were transferred and renames them from their temporary file
            // names to the actual file names. It also writes checksums for
            // the files after they have been renamed.
            //
            // Once the files have been renamed, any other files that are not
            // related to this recovery (out of date segments, for example)
            // are deleted
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
                    new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles),
                    TransportRequestOptions.options().withTimeout(internalActionTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
            response.phase1Time = stopWatch.totalTime().millis();
        } catch (Throwable e) {
            throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
        } finally {
            store.decRef();
        }

    public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
        if (shard.state() == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(request.shardId());
        }
        logger.trace("{} recovery [phase2] to {}: start", request.shardId(), request.targetNode());
        StopWatch stopWatch = new StopWatch().start();
        // Send a request preparing the new shard's translog to receive
        // operations. This ensures the shard engine is started and disables
        // garbage collection (not the JVM's GC!) of tombstone deletes
        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()),
                TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        stopWatch.stop();
        response.startTime = stopWatch.totalTime().millis();
        logger.trace("{} recovery [phase2] to {}: start took [{}]",
                request.shardId(), request.targetNode(), request.targetNode(), stopWatch.totalTime());


        logger.trace("{} recovery [phase2] to {}: updating current mapping to master", request.shardId(), request.targetNode());
        // Ensure that the mappings are synced with the master node
        updateMappingOnMaster();

        logger.trace("{} recovery [phase2] to {}: sending transaction log operations", request.shardId(), request.targetNode());
        stopWatch = new StopWatch().start();
        // Send all the snapshot's translog operations to the target
        int totalOperations = sendSnapshot(snapshot);
        stopWatch.stop();
        logger.trace("{} recovery [phase2] to {}: took [{}]", request.shardId(), request.targetNode(), stopWatch.totalTime());
        response.phase2Time = stopWatch.totalTime().millis();
        response.phase2Operations = totalOperations;
    }

    public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
        if (shard.state() == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(request.shardId());
        }
        logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", indexName, shardId, request.targetNode());
        StopWatch stopWatch = new StopWatch().start();

        // Send the translog operations to the target node
        int totalOperations = sendSnapshot(snapshot);

        // Send the FINALIZE request to the target node. The finalize request
        // clears unreferenced translog files, refreshes the engine now that
        // new segments are available, and enables garbage collection of
        // tombstone files. The shard is also moved to the POST_RECOVERY phase
        // during this time
        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                TransportRequestOptions.options().withTimeout(internalActionLongTimeout),
                EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

        if (request.markAsRelocated()) {
            // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
            try {
                shard.relocated("to " + request.targetNode());
            } catch (IllegalIndexShardStateException e) {
                // we can ignore this exception since, on the other node, when it moved to phase3
                // it will also send shard started, which might cause the index shard we work against
                // to be closed by the time we get to the relocated method
            }
        }
        stopWatch.stop();
        logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]",
                indexName, shardId, request.targetNode(), stopWatch.totalTime());
        response.phase3Time = stopWatch.totalTime().millis();
        response.phase3Operations = totalOperations;
    }
