Package org.elasticsearch.common

Examples of org.elasticsearch.common.StopWatch$TaskInfo
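
None of the snippets below actually reads per-task timings back out of the watch, so here is a minimal, self-contained sketch of how StopWatch and its TaskInfo inner class fit together. The start(String)/stop()/totalTime() calls mirror the usage on this page; taskInfo(), getTaskName() and getTime() are assumptions based on the Spring StopWatch this class derives from.

import org.elasticsearch.common.StopWatch;

public class StopWatchTaskInfoDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch("demo");
        sw.start("warmup");
        Thread.sleep(50);                  // simulated work
        sw.stop();
        sw.start("indexing");
        Thread.sleep(100);                 // simulated work
        sw.stop();
        // each completed task is recorded as a StopWatch.TaskInfo (accessors assumed)
        for (StopWatch.TaskInfo task : sw.taskInfo()) {
            System.out.println(task.getTaskName() + " took " + task.getTime());
        }
        System.out.println("total: " + sw.totalTime());
    }
}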


        System.out.println(String.format("\nIndexed %s documents, %.2f per second in %s seconds", itemsIndexed, totalDocumentsPerSecond, totalTimeInSeconds));
        indexingMeter.mark(1);
    }

    private void reset() {
        sw = new StopWatch().start();
        startTimestamp = System.currentTimeMillis() / 1000;
        bulk = client.prepareBulk();
    }


            se.execute(compiled, vars);
        }

        final long ITER = 100000;

        // Variant 1: go through the script engine for every execution
        StopWatch stopWatch = new StopWatch().start();
        for (long i = 0; i < ITER; i++) {
            se.execute(compiled, vars);
        }
        System.out.println("Execute Took: " + stopWatch.stop().lastTaskTime());

        // Variant 2: bind the vars once and reuse the ExecutableScript
        stopWatch = new StopWatch().start();
        ExecutableScript executableScript = se.executable(compiled, vars);
        for (long i = 0; i < ITER; i++) {
            executableScript.run();
        }
        System.out.println("Executable Took: " + stopWatch.stop().lastTaskTime());

        // Variant 3: reuse the ExecutableScript but re-set every var before each run
        stopWatch = new StopWatch().start();
        executableScript = se.executable(compiled, vars);
        for (long i = 0; i < ITER; i++) {
            for (Map.Entry<String, Object> entry : vars.entrySet()) {
                executableScript.setNextVar(entry.getKey(), entry.getValue());
            }
            executableScript.run();
        }
        System.out.println("Executable (vars) Took: " + stopWatch.stop().lastTaskTime());
    }
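The benchmark above creates a fresh watch per variant; recording the variants as named tasks on a single watch is where TaskInfo earns its keep. A hedged reshaping of the same loop (prettyPrint() is assumed from the Spring-derived API; everything else reuses names from the snippet):

        StopWatch watch = new StopWatch("script-bench");

        watch.start("execute");
        for (long i = 0; i < ITER; i++) {
            se.execute(compiled, vars);
        }
        watch.stop();

        watch.start("executable");
        ExecutableScript script = se.executable(compiled, vars);
        for (long i = 0; i < ITER; i++) {
            script.run();
        }
        watch.stop();

        // assumed: prettyPrint() renders one line per recorded task
        System.out.println(watch.prettyPrint());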

                    Thread.sleep(Math.round(waitSeconds * 1000));
                } catch (InterruptedException ex) {
                    break;
                }
            }
            StopWatch queryWatch = new StopWatch().start();
            int currentResults = rsp.doScoll();
            if (currentResults == 0)
                break;

            MySearchHits res = callback(rsp.hits());
            if (res == null)
                break;
            queryWatch.stop();
            StopWatch updateWatch = new StopWatch().start();
            failed += bulkUpdate(res, newIndex, newType, withVersion).size();
            if (flushEnabled)
                client.admin().indices().flush(new FlushRequest(newIndex)).actionGet();

            updateWatch.stop();
            collectedResults += currentResults;
            logger.debug("Progress " + collectedResults + "/" + total
                    + ". Time of update:" + updateWatch.totalTime().getSeconds() + " query:"
                    + queryWatch.totalTime().getSeconds() + " failed:" + failed);
        }
        String str = "found " + total + ", collected:" + collectedResults
                + ", transfered:" + (float) rsp.bytes() / (1 << 20) + "MB";
        if (failed > 0)

            tweets = createFake();
        else
            tweets = createReal();

        logger.info("Now feeding");
        StopWatch sw = new StopWatch().start();
        tws.bulkUpdate(tweets, tws.getIndexName());
        long time = sw.stop().totalTime().seconds();
        if (time == 0)
            time = 1;
        logger.info("Feeded " + tweets.size() + " users => " + tweets.size() / time + " users/sec");
    }
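The time == 0 guard above exists because totalTime().seconds() truncates to whole seconds, so a fast run would divide by zero. A variant using secondsFrac() (the same accessor the last benchmark on this page uses) avoids both the truncation and the special case:

        StopWatch sw = new StopWatch().start();
        tws.bulkUpdate(tweets, tws.getIndexName());
        // fractional seconds; the tiny floor only guards against a zero-length run
        double secs = Math.max(sw.stop().totalTime().secondsFrac(), 1e-9);
        logger.info("Fed " + tweets.size() + " users => "
                + String.format("%.1f", tweets.size() / secs) + " users/sec");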

            try {
                long total = rsp.hits().totalHits();
                int collectedResults = 0;
                while (true) {
                    StopWatch queryWatch = new StopWatch().start();
                    rsp = client.prepareSearchScroll(rsp.scrollId()).
                            setScroll(TimeValue.timeValueMinutes(keepTime)).execute().actionGet();
                    long currentResults = rsp.hits().hits().length;
                    if (currentResults == 0)
                        break;

                    queryWatch.stop();
                    Collection<T> objs = createObj.collectObjects(rsp);
                    StopWatch updateWatch = new StopWatch().start();
                    int failed = bulkUpdate(objs, intoIndex, false, false).size();
                    // trying to enable flushing to avoid memory issues on the server side?
                    flush(intoIndex);
                    updateWatch.stop();
                    collectedResults += currentResults;
                    logger.info("Progress " + collectedResults + "/" + total + " fromIndex="
                            + fromIndex + " update:" + updateWatch.totalTime().getSeconds() + " query:" + queryWatch.totalTime().getSeconds() + " failed:" + failed);
                }
                logger.info("Finished copying of index:" + fromIndex + ". Total:" + total + " collected:" + collectedResults);
            } catch (Exception ex) {
//                throw new RuntimeException(ex);
                logger.error("Failed to copy data from index " + fromIndex + " into " + intoIndex + ".", ex);

            while (!closed) {
                DiscoveryNode node = clusterService.localNode();
                boolean isClusterStarted = clusterService.lifecycleState().equals(Lifecycle.State.STARTED);

                if (isClusterStarted && node != null && node.isMasterNode()) {
                    StopWatch sw = new StopWatch().start();
                    suggestRefreshAction.execute(new SuggestRefreshRequest()).actionGet();
                    logger.info("Suggest update took [{}], next update in [{}]", sw.stop().totalTime(), suggestRefreshInterval);
                } else {
                    if (node != null) {
                        logger.debug("[{}]/[{}] is not master node, not triggering update", node.getId(), node.getName());
                    }
                }
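The time-an-action-and-log-it shape above recurs throughout these examples. One way to factor it out is a small helper; this is a hypothetical sketch (not part of the plugin) and assumes a Java 8 Supplier is available:

    private <T> T timed(String what, TimeValue nextRun, java.util.function.Supplier<T> action) {
        StopWatch sw = new StopWatch().start();
        try {
            return action.get();
        } finally {
            // parameterized logging as used elsewhere in the snippet
            logger.info("{} took [{}], next update in [{}]", what, sw.stop().totalTime(), nextRun);
        }
    }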


    }

    public void phase1() throws Exception {
        logger.debug("[{}][{}] recovery [phase1] to {}: start",
            request.shardId().index().name(), request.shardId().id(), request.targetNode().getName());
        StopWatch stopWatch = new StopWatch().start();
        blobTransferTarget.startRecovery();
        blobTransferTarget.createActiveTransfersSnapshot();
        sendStartRecoveryRequest();

        final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
        try {
            syncVarFiles(lastException);
        } catch (InterruptedException ex) {
            throw new ElasticsearchException("blob recovery phase1 failed", ex);
        }

        Exception exception = lastException.get();
        if (exception != null) {
            throw exception;
        }

        /**
         * As soon as the recovery starts, the target node will receive PutChunkReplicaRequests;
         * the target node will then request the bytes it is missing from the source node
         * (the bytes from PutChunk/StartBlob requests that happened before the recovery).
         * We need to block here so that the target node has enough time to request the head chunks.
         *
         * e.g.
         *      Target Node receives Chunk X with bytes 10-19
         *      Target Node requests bytes 0-9 from Source Node
         *      Source Node sends bytes 0-9
         *      Source Node sets transferTakenOver
         */

        blobTransferTarget.waitForGetHeadRequests(GET_HEAD_TIMEOUT, TimeUnit.SECONDS);
        blobTransferTarget.createActivePutHeadChunkTransfersSnapshot();

        /**
         * After receiving a getHeadRequest the source node starts to send HeadChunks to the target;
         * wait for all PutHeadChunk-Runnables to finish before ending the recovery.
         */
        blobTransferTarget.waitUntilPutHeadChunksAreFinished();
        sendFinalizeRecoveryRequest();

        blobTransferTarget.stopRecovery();
        stopWatch.stop();
        logger.debug("[{}][{}] recovery [phase1] to {}: took [{}]",
            request.shardId().index().name(), request.shardId().id(), request.targetNode().getName(),
            stopWatch.totalTime());
    }
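phase1 blocks on waitForGetHeadRequests so the target node has a bounded window to ask for its missing head chunks. Purely as an illustration of that bounded-wait shape (none of these names are from the original), a latch-based version could look like:

    // hypothetical sketch of a bounded wait in the style of waitForGetHeadRequests(timeout, unit)
    private final java.util.concurrent.CountDownLatch headRequestsDone =
            new java.util.concurrent.CountDownLatch(1);

    void awaitHeadRequests(long timeout, java.util.concurrent.TimeUnit unit) {
        try {
            // returns early once the expected GetHead requests have arrived,
            // otherwise falls through after the timeout
            headRequestsDone.await(timeout, unit);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }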

                long existingTotalSize = 0;
                try {
                    if (blobRecoveryHandler != null) {
                        blobRecoveryHandler.phase1();
                    }
                    StopWatch stopWatch = new StopWatch().start();

                    for (String name : snapshot.getFiles()) {
                        StoreFileMetaData md = shard.store().getMetadata().get(name);
                        boolean useExisting = false;
                        if (request.existingFiles().containsKey(name)) {
                            // we don't compute checksum for segments, so always recover them
                            if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                                response.phase1ExistingFileNames.add(name);
                                response.phase1ExistingFileSizes.add(md.length());
                                existingTotalSize += md.length();
                                useExisting = true;
                                if (logger.isTraceEnabled()) {
                                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
                                }
                            }
                        }
                        if (!useExisting) {
                            if (request.existingFiles().containsKey(name)) {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
                            } else {
                                logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exist on the remote node", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
                            }
                            response.phase1FileNames.add(name);
                            response.phase1FileSizes.add(md.length());
                        }
                        totalSize += md.length();
                    }
                    response.phase1TotalSize = totalSize;
                    response.phase1ExistingTotalSize = existingTotalSize;

                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                            response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                    final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                    for (final String name : response.phase1FileNames) {
                        recoverySettings.concurrentStreamPool().execute(new Runnable() {
                            @Override
                            public void run() {
                                IndexInput indexInput = null;
                                try {
                                    final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                                    byte[] buf = new byte[BUFFER_SIZE];
                                    StoreFileMetaData md = shard.store().getMetadata().get(name);
                                    // TODO: maybe use IOContext.READONCE?
                                    indexInput = shard.store().directory().openInput(name, IOContext.READ);
                                    boolean shouldCompressRequest = recoverySettings.compress();
                                    if (CompressorFactory.isCompressed(indexInput)) {
                                        shouldCompressRequest = false;
                                    }

                                    long len = indexInput.length();
                                    long readCount = 0;
                                    while (readCount < len) {
                                        if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                            throw new IndexShardClosedException(shard.shardId());
                                        }
                                        int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                        long position = indexInput.getFilePointer();

                                        if (recoverySettings.rateLimiter() != null) {
                                            recoverySettings.rateLimiter().pause(toRead);
                                        }

                                        indexInput.readBytes(buf, 0, toRead, false);
                                        BytesArray content = new BytesArray(buf, 0, toRead);
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content),
                                                TransportRequestOptions.options().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                        readCount += toRead;
                                    }
                                } catch (Exception e) {
                                    lastException.set(e);
                                } finally {
                                    if (indexInput != null) {
                                        try {
                                            indexInput.close();
                                        } catch (IOException e) {
                                            // ignore
                                        }
                                    }
                                    latch.countDown();
                                }
                            }
                        });
                    }

                    latch.await();

                    if (lastException.get() != null) {
                        throw lastException.get();
                    }

                    // now, set the clean files request
                    Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                    stopWatch.stop();
                    logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                    response.phase1Time = stopWatch.totalTime().millis();
                } catch (Throwable e) {
                    throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
                }
            }

            @Override
            public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                stopWatch.stop();
                response.startTime = stopWatch.totalTime().millis();
                logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());

                logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                stopWatch = new StopWatch().start();
                int totalOperations = sendSnapshot(snapshot);
                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase2Time = stopWatch.totalTime().millis();
                response.phase2Operations = totalOperations;
            }

            @Override
            public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
                StopWatch stopWatch = new StopWatch().start();
                int totalOperations = sendSnapshot(snapshot);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                if (request.markAsRelocated()) {
                    // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
                    try {
                        shard.relocated("to " + request.targetNode());
                    } catch (IllegalIndexShardStateException e) {
                        // we can ignore this exception since, on the other node, when it moved to phase3
                        // it will also send shard started, which might cause the index shard we work against
                        // to be closed by the time we get to the relocated method
                    }
                }
                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase3Time = stopWatch.totalTime().millis();
                response.phase3Operations = totalOperations;
            }

            private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
                int ops = 0;
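The inner Runnable above streams each file in fileChunkSize pieces, pausing on the rate limiter before each read and shipping every chunk with its file position. The chunking arithmetic on its own, with plain java.io standing in for Lucene's IndexInput (all names illustrative):

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;

final class ChunkedFileReader {

    interface ChunkSink {
        void accept(byte[] buf, int len, long position) throws IOException;
    }

    static void stream(Path file, int chunkSize, ChunkSink sink) throws IOException {
        byte[] buf = new byte[chunkSize];
        long position = 0;
        try (InputStream in = Files.newInputStream(file)) {
            int read;
            while ((read = in.read(buf, 0, chunkSize)) != -1) {
                sink.accept(buf, read, position); // ship bytes [position, position + read)
                position += read;
            }
        }
    }
}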

                } while (!added);
            }
            String[] sValues = uniqueTerms.toArray(String.class);
            uniqueTerms = null;

            StopWatch stopWatch = new StopWatch().start();

            System.out.println("--> Indexing [" + COUNT + "] ...");
            long ITERS = COUNT / BATCH;
            long i = 1;
            int counter = 0;
            for (; i <= ITERS; i++) {
                BulkRequestBuilder request = client.prepareBulk();
                for (int j = 0; j < BATCH; j++) {
                    counter++;

                    XContentBuilder builder = jsonBuilder().startObject();
                    builder.field("id", Integer.toString(counter));
                    final String sValue = sValues[ThreadLocalRandom.current().nextInt(sValues.length)];
                    final long lValue = lValues[ThreadLocalRandom.current().nextInt(lValues.length)];
                    builder.field("s_value", sValue);
                    builder.field("l_value", lValue);
                    builder.field("s_value_dv", sValue);
                    builder.field("l_value_dv", lValue);

                    for (String field : new String[] {"sm_value", "sm_value_dv"}) {
                        builder.startArray(field);
                        for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
                            builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
                        }
                        builder.endArray();
                    }

                    for (String field : new String[] {"lm_value", "lm_value_dv"}) {
                        builder.startArray(field);
                        for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
                            builder.value(lValues[ThreadLocalRandom.current().nextInt(lValues.length)]);
                        }
                        builder.endArray();
                    }

                    builder.endObject();

                    request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
                            .source(builder));
                }
                BulkResponse response = request.execute().actionGet();
                if (response.hasFailures()) {
                    System.err.println("--> failures...");
                }
                if (((i * BATCH) % 10000) == 0) {
                    System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
                    stopWatch.start();
                }
            }
            System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
        } catch (Exception e) {
            System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
            ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
            if (clusterHealthResponse.isTimedOut()) {
                System.err.println("--> Timed out waiting for cluster health");
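The stop()/start() pair inside the loop above is this page's one multi-task use of a single watch: stop() closes the current task so lastTaskTime() covers only the last 10,000 documents, and start() immediately opens the next window. The pattern, distilled (indexOneBatch and totalBatches are hypothetical stand-ins):

        StopWatch watch = new StopWatch().start();
        for (int batch = 1; batch <= totalBatches; batch++) {
            indexOneBatch(batch);  // hypothetical helper issuing one bulk request
            System.out.println("batch " + batch + " took " + watch.stop().lastTaskTime());
            watch.start();         // reopen the watch so the next task times the next batch
        }
        watch.stop();
        System.out.println("total: " + watch.totalTime());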
