Package org.elasticsearch.index.store

Examples of org.elasticsearch.index.store.StoreFileMetaData


                    if (snapshotStatus.aborted()) {
                        logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
                        throw new IndexShardSnapshotFailedException(shardId, "Aborted");
                    }
                    logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
                    final StoreFileMetaData md = metadata.get(fileName);
                    boolean snapshotRequired = false;
                    BlobStoreIndexShardSnapshot.FileInfo fileInfo = snapshots.findPhysicalIndexFile(fileName);
                    try {
                        // in 1.3.3 we added additional hashes for .si / segments_N files
                        // to ensure we don't double the space in the repo since old snapshots
                        // don't have this hash we try to read that hash from the blob store
                        // in a bwc compatible way.
                        maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
                    } catch (Throwable e) {
                        logger.warn("{} Can't calculate hash from blob for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata());
                    }
                    if (fileInfo == null || !fileInfo.isSame(md) || !snapshotFileExistsInBlobs(fileInfo, blobs)) {
                        // commit point file does not exist in any commit point, or has a different length, or does not fully exist in the listed blobs
                        snapshotRequired = true;
                    }

                    if (snapshotRequired) {
                        indexNumberOfFiles++;
                        indexTotalFilesSize += md.length();
                        // create a new FileInfo
                        BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize);
                        indexCommitPointFiles.add(snapshotFileInfo);
                        filesToSnapshot.add(snapshotFileInfo);
                    } else {
View Full Code Here
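
The decision above hinges on FileInfo.isSame(md) together with the length and checksum carried by StoreFileMetaData. The following is a minimal, hedged sketch of that kind of comparison, using only the accessors that appear elsewhere on this page (length(), checksum(), hash()); it is not the verbatim Elasticsearch implementation.

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.index.store.StoreFileMetaData;

    final class StoreFileComparison {

        // Returns true when the repository copy and the local copy describe the same file.
        static boolean sameFile(StoreFileMetaData repoMeta, StoreFileMetaData localMeta) {
            if (localMeta == null || repoMeta.length() != localMeta.length()) {
                return false; // a missing or differently sized file always needs a new snapshot
            }
            if (repoMeta.checksum() == null || localMeta.checksum() == null) {
                return false; // without checksums we cannot prove the files are identical
            }
            if (!repoMeta.checksum().equals(localMeta.checksum())) {
                return false;
            }
            // .si / segments_N files additionally carry a content hash (see the BWC note above)
            return repoMeta.hash().bytesEquals(localMeta.hash());
        }
    }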


                        throw new ElasticsearchParseException("unexpected token  [" + token + "]");
                    }
                }
            }
            // TODO: Verify???
            return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);
        }
View Full Code Here

     * This is a BWC layer to ensure we update the snapshot's metadata with the corresponding hashes before we compare them.
     * The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the
     * comparison of the files on a per-segment / per-commit level.
     */
    private static final void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Throwable {
        final StoreFileMetaData metadata;
        if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
            if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
                // we have a hash - check if our repo has a hash too otherwise we have
                // to calculate it.
                // we might have multiple parts even though the file is small... make sure we read all of it.
                try (final InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
                    BytesRefBuilder builder = new BytesRefBuilder();
View Full Code Here
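
The example is cut off right after the BytesRefBuilder is created. One plausible way the recomputation could continue is to drain the part-sliced stream into the builder and use the resulting bytes as the hash; this is a hedged sketch with a hypothetical helper name, not the code from the repository.

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;

    final class HashRecompute {

        // Hypothetical helper: reads the whole stream and returns its bytes as a BytesRef,
        // the same shape that StoreFileMetaData#hash() exposes.
        static BytesRef recomputeHash(InputStream stream) throws IOException {
            BytesRefBuilder builder = new BytesRefBuilder();
            byte[] buffer = new byte[4096];
            int read;
            while ((read = stream.read(buffer)) != -1) {
                builder.append(buffer, 0, read); // accumulate every part of the (possibly multi-part) blob
            }
            return builder.toBytesRef();
        }
    }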

        targetNode = DiscoveryNode.readNode(in);
        markAsRelocated = in.readBoolean();
        int size = in.readVInt();
        existingFiles = Maps.newHashMapWithExpectedSize(size);
        for (int i = 0; i < size; i++) {
            StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
            existingFiles.put(md.name(), md);
        }
        if (in.getVersion().onOrAfter(Version.V_1_2_2)) {
            recoveryType = RecoveryState.Type.fromId(in.readByte());
        }
    }
View Full Code Here
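
The readFrom(...) above implies a symmetric write side. Below is a hedged sketch of what the mirrored writeTo(...) could look like under the old Streamable convention; recoveryType.id() and StoreFileMetaData#writeTo are assumptions, not verified against this code line.

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Hedged mirror of the readFrom(...) above, not the verbatim source.
        targetNode.writeTo(out);
        out.writeBoolean(markAsRelocated);
        out.writeVInt(existingFiles.size());
        for (StoreFileMetaData md : existingFiles.values()) {
            md.writeTo(out); // assumes StoreFileMetaData is Streamable in this code line
        }
        if (out.getVersion().onOrAfter(Version.V_1_2_2)) {
            out.writeByte(recoveryType.id()); // id() assumed to invert RecoveryState.Type.fromId(...)
        }
    }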

        store.incRef();
        try {
            StopWatch stopWatch = new StopWatch().start();
            final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
            for (String name : snapshot.getFiles()) {
                final StoreFileMetaData md = recoverySourceMetadata.get(name);
                if (md == null) {
                    logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                    throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " +
                            recoverySourceMetadata.asMap().size() + " files", name);
                }
            }
            // Generate a "diff" of all the identical, different, and missing
            // segment files on the target node, using the existing files on
            // the source node
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(new Store.MetadataSnapshot(request.existingFiles()));
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                            indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) {
                if (request.existingFiles().containsKey(md.name())) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                            indexName, shardId, request.targetNode(), md.name(), request.existingFiles().get(md.name()), md);
                } else {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                            indexName, shardId, request.targetNode(), md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;

            logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                    indexName, shardId, request.targetNode(), response.phase1FileNames.size(),
                    new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

            RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(),
                    response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes,
                    response.phase1TotalSize, response.phase1ExistingTotalSize);
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest,
                    TransportRequestOptions.options().withTimeout(internalActionTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

            // This latch will be used to wait until all files have been transferred to the target node
            final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
            final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
            final AtomicReference<Throwable> corruptedEngine = new AtomicReference<>();
            int fileIndex = 0;
            ThreadPoolExecutor pool;
            for (final String name : response.phase1FileNames) {
                long fileSize = response.phase1FileSizes.get(fileIndex);

                // Files are split into two categories, files that are "small"
                // (under 5mb) and other files. Small files are transferred
                // using a separate thread pool dedicated to small files.
                //
                // The idea behind this is that while we are transferring an
                // older, large index, a user may create a new index, but that
                // index will not be able to recover until the large index
                // finishes, by using two different thread pools we can allow
                // tiny files (like segments for a brand new index) to be
                // recovered while ongoing large segment recoveries are
                // happening. It also allows these pools to be configured
                // separately.
                if (fileSize > recoverySettings.SMALL_FILE_CUTOFF_BYTES) {
                    pool = recoverySettings.concurrentStreamPool();
                } else {
                    pool = recoverySettings.concurrentSmallFileStreamPool();
                }

                pool.execute(new Runnable() {
                    @Override
                    public void run() {
                        store.incRef();
                        final StoreFileMetaData md = recoverySourceMetadata.get(name);
                        try (final IndexInput indexInput = store.directory().openInput(name, IOContext.READONCE)) {
                            final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                            final byte[] buf = new byte[BUFFER_SIZE];
                            boolean shouldCompressRequest = recoverySettings.compress();
                            if (CompressorFactory.isCompressed(indexInput)) {
View Full Code Here
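
The long comment in the middle of this example explains why file transfers are split across two thread pools by size. The sketch below isolates that idea; the cutoff constant, pool sizes, and class name are placeholders rather than the actual RecoverySettings fields.

    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Consumer;

    final class FileTransferPools {

        // Placeholder cutoff: files above this size go to the large-file pool.
        private static final long SMALL_FILE_CUTOFF_BYTES = 5 * 1024 * 1024;

        private final ExecutorService largeFilePool = Executors.newFixedThreadPool(3);
        private final ExecutorService smallFilePool = Executors.newFixedThreadPool(2);

        // Submit each file to the pool matching its size so that tiny segments of a brand-new
        // index are not queued behind a long-running transfer of a large, older index.
        void submitAll(Map<String, Long> fileSizes, Consumer<String> transfer) {
            for (Map.Entry<String, Long> entry : fileSizes.entrySet()) {
                ExecutorService pool = entry.getValue() > SMALL_FILE_CUTOFF_BYTES ? largeFilePool : smallFilePool;
                pool.execute(() -> transfer.accept(entry.getKey()));
            }
        }
    }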

    }

    public void getStoreMetadata(Map<String, Object> response, Store.MetadataSnapshot metadata) {
        List<Map<String, Object>> result = new ArrayList();
        for (String name : metadata.asMap().keySet()) {
            StoreFileMetaData metaData = metadata.get(name);
            Map<String, Object> info = new HashMap();
            info.put("name", name);
            info.put("length", metaData.length());
            info.put("checksum", metaData.checksum() );
            info.put("function", getFileFunction(name));
            result.add(info);
        }
        response.put("store", result);
    }
View Full Code Here
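
A short, hedged usage sketch for getStoreMetadata(...): metadataSnapshot is assumed to come from Store#getMetadata, as in the recovery example earlier on this page, and the file name in the comment is only illustrative.

    Map<String, Object> response = new HashMap<>();
    getStoreMetadata(response, metadataSnapshot);
    // response now has the shape:
    // { "store": [ { "name": "_0.cfs", "length": 1234, "checksum": "...", "function": "..." }, ... ] }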
