Package org.elasticsearch.index.engine

Examples of org.elasticsearch.index.engine.FlushFailedEngineException


                        }

                        maybePruneDeletedTombstones();

                    } catch (Throwable t) {
                        throw new FlushFailedEngineException(shardId, t);
                    }
                }
            } else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
                try (InternalLock _ = readLock.acquire()) {
                    final IndexWriter indexWriter = currentIndexWriter();
                    if (onGoingRecoveries.get() > 0) {
                        throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
                    }

                    if (flushNeeded || flush.force()) {
                        flushNeeded = false;
                        try {
                            long translogId = translogIdGenerator.incrementAndGet();
                            translog.newTransientTranslog(translogId);
                            indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                            indexWriter.commit();
                            // we need to refresh in order to clear older version values
                            refresh(new Refresh("version_table_flush").force(true));
                            // we need to move transient to current only after we refresh
                            // so items added to current will still be around for realtime get
                            // when the transient translog overrides it
                            translog.makeTransientCurrent();

                        } catch (Throwable e) {
                            translog.revertTransient();
                            throw new FlushFailedEngineException(shardId, e);
                        }
                    }
                }

                // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
                // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
                if (enableGcDeletes) {
                    pruneDeletedTombstones();
                }

            } else if (flush.type() == Flush.Type.COMMIT) {
                // note, it's ok to just commit without cleaning the translog; it's perfectly fine to replay a
                // translog on an index that was opened on a committed point in time that is "in the future"
                // of that translog
                try (InternalLock _ = readLock.acquire()) {
                    final IndexWriter indexWriter = currentIndexWriter();
                    // we allow a *just* commit even if there is an ongoing recovery happening...
                    // it's ok to use this: only a flush will cause a new translogId, and we are locked here from
                    // other flushes, which use flushLock
                    try {
                        long translogId = translog.currentId();
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                    } catch (Throwable e) {
                        throw new FlushFailedEngineException(shardId, e);
                    }
                }

                // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
                // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
                if (enableGcDeletes) {
                    pruneDeletedTombstones();
                }

            } else {
                throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
            }

            // reread the last committed segment infos
            try (InternalLock _ = readLock.acquire()) {
                ensureOpen();
                readLastCommittedSegmentsInfo();
            } catch (Throwable e) {
                if (!closed) {
                    logger.warn("failed to read latest segment infos on flush", e);
                    if (Lucene.isCorruptionException(e)) {
                        throw new FlushFailedEngineException(shardId, e);
                    }
                }
            }
        } catch (FlushFailedEngineException ex) {
            maybeFailEngine(ex, "flush");
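
The excerpt above (truncated from the 1.x internal engine's flush path) wraps any failure inside a flush branch in FlushFailedEngineException and, in the outer catch, hands the wrapped exception to maybeFailEngine so the engine can decide whether to fail itself. The sketch below shows the same wrap-and-handle pattern from the caller's side; it assumes the 1.x Engine/Engine.Flush API, and FlushCaller is a hypothetical helper that does not appear anywhere in the listing.

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.FlushFailedEngineException;

// Hypothetical caller-side helper (not part of the listing above): it forwards a
// flush request to the engine, logs the wrapped failure, and rethrows so
// shard-level failure handling can still react to it.
public class FlushCaller {

    private static final ESLogger logger = Loggers.getLogger(FlushCaller.class);

    public static void flushOrRethrow(Engine engine, Engine.Flush flush) {
        try {
            engine.flush(flush);
        } catch (FlushFailedEngineException e) {
            // FlushFailedEngineException identifies the shard and keeps the original
            // Lucene/translog failure as its cause
            logger.warn("flush failed", e);
            throw e;
        }
    }
}

A caller would then invoke something like flushOrRethrow(engine, new Engine.Flush().type(Engine.Flush.Type.COMMIT_TRANSLOG)), assuming the fluent type()/force() setters of the 1.x Flush class; only the matching getters appear in the listing above.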