Commit
Allow _update and upsert to read from the transaction log (#29264)
We historically removed reading from the transaction log to get consistent
results from _GET calls. There was also the motivation that the read-modify-update
principle we apply should not be hidden from the user. We still agree that
we should not hide these aspects, but the impact on updates is quite significant,
especially if the same document is updated before it is written to disk and made searchable.

This change adds back the ability to read from the transaction log, but only for update calls.
Calls to the _GET API will always do a refresh if necessary to return consistent results, i.e.
if stored fields or DocValues fields are requested.

Closes #26802
s1monw committed Mar 28, 2018
1 parent c3fdf8f commit 13e19e7
Showing 21 changed files with 602 additions and 45 deletions.
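
To make the new flag concrete before the diffs: a minimal sketch of the two read paths, based on the Engine.Get constructor change below, where the second boolean is the new readFromTranslog flag. The literal uid term and the _doc type here are illustrative only.

    import org.apache.lucene.index.Term;
    import org.elasticsearch.index.engine.Engine;

    class GetFlagSketch {
        // Illustrative uid term; real callers derive it from the id/uid field mapper.
        private static final Term UID = new Term("_id", "1");

        // API-level _GET and _explain keep readFromTranslog = false, so the engine may
        // refresh to serve a consistent reader-backed result.
        static Engine.Get apiGet() {
            return new Engine.Get(true /* realtime */, false /* readFromTranslog */, "_doc", "1", UID);
        }

        // The _update/upsert path opts in to reading the live document straight from the
        // translog, avoiding a refresh between writing a document and updating it again.
        static Engine.Get updateGet() {
            return new Engine.Get(true /* realtime */, true /* readFromTranslog */, "_doc", "1", UID);
        }
    }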
@@ -112,13 +112,13 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId
if (uidTerm == null) {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
}
- result = context.indexShard().get(new Engine.Get(false, request.type(), request.id(), uidTerm));
+ result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm));
if (!result.exists()) {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
}
context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
context.preProcess(true);
- int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
+ int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase;
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
for (RescoreContext ctx : context.rescore()) {
Rescorer rescorer = ctx.rescorer();
@@ -47,7 +47,6 @@
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
- import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;
@@ -71,9 +70,8 @@ public UpdateHelper(Settings settings, ScriptService scriptService) {
* Prepares an update request by converting it into an index or delete request or an update response (no action).
*/
public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) {
- final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
- new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME},
- true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
+ final GetResult getResult = indexShard.getService().getForUpdate(request.type(), request.id(), request.version(),
+ request.versionType());
return prepare(indexShard.shardId(), request, getResult, nowInMillis);
}

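The getForUpdate helper that replaces the explicit get(...) call above is not shown in this excerpt. A rough sketch of what it plausibly wraps, assuming ShardGetService simply forwards to its existing get(...) with a new readFromTranslog flag; the overload and its parameter order are assumptions, not the committed code.

    // Hypothetical sketch of ShardGetService#getForUpdate; the real signature may differ.
    public GetResult getForUpdate(String type, String id, long version, VersionType versionType) {
        return get(type, id, new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME},
            true /* realtime */, version, versionType, FetchSourceContext.FETCH_SOURCE,
            true /* readFromTranslog */);
    }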
@@ -100,7 +100,7 @@ public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context)
if (versions.advanceExact(docID) == false) {
throw new IllegalArgumentException("Document [" + docID + "] misses the [" + VersionFieldMapper.NAME + "] field");
}
- return new DocIdAndVersion(docID, versions.longValue(), context);
+ return new DocIdAndVersion(docID, versions.longValue(), context.reader(), context.docBase);
} else {
return null;
}
@@ -20,6 +20,7 @@
package org.elasticsearch.common.lucene.uid;

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.Term;
@@ -97,12 +98,14 @@ private VersionsAndSeqNoResolver() {
public static class DocIdAndVersion {
public final int docId;
public final long version;
- public final LeafReaderContext context;
+ public final LeafReader reader;
+ public final int docBase;

- DocIdAndVersion(int docId, long version, LeafReaderContext context) {
+ public DocIdAndVersion(int docId, long version, LeafReader reader, int docBase) {
this.docId = docId;
this.version = version;
- this.context = context;
+ this.reader = reader;
+ this.docBase = docBase;
}
}

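Replacing the LeafReaderContext field with an explicit reader plus docBase is what allows a DocIdAndVersion to be built around a single-document TranslogLeafReader further down, which never lives inside a composite reader context. A sketch of the two construction paths, mirroring the call sites in this commit; the variable names are illustrative.

    // From a regular index leaf, as in PerThreadIDVersionAndSeqNoLookup above:
    DocIdAndVersion fromIndex = new DocIdAndVersion(docID, versions.longValue(), context.reader(), context.docBase);

    // From a translog-backed single-document reader, as in InternalEngine below
    // (docId and docBase are both 0 because the reader holds exactly one document):
    DocIdAndVersion fromTranslog = new DocIdAndVersion(0, operation.version(), translogReader, 0);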
@@ -1232,14 +1232,16 @@ public static class Get {
private final boolean realtime;
private final Term uid;
private final String type, id;
+ private final boolean readFromTranslog;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;

- public Get(boolean realtime, String type, String id, Term uid) {
+ public Get(boolean realtime, boolean readFromTranslog, String type, String id, Term uid) {
this.realtime = realtime;
this.type = type;
this.id = id;
this.uid = uid;
+ this.readFromTranslog = readFromTranslog;
}

public boolean realtime() {
@@ -1275,6 +1277,10 @@ public Get versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}

+ public boolean isReadFromTranslog() {
+ return readFromTranslog;
+ }
}

public static class GetResult implements Releasable {
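Since versionType(...) returns this (see the hunk above), and assuming the version(long) setter follows the same builder pattern, a caller can configure a translog-backed realtime get fluently; a usage sketch with illustrative request and uidTerm placeholders:

    Engine.Get get = new Engine.Get(true /* realtime */, true /* readFromTranslog */,
        request.type(), request.id(), uidTerm)
        .version(request.version())
        .versionType(request.versionType());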
@@ -78,6 +78,7 @@
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
+ import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -145,6 +146,7 @@ public class InternalEngine extends Engine {
* being indexed/deleted.
*/
private final AtomicLong writingBytes = new AtomicLong();
+ private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false);

@Nullable
private final String historyUUID;
@@ -558,6 +560,27 @@ public GetResult get(Get get, BiFunction<String, SearcherScope, Searcher> search
throw new VersionConflictEngineException(shardId, get.type(), get.id(),
get.versionType().explainConflictForReads(versionValue.version, get.version()));
}
+ if (get.isReadFromTranslog()) {
+ // this is only used for updates - API _GET calls will always read from a reader for consistency
+ // the update call doesn't need the consistency since it is source only + _parent, but parent can go away in 7.0
+ if (versionValue.getLocation() != null) {
+ try {
+ Translog.Operation operation = translog.readOperation(versionValue.getLocation());
+ if (operation != null) {
+ // in the case of an already pruned translog generation we might get null here - yet it is very unlikely
+ TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig
+ .getIndexSettings().getIndexVersionCreated());
+ return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader)),
+ new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0));
+ }
+ } catch (IOException e) {
+ maybeFailEngine("realtime_get", e); // let's check if the translog has failed with a tragic event
+ throw new EngineException(shardId, "failed to read operation from translog", e);
+ }
+ } else {
+ trackTranslogLocation.set(true);
+ }
+ }
refresh("realtime_get", SearcherScope.INTERNAL);
}
scope = SearcherScope.INTERNAL;
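Putting the pieces together, a hedged sketch of what a translog-backed realtime get looks like from the caller's side, using the get(Get, BiFunction) signature in this hunk's header; the uid term is simplified and the searcher-factory method reference is assumed to match the engine's acquireSearcher(String, SearcherScope).

    Term uid = new Term("_id", "1"); // simplified; production code encodes the id via the mapper
    try (Engine.GetResult result = engine.get(
            new Engine.Get(true /* realtime */, true /* readFromTranslog */, "_doc", "1", uid),
            engine::acquireSearcher)) {
        if (result.exists()) {
            // When the version map still holds the operation's translog location, the reader here
            // is a single-document TranslogLeafReader and no refresh was needed.
            long version = result.docIdAndVersion().version;
        }
    }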
@@ -790,6 +813,10 @@ public IndexResult index(Index index) throws IOException {
}
indexResult.setTranslogLocation(location);
}
+ if (plan.indexIntoLucene && indexResult.hasFailure() == false) {
+ versionMap.maybePutUnderLock(index.uid().bytes(),
+ getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation()));
+ }
if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo());
}
@@ -916,8 +943,6 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan)
assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
index(index.docs(), indexWriter);
}
- versionMap.maybePutUnderLock(index.uid().bytes(),
- new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm()));
return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
} catch (Exception ex) {
if (indexWriter.getTragicException() == null) {
@@ -941,6 +966,13 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan)
}
}

+ private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) {
+ if (location != null && trackTranslogLocation.get()) {
+ return new TranslogVersionValue(location, version, seqNo, term);
+ }
+ return new VersionValue(version, seqNo, term);
+ }

/**
* returns true if the indexing operation may have already been processed by this engine.
* Note that it is OK to rarely return true even if this is not the case. However a `false`
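TranslogVersionValue itself is outside this excerpt; presumably it is a VersionValue that additionally carries the Translog.Location consumed by the get path above. A speculative sketch of its shape, with field and constructor details as assumptions:

    final class TranslogVersionValue extends VersionValue {
        private final Translog.Location translogLocation;

        TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) {
            super(version, seqNo, term);
            this.translogLocation = translogLocation;
        }

        @Override
        public Translog.Location getLocation() {
            // The plain VersionValue presumably returns null here, which is why the get path
            // falls back to setting trackTranslogLocation and refreshing instead.
            return translogLocation;
        }
    }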
