| Chunk |
|---|
| Conflicting content |
|---|
try {
if (skipInitialTranslogRecovery) {
// make sure we point at the latest translog from now on..
<<<<<<< HEAD
commitIndexWriter(writer, translog.currentId(), lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID));
=======
commitIndexWriter(writer, translog);
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
} else {
recoverFromTranslog(engineConfig, translogGeneration);
} |
| Solution content |
|---|
try {
if (skipInitialTranslogRecovery) {
// make sure we point at the latest translog from now on..
commitIndexWriter(writer, translog, lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID));
} else {
recoverFromTranslog(engineConfig, translogGeneration);
} |
| File |
|---|
| InternalEngine.java |
| Developer's decision |
|---|
| Manual |
| Kind of conflict |
|---|
| Other |
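In this chunk the two branches changed `commitIndexWriter` in different ways: HEAD added a sync-commit id (read from the last commit's user data under `SYNC_COMMIT_ID`), while the incoming branch replaced the raw translog id with the `Translog` handle. The manual resolution keeps both changes, passing the translog object and the sync id together.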
| Chunk |
|---|
| Conflicting content |
|---|
// flush if we recovered something or if we have references to older translogs
// note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
<<<<<<< HEAD
if (opsRecovered > 0) {
=======
if (opsRecovered > 0 || translog.isCurrent(translogGeneration) == false) {
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
flush(true, true); |
| Solution content |
|---|
// flush if we recovered something or if we have references to older translogs
// note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
if (opsRecovered > 0) {
logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
flush(true, true); |
| File |
|---|
| InternalEngine.java |
| Developer's decision |
|---|
| Version 1 |
| Kind of conflict |
|---|
| Other |
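The incoming branch widened the flush condition to also fire when the committed generation is no longer current (`translog.isCurrent(translogGeneration) == false`); the developer kept HEAD's narrower `opsRecovered > 0` check and dropped the extra clause.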
| Chunk |
|---|
| Conflicting content |
|---|
}
@Override
<<<<<<< HEAD
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException {
// best effort attempt before we acquire locks
ensureOpen();
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
logger.trace("starting sync commit [{}]", syncId);
final long translogId = translog.currentId();
commitIndexWriter(indexWriter, translogId, syncId);
logger.debug("successfully sync committed. sync id [{}].", syncId);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
return SyncedFlushResult.SUCCESS;
} catch (IOException ex) {
maybeFailEngine("sync commit", ex);
throw new EngineException(shardId, "failed to sync commit", ex);
}
}
@Override
public CommitId flush() throws EngineException {
return flush(true, false, false);
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
return flush(true, force, waitIfOngoing);
}
private CommitId flush(boolean commitTranslog, boolean force, boolean waitIfOngoing) throws EngineException {
=======
public void flush() throws EngineException {
flush(false, false);
}
@Override
public void flush(boolean force, boolean waitIfOngoing) throws EngineException {
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
ensureOpen();
final byte[] newCommitId;
/* |
| Solution content |
|---|
}
@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException {
// best effort attempt before we acquire locks
ensureOpen();
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
logger.trace("starting sync commit [{}]", syncId);
commitIndexWriter(indexWriter, translog, syncId);
logger.debug("successfully sync committed. sync id [{}].", syncId);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
return SyncedFlushResult.SUCCESS;
} catch (IOException ex) {
maybeFailEngine("sync commit", ex);
throw new EngineException(shardId, "failed to sync commit", ex);
}
}
@Override
public CommitId flush() throws EngineException {
return flush(false, false);
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
ensureOpen();
final byte[] newCommitId;
/* |
| File |
|---|
| InternalEngine.java |
| Developer's decision |
|---|
| Manual |
| Kind of conflict |
|---|
| Annotation |
| Method declaration |
| Method signature |
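HEAD introduced `syncFlush` and made the `flush` overloads return a `CommitId`, backed by a private three-argument variant carrying a `commitTranslog` flag; the incoming branch removed that flag and left `flush` void. The resolution keeps HEAD's `syncFlush` and the `CommitId` return type but adopts the incoming branch's two-argument shape, and switches the sync commit to the `Translog`-handle form of `commitIndexWriter`. A self-contained sketch of the merged overload shape (the stub types here are illustrative, not Elasticsearch's):

```java
// Sketch of the merged flush API: CommitId return type from HEAD,
// two-argument signature (no commitTranslog flag) from the other branch.
final class FlushApiSketch {
    static final class CommitId {}                                   // stub for illustration
    static final class EngineException extends RuntimeException {}  // stub for illustration

    public CommitId flush() throws EngineException {
        return flush(false, false); // force=false, waitIfOngoing=false
    }

    public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
        // single code path: HEAD's separate commitTranslog flag no longer exists
        return new CommitId();
    }
}
```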
| Chunk |
|---|
| Conflicting content |
|---|
}
}
<<<<<<< HEAD
/**
* Extended SearcherFactory that warms the segments if needed when acquiring a new searcher
*/
class SearchFactory extends EngineSearcherFactory {
=======
/** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */
final static class SearchFactory extends EngineSearcherFactory {
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
private final IndicesWarmer warmer;
private final ShardId shardId; |
| Solution content |
|---|
}
}
/** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */
final static class SearchFactory extends EngineSearcherFactory {
private final IndicesWarmer warmer;
private final ShardId shardId; |
| File |
|---|
| InternalEngine.java |
| Developer's decision |
|---|
| Version 2 |
| Kind of conflict |
|---|
| Class signature |
| Comment |
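A mostly cosmetic conflict: both branches define the same warming `SearchFactory`, but the incoming branch collapsed the Javadoc to one line and tightened the class to `final static`. The developer took the incoming version wholesale.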
| Chunk |
|---|
| Conflicting content |
|---|
}
}
<<<<<<< HEAD
private void commitIndexWriter(IndexWriter writer, long translogId, String syncId) throws IOException {
try {
logger.trace("committing writer with translog id [{}] and sync id [{}]", translogId, syncId);
Map |
| Solution content |
|---|
}
}
private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
try {
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
logger.trace("committing writer with translog id [{}] and sync id [{}] ", translogGeneration.translogFileGeneration, syncId);
Map |
| File |
|---|
| InternalEngine.java |
| Developer's decision |
|---|
| Manual |
| Kind of conflict |
|---|
| Method signature |
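Both the conflicting and solution content are truncated at `Map` (presumably a `Map<String, String>` of commit user data whose generic parameters were lost in extraction). A hypothetical completion of the merged body, written against the Lucene 5.x API in use at the time (`IndexWriter.setCommitData`); the key names are assumptions, Elasticsearch defines its own constants for them:

```java
import org.apache.lucene.index.IndexWriter;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

final class CommitSketch {
    // Stamp the translog generation (and optional sync id) into the Lucene
    // commit point's user data, then commit. Key names are assumed.
    static void commitWithTranslogGeneration(IndexWriter writer, long translogFileGeneration,
                                             String syncId) throws IOException {
        Map<String, String> commitData = new HashMap<>();
        commitData.put("translog_generation", Long.toString(translogFileGeneration)); // assumed key
        if (syncId != null) {
            commitData.put("sync_id", syncId); // assumed key
        }
        writer.setCommitData(commitData); // Lucene 5.x API; later versions renamed this
        writer.commit();
    }
}
```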
| Chunk |
|---|
| Conflicting content |
|---|
}
public void markAsInactive() {
<<<<<<< HEAD
updateBufferSize(EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER, Translog.INACTIVE_SHARD_TRANSLOG_BUFFER);
indicesLifecycle.onShardInactive(this);
=======
updateBufferSize(EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER, TranslogConfig.INACTIVE_SHARD_TRANSLOG_BUFFER);
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
}
public final boolean isFlushOnClose() { |
| Solution content |
|---|
}
public void markAsInactive() {
updateBufferSize(EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER, TranslogConfig.INACTIVE_SHARD_TRANSLOG_BUFFER);
indicesLifecycle.onShardInactive(this);
}
public final boolean isFlushOnClose() { |
| File |
|---|
| IndexShard.java |
| Developer's decision |
|---|
| Combination |
| Kind of conflict |
|---|
| Method invocation |
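A true combination: the incoming branch relocated the buffer constant from `Translog` to `TranslogConfig`, while HEAD added the `indicesLifecycle.onShardInactive(this)` notification; the resolution keeps both the new constant location and the new callback.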
| Chunk |
|---|
| Conflicting content |
|---|
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
<<<<<<< HEAD
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.gateway.CorruptStateException;
import org.elasticsearch.index.IndexService;
=======
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.RecoveryEngineException; |
| Solution content |
|---|
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.RecoveryEngineException; |
| File |
|---|
| RecoverySourceHandler.java |
| Developer's decision |
|---|
| Version 2 |
| Kind of conflict |
|---|
| Import |
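An import-only conflict: HEAD carried three extra imports (`ConcurrentCollections`, `CorruptStateException`, `IndexService`) that the resolution drops, keeping the incoming branch's shorter import list.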
| Chunk |
|---|
| Conflicting content |
|---|
recoveredOps.incrementAndGet();
}
}
<<<<<<< HEAD
=======
public void testRecoverFromForeignTranslog() throws IOException {
boolean canHaveDuplicates = true;
boolean autoGeneratedId = true;
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
engine.create(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1l));
}
engine.refresh("test");
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}
Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
engine.close();
Translog translog = new Translog(new TranslogConfig(shardId, createTempDir(), ImmutableSettings.EMPTY, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool));
translog.add(new Translog.Create("test", "SomeBogusId", "{}".getBytes(Charset.forName("UTF-8"))));
assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
translog.close();
EngineConfig config = engine.config();
Path translogPath = config.getTranslogConfig().getTranslogPath();
/* create a TranslogConfig that has been created with a different UUID */
TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettingsService()
, null, store, createSnapshotDeletionPolicy(), createMergePolicy(), config.getMergeScheduler(),
config.getAnalyzer(), config.getSimilarity(), new CodecService(shardId.index()), config.getFailedEngineListener()
, config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
try {
new InternalEngine(brokenConfig, false);
fail("translog belongs to a different engine");
} catch (EngineCreationFailureException ex) {
}
engine = createEngine(store, primaryTranslogDir); // and recover again!
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
}
>>>>>>> 84b24e130eb0d4bf950f81d6915fe23bbc33bde4
} |
| Solution content |
|---|
recoveredOps.incrementAndGet();
}
}
public void testRecoverFromForeignTranslog() throws IOException {
boolean canHaveDuplicates = true;
boolean autoGeneratedId = true;
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
engine.create(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1l));
}
engine.refresh("test");
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}
Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
engine.close();
Translog translog = new Translog(new TranslogConfig(shardId, createTempDir(), ImmutableSettings.EMPTY, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool));
translog.add(new Translog.Create("test", "SomeBogusId", "{}".getBytes(Charset.forName("UTF-8"))));
assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
translog.close();
EngineConfig config = engine.config();
/* create a TranslogConfig that has been created with a different UUID */
TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettingsService()
, null, store, createSnapshotDeletionPolicy(), createMergePolicy(), config.getMergeScheduler(),
config.getAnalyzer(), config.getSimilarity(), new CodecService(shardId.index()), config.getFailedEngineListener()
, config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
try {
new InternalEngine(brokenConfig, false);
fail("translog belongs to a different engine");
} catch (EngineCreationFailureException ex) {
}
engine = createEngine(store, primaryTranslogDir); // and recover again!
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
}
} |
| File |
|---|
| InternalEngineTests.java |
| Developer's decision |
|---|
| Combination |
| Kind of conflict |
|---|
| Method declaration |
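Here the incoming branch added a whole new test, `testRecoverFromForeignTranslog`, next to existing recovery bookkeeping; the resolution keeps the surrounding HEAD context together with the new test body, which verifies that an `InternalEngine` refuses to open over a translog created by a different engine (failing with `EngineCreationFailureException`) and can still recover afterwards.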