Projects >> lucene-solr >> e578a2220ee596a18930fcdcbde672d749397253

Chunk
Conflicting content
 * limitations under the License.
 */

<<<<<<< HEAD
=======
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Bits;  // javadocs
import org.apache.lucene.util.IOUtils;

>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
Solution content
 * limitations under the License.
 */

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
File
IndexReader.java
Developer's decision
Version 1
Kind of conflict
Comment
Import
Chunk
Conflicting content
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicInteger;
<<<<<<< HEAD

import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document2StoredFieldVisitor;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Bits; // javadocs
import org.apache.lucene.util.IOUtils;
=======
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f

/**
 IndexReader is an abstract class, providing an interface for accessing a
Solution content
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document2StoredFieldVisitor;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Bits; // javadocs
import org.apache.lucene.util.IOUtils;

/**
 IndexReader is an abstract class, providing an interface for accessing a
File
IndexReader.java
Developer's decision
Version 1
Kind of conflict
Comment
Import
Chunk
Conflicting content
        "a c e a b c"
      };
      for (int j = 0; j < docs.length; j++) {
<<<<<<< HEAD
        Document2 d = writer.newDocument();
        d.addLargeText("contents", docs[j]);
        d.addInt("id", j);
=======
        Document d = new Document();
        d.add(newTextField("contents", docs[j], Field.Store.YES));
        d.add(new IntField("id", j, Field.Store.NO));
        d.add(new NumericDocValuesField("id", j));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
        writer.addDocument(d);
      }
      writer.close();
Solution content
        "a c e a b c"
      };
      for (int j = 0; j < docs.length; j++) {
        Document2 d = writer.newDocument();
        d.addLargeText("contents", docs[j]);
        d.addInt("id", j);
        writer.addDocument(d);
      }
      writer.close();
File
TestSearch.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.junit.Test;
<<<<<<< HEAD
import com.carrotsearch.randomizedtesting.annotations.Repeat;
=======

>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import com.carrotsearch.randomizedtesting.generators.RandomInts;

public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTestCase {
Solution content
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.junit.Test;
import com.carrotsearch.randomizedtesting.generators.RandomInts;

public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTestCase {
File
TestCompressingStoredFieldsFormat.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      fail(new String(t.bytesLog.toString("UTF-8")));
    }
  }
<<<<<<< HEAD
  
  /** testThreadInterruptDeadlock but with 2 indexer threads */
  public void testTwoThreadsInterruptDeadlock() throws Exception {
    IndexerThreadInterrupt t1 = new IndexerThreadInterrupt(1);
    t1.setDaemon(true);
    t1.start();
    
    IndexerThreadInterrupt t2 = new IndexerThreadInterrupt(2);
    t2.setDaemon(true);
    t2.start();

    // Force class loader to load ThreadInterruptedException
    // up front... else we can see a false failure if 2nd
    // interrupt arrives while class loader is trying to
    // init this class (in servicing a first interrupt):
    assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);

    // issue 300 interrupts to child thread
    final int numInterrupts = atLeast(300);
    int i = 0;
    while(i < numInterrupts) {
      // TODO: would be nice to also sometimes interrupt the
      // CMS merge threads too ...
      Thread.sleep(10);
      IndexerThreadInterrupt t = random().nextBoolean() ? t1 : t2;
      if (t.allowInterrupt) {
        i++;
        t.interrupt();
      }
      if (!t1.isAlive() && !t2.isAlive()) {
        break;
      }
    }
    t1.finish = true;
    t2.finish = true;
    t1.join();
    t2.join();
    if (t1.failed) {
      System.out.println("Thread1 failed:\n" + new String(t1.bytesLog.toString("UTF-8")));
    }
    if (t2.failed) {
      System.out.println("Thread2 failed:\n" + new String(t2.bytesLog.toString("UTF-8")));
    }
    assertFalse(t1.failed || t2.failed);
=======

  public void testIndexStoreCombos() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    byte[] b = new byte[50];
    for(int i=0;i<50;i++)
      b[i] = (byte) (i+77);

    Document doc = new Document();

    FieldType customType = new FieldType(StoredField.TYPE);
    customType.setTokenized(true);
    
    Field f = new Field("binary", b, 10, 17, customType);
    // TODO: this is evil, changing the type after creating the field:
    customType.setIndexOptions(IndexOptions.DOCS);
    final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc1field1.setReader(new StringReader("doc1field1"));
    f.setTokenStream(doc1field1);

    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
    
    Field f2 = newField("string", "value", customType2);
    final MockTokenizer doc1field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc1field2.setReader(new StringReader("doc1field2"));
    f2.setTokenStream(doc1field2);
    doc.add(f);
    doc.add(f2);
    w.addDocument(doc);

    // add 2 docs to test in-memory merging
    final MockTokenizer doc2field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc2field1.setReader(new StringReader("doc2field1"));
    f.setTokenStream(doc2field1);
    final MockTokenizer doc2field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc2field2.setReader(new StringReader("doc2field2"));
    f2.setTokenStream(doc2field2);
    w.addDocument(doc);

    // force segment flush so we can force a segment merge with doc3 later.
    w.commit();

    final MockTokenizer doc3field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc3field1.setReader(new StringReader("doc3field1"));
    f.setTokenStream(doc3field1);
    final MockTokenizer doc3field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    doc3field2.setReader(new StringReader("doc3field2"));
    f2.setTokenStream(doc3field2);

    w.addDocument(doc);
    w.commit();
    w.forceMerge(1);   // force segment merge.
    w.close();

    IndexReader ir = DirectoryReader.open(dir);
    StoredDocument doc2 = ir.document(0);
    StorableField f3 = doc2.getField("binary");
    b = f3.binaryValue().bytes;
    assertTrue(b != null);
    assertEquals(17, b.length, 17);
    assertEquals(87, b[0]);

    assertTrue(ir.document(0).getField("binary").binaryValue()!=null);
    assertTrue(ir.document(1).getField("binary").binaryValue()!=null);
    assertTrue(ir.document(2).getField("binary").binaryValue()!=null);

    assertEquals("value", ir.document(0).get("string"));
    assertEquals("value", ir.document(1).get("string"));
    assertEquals("value", ir.document(2).get("string"));


    // test that the terms were indexed.
    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

    ir.close();
    dir.close();

>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
  }

  public void testNoDocsIndex() throws Throwable {
Solution content
      fail(new String(t.bytesLog.toString("UTF-8")));
    }
  }

  public void testNoDocsIndex() throws Throwable {
File
TestIndexWriter.java
Developer's decision
None
Kind of conflict
Attribute
Comment
For statement
If statement
Method invocation
Method signature
Variable
While statement
Chunk
Conflicting content
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Rethrow;
<<<<<<< HEAD
import org.apache.lucene.util.TestUtil;
=======
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f

/** 
 * Causes a bunch of non-aborting and aborting exceptions and checks that
Solution content
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

/** 
 * Causes a bunch of non-aborting and aborting exceptions and checks that
File
TestIndexWriterExceptions2.java
Developer's decision
Concatenation
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
<<<<<<< HEAD
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.TestUtil;
import org.junit.Ignore;
=======
import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.TestUtil;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f

/** 
 * Causes a bunch of fake OOM and checks that no other exceptions are delivered instead,
Solution content
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.TestUtil;

/** 
 * Causes a bunch of fake OOM and checks that no other exceptions are delivered instead,
File
TestIndexWriterOutOfMemory.java
Developer's decision
Combination
Kind of conflict
Import
Chunk
Conflicting content
      Document2 doc = w.newDocument();
      doc.addAtom("field", "threadIDmain");
      w.addDocument(doc);
<<<<<<< HEAD

      for(String fileName : dir.listAll()) {
        if (fileName.endsWith(".si")) {
          String segName = IndexFileNames.parseSegmentName(fileName);
          if (segSeen.contains(segName) == false) {
            segSeen.add(segName);
            byte id[] = readSegmentInfoID(dir, fileName);
            SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
            si.setCodec(codec);
            SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, -1, -1, -1);
            SegmentReader sr = new SegmentReader(w.getFieldTypes(),
                                                 sci, IOContext.DEFAULT);
            try {
              thread0Count += sr.docFreq(new Term("field", "threadID0"));
              thread1Count += sr.docFreq(new Term("field", "threadID1"));
            } finally {
              sr.close();
=======
      if (counter++ == checkAt) {
        for(String fileName : dir.listAll()) {
          if (fileName.endsWith(".si")) {
            String segName = IndexFileNames.parseSegmentName(fileName);
            if (segSeen.contains(segName) == false) {
              segSeen.add(segName);
              byte id[] = readSegmentInfoID(dir, fileName);
              SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
              si.setCodec(codec);
              SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, -1, -1, -1);
              SegmentReader sr = new SegmentReader(sci, IOContext.DEFAULT);
              try {
                thread0Count += sr.docFreq(new Term("field", "threadID0"));
                thread1Count += sr.docFreq(new Term("field", "threadID1"));
              } finally {
                sr.close();
              }
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
            }
          }
        }
Solution content
      Document2 doc = w.newDocument();
      doc.addAtom("field", "threadIDmain");
      w.addDocument(doc);
      if (counter++ == checkAt) {
        for(String fileName : dir.listAll()) {
          if (fileName.endsWith(".si")) {
            String segName = IndexFileNames.parseSegmentName(fileName);
            if (segSeen.contains(segName) == false) {
              segSeen.add(segName);
              byte id[] = readSegmentInfoID(dir, fileName);
              SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
              si.setCodec(codec);
              SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, -1, -1, -1);
              SegmentReader sr = new SegmentReader(w.getFieldTypes(),
                                                   sci, IOContext.DEFAULT);
              try {
                thread0Count += sr.docFreq(new Term("field", "threadID0"));
                thread1Count += sr.docFreq(new Term("field", "threadID1"));
              } finally {
                sr.close();
              }
            }
          }
        }
File
TestIndexWriterThreadsToSegments.java
Developer's decision
Combination
Kind of conflict
Other
Chunk
Conflicting content
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
<<<<<<< HEAD
import org.apache.lucene.document.Document2;
=======
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
Solution content
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document2;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
File
TestBooleanQuery.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
      Document2 doc = writer.newDocument();
      if ((i % 5) != 0) { // some documents must not have an entry in the first
                          // sort field
<<<<<<< HEAD
        doc.addAtom("publicationDate_", random.getLuceneDate());
=======
        doc.add(new SortedDocValuesField("publicationDate_", new BytesRef(random.getLuceneDate())));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
      }
      if ((i % 7) == 0) { // some documents to match the query (see below)
        doc.addLargeText("content", "test");
Solution content
      Document2 doc = writer.newDocument();
      if ((i % 5) != 0) { // some documents must not have an entry in the first
                          // sort field
        doc.addAtom("publicationDate_", random.getLuceneDate());
      }
      if ((i % 7) == 0) { // some documents to match the query (see below)
        doc.addLargeText("content", "test");
File
TestCustomSearcherSort.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
<<<<<<< HEAD
import org.apache.lucene.document.FieldTypes;
=======
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
Solution content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
File
TestFilteredQuery.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitDocIdSet;
<<<<<<< HEAD
import org.apache.lucene.util.Bits;
=======
import org.apache.lucene.util.BytesRef;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
Solution content
import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
File
TestFilteredQuery.java
Developer's decision
Concatenation
Kind of conflict
Import
Chunk
Conflicting content
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter (random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

<<<<<<< HEAD
    Document2 doc = writer.newDocument();
    doc.addLargeText("field", "one two three four five");
    doc.addLargeText("sorter", "b");
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two three four");
    doc.addLargeText("sorter", "d");
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two three y");
    doc.addLargeText("sorter", "a");
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two x");
    doc.addLargeText("sorter", "c");
=======
    Document doc = new Document();
    doc.add (newTextField("field", "one two three four five", Field.Store.YES));
    doc.add (newTextField("sorter", "b", Field.Store.YES));
    doc.add (new SortedDocValuesField("sorter", new BytesRef("b")));
    writer.addDocument (doc);

    doc = new Document();
    doc.add (newTextField("field", "one two three four", Field.Store.YES));
    doc.add (newTextField("sorter", "d", Field.Store.YES));
    doc.add (new SortedDocValuesField("sorter", new BytesRef("d")));
    writer.addDocument (doc);

    doc = new Document();
    doc.add (newTextField("field", "one two three y", Field.Store.YES));
    doc.add (newTextField("sorter", "a", Field.Store.YES));
    doc.add (new SortedDocValuesField("sorter", new BytesRef("a")));
    writer.addDocument (doc);

    doc = new Document();
    doc.add (newTextField("field", "one two x", Field.Store.YES));
    doc.add (newTextField("sorter", "c", Field.Store.YES));
    doc.add (new SortedDocValuesField("sorter", new BytesRef("c")));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
    writer.addDocument (doc);

    // tests here require single segment (eg try seed
Solution content
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter (random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    Document2 doc = writer.newDocument();
    doc.addLargeText("field", "one two three four five");
    doc.addAtom("sorter", new BytesRef("b"));
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two three four");
    doc.addAtom("sorter", new BytesRef("d"));
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two three y");
    doc.addAtom("sorter", new BytesRef("a"));
    writer.addDocument (doc);

    doc = writer.newDocument();
    doc.addLargeText("field", "one two x");
    doc.addAtom("sorter", new BytesRef("c"));
    writer.addDocument (doc);

    // tests here require single segment (eg try seed
File
TestFilteredQuery.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

<<<<<<< HEAD
import org.apache.lucene.document.Document2;
=======
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
Solution content
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.document.Document2;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
File
TestIndexSearcher.java
Developer's decision
Combination
Kind of conflict
Import
Chunk
Conflicting content
    dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < 100; i++) {
<<<<<<< HEAD
      Document2 doc = iw.newDocument();
      doc.addUniqueAtom("field", Integer.toString(i));
      doc.addAtom("field2", Boolean.toString(i % 2 == 0));
=======
      Document doc = new Document();
      doc.add(newStringField("field", Integer.toString(i), Field.Store.NO));
      doc.add(newStringField("field2", Boolean.toString(i % 2 == 0), Field.Store.NO));
      doc.add(new SortedDocValuesField("field2", new BytesRef(Boolean.toString(i % 2 == 0))));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
      iw.addDocument(doc);
    }
    reader = iw.getReader();
Solution content
    dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < 100; i++) {
      Document2 doc = iw.newDocument();
      doc.addUniqueAtom("field", Integer.toString(i));
      doc.addAtom("field2", Boolean.toString(i % 2 == 0));
      iw.addDocument(doc);
    }
    reader = iw.getReader();
File
TestIndexSearcher.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
<<<<<<< HEAD
=======
import org.apache.lucene.document.DoubleDocValuesField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.document.FloatDocValuesField;
Solution content
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.document.FloatDocValuesField;
File
TestSearchAfter.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
    int numDocs = atLeast(200);
    Random r = random();
    for (int i = 0; i < numDocs; i++) {
<<<<<<< HEAD

      Document2 doc = iw.newDocument();
      if (random().nextInt(5) != 4) {
        doc.addLargeText("english", English.intToEnglish(i));
      }
      if (random().nextInt(5) != 4) {
        doc.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("byte", "" + ((byte) random().nextInt()));
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("short", "" + ((short) random().nextInt()));
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("int", random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addLong("long", random().nextLong());
      }
      if (random().nextInt(5) != 4) {
        doc.addFloat("float", random().nextFloat());
      }
      if (random().nextInt(5) != 4) {
        doc.addDouble("double", random().nextDouble());
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("bytes", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("bytesval", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("intdocvalues", random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addFloat("floatdocvalues", random().nextFloat());
      }
      if (random().nextInt(5) != 4) {
        doc.addShortText("sortedbytesdocvalues", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addShortText("sortedbytesdocvaluesval", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addBinary("straightbytesdocvalues", new BytesRef(TestUtil.randomRealisticUnicodeString(random())));
      }
=======
      List fields = new ArrayList<>();
      fields.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
      fields.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
      fields.add(new NumericDocValuesField("byte", (byte) r.nextInt()));
      fields.add(new NumericDocValuesField("short", (short) r.nextInt()));
      fields.add(new NumericDocValuesField("int", r.nextInt()));
      fields.add(new NumericDocValuesField("long", r.nextLong()));
      fields.add(new FloatDocValuesField("float", r.nextFloat()));
      fields.add(new DoubleDocValuesField("double", r.nextDouble()));
      fields.add(new SortedDocValuesField("bytes", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      fields.add(new BinaryDocValuesField("bytesval", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f

      doc.addUniqueInt("id", i);
      if (VERBOSE) {
Solution content
    int numDocs = atLeast(200);
    Random r = random();
    for (int i = 0; i < numDocs; i++) {
      Document2 doc = iw.newDocument();
      if (random().nextInt(5) != 4) {
        doc.addLargeText("english", English.intToEnglish(i));
      }
      if (random().nextInt(5) != 4) {
        doc.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("byte", (byte) random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("short", (short) random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("int", random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addLong("long", random().nextLong());
      }
      if (random().nextInt(5) != 4) {
        doc.addFloat("float", random().nextFloat());
      }
      if (random().nextInt(5) != 4) {
        doc.addDouble("double", random().nextDouble());
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("bytes", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addAtom("bytesval", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addInt("intdocvalues", random().nextInt());
      }
      if (random().nextInt(5) != 4) {
        doc.addFloat("floatdocvalues", random().nextFloat());
      }
      if (random().nextInt(5) != 4) {
        doc.addShortText("sortedbytesdocvalues", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addShortText("sortedbytesdocvaluesval", TestUtil.randomRealisticUnicodeString(random()));
      }
      if (random().nextInt(5) != 4) {
        doc.addBinary("straightbytesdocvalues", new BytesRef(TestUtil.randomRealisticUnicodeString(random())));
      }

      doc.addUniqueInt("id", i);
      if (VERBOSE) {
File
TestSearchAfter.java
Developer's decision
Manual
Kind of conflict
Other
Chunk
Conflicting content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
<<<<<<< HEAD
=======
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.LeafReaderContext;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.index.CompositeReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
Solution content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CompositeReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
File
TestTopDocsMerge.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
      }

      for(int docIDX=0;docIDX<numDocs;docIDX++) {  [NOTE: interior of this conflict hunk was lost to HTML-escaping truncation — the text from the "<" in the loop condition through the first ">" of the closing marker was consumed as a pseudo-tag; confirm against TestTopDocsMerge.java at commit 3b6e1677]
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
        final int intValue;
        if (random().nextInt(100) == 17) {
          intValue = Integer.MIN_VALUE;
Solution content
      }

      for(int docIDX=0;docIDX<numDocs;docIDX++) {  [NOTE: remainder of this line was lost to HTML-escaping truncation at the "<"; loop bound presumed numDocs — confirm against TestTopDocsMerge.java]
File
TestTopDocsMerge.java
Developer's decision
Version 1
Kind of conflict
Other
Chunk
Conflicting content
        } else {
          intValue = random().nextInt();
        }
<<<<<<< HEAD
        doc.addInt("int", intValue);
=======
        doc.add(new NumericDocValuesField("int", intValue));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
        if (VERBOSE) {
          System.out.println("  doc=" + doc);
        }
Solution content
        } else {
          intValue = random().nextInt();
        }
        doc.addInt("int", intValue);
        if (VERBOSE) {
          System.out.println("  doc=" + doc);
        }
File
TestTopDocsMerge.java
Developer's decision
Version 1
Kind of conflict
Other
Chunk
Conflicting content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
<<<<<<< HEAD
=======
import org.apache.lucene.document.DoubleDocValuesField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.NumericDocValuesField;
Solution content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.NumericDocValuesField;
File
TestExpressionSorts.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    int numDocs = TestUtil.nextInt(random(), 2049, 4000);
    for (int i = 0; i < numDocs; i++) {
<<<<<<< HEAD
      Document2 document = iw.newDocument();
      document.addLargeText("english", English.intToEnglish(i));
      document.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
      document.addAtom("byte", "" + ((byte) random().nextInt()));
      document.addAtom("short", "" + ((short) random().nextInt()));
      document.addInt("intdocvalues", random().nextInt());
      document.addFloat("floatdocvalues", random().nextFloat());
=======
      Document document = new Document();
      document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
      document.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
      document.add(new NumericDocValuesField("int", random().nextInt()));
      document.add(new NumericDocValuesField("long", random().nextLong()));
      document.add(new FloatDocValuesField("float", random().nextFloat()));
      document.add(new DoubleDocValuesField("double", random().nextDouble()));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
      iw.addDocument(document);
    }
    reader = iw.getReader();
Solution content
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    int numDocs = TestUtil.nextInt(random(), 2049, 4000);
    for (int i = 0; i < numDocs; i++) {
      Document2 document = iw.newDocument();
      document.addLargeText("english", English.intToEnglish(i));
      document.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
      document.addInt("int", random().nextInt());
      document.addLong("long", random().nextLong());
      document.addFloat("float", random().nextFloat());
      document.addDouble("double", random().nextDouble());
      iw.addDocument(document);
    }
    reader = iw.getReader();
File
TestExpressionSorts.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    for (int i = 0; i < 10; i++) {
      boolean reversed = random().nextBoolean();
      SortField fields[] = new SortField[] {
<<<<<<< HEAD
          new SortField("intdocvalues", SortField.Type.INT, reversed),
          new SortField("floatdocvalues", SortField.Type.FLOAT, reversed),
=======
          new SortField("int", SortField.Type.INT, reversed),
          new SortField("long", SortField.Type.LONG, reversed),
          new SortField("float", SortField.Type.FLOAT, reversed),
          new SortField("double", SortField.Type.DOUBLE, reversed),
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
          new SortField("score", SortField.Type.SCORE)
      };
      Collections.shuffle(Arrays.asList(fields), random());
Solution content
    for (int i = 0; i < 10; i++) {
      boolean reversed = random().nextBoolean();
      SortField fields[] = new SortField[] {
          new SortField("score", SortField.Type.SCORE)
      };
      Collections.shuffle(Arrays.asList(fields), random());
File
TestExpressionSorts.java
Developer's decision
None
Kind of conflict
Method invocation
Chunk
Conflicting content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
<<<<<<< HEAD
import org.apache.lucene.document.FieldTypes;
=======
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
Solution content
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
File
SpatialExample.java
Developer's decision
Combination
Kind of conflict
Import
Chunk
Conflicting content
    indexWriter.close();
  }

<<<<<<< HEAD
  private Document2 newSampleDocument(IndexWriter indexWriter, int id, Shape... shapes) {
    Document2 doc = indexWriter.newDocument();
    FieldTypes fieldTypes = indexWriter.getFieldTypes();
    fieldTypes.setMultiValued(strategy.getFieldName() + "_stored");
    doc.addInt("id", id);
=======
  private Document newSampleDocument(int id, Shape... shapes) {
    Document doc = new Document();
    doc.add(new StoredField("id", id));
    doc.add(new NumericDocValuesField("id", id));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
    //Potentially more than one shape in this field is supported by some
    // strategies; see the javadocs of the SpatialStrategy impl to see.
    for (Shape shape : shapes) {
Solution content
    indexWriter.close();
  }

  private Document2 newSampleDocument(IndexWriter indexWriter, int id, Shape... shapes) {
    Document2 doc = indexWriter.newDocument();
    FieldTypes fieldTypes = indexWriter.getFieldTypes();
    fieldTypes.setMultiValued(strategy.getFieldName() + "_stored");
    doc.addInt("id", id);
    //Potentially more than one shape in this field is supported by some
    // strategies; see the javadocs of the SpatialStrategy impl to see.
    for (Shape shape : shapes) {
File
SpatialExample.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Method signature
Variable
Chunk
Conflicting content
        BytesRef tempPayload;
        if (hasPayloads) {
<<<<<<< HEAD
          IndexableField payload = doc.getField(payloadField);
          if (payload == null || (payload.binaryValue() == null && payload.stringValue() == null)) {
=======
          StorableField payload = doc.getField(payloadField);
          if (payload == null) {
            continue;
          } else if (payload.binaryValue() != null) {
            tempPayload =  payload.binaryValue();
          } else if (payload.stringValue() != null) {
            tempPayload = new BytesRef(payload.stringValue());
          } else {
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
            continue;
          }
        } else {
Solution content
        BytesRef tempPayload;
        if (hasPayloads) {
          IndexableField payload = doc.getField(payloadField);
          if (payload == null) {
            continue;
          } else if (payload.binaryValue() != null) {
            tempPayload =  payload.binaryValue();
          } else if (payload.stringValue() != null) {
            tempPayload = new BytesRef(payload.stringValue());
          } else {
            continue;
          }
        } else {
File
DocumentDictionary.java
Developer's decision
Combination
Kind of conflict
Other
Chunk
Conflicting content
        }

        if (hasContexts) {
<<<<<<< HEAD
          for (IndexableField contextField : doc.getFields(contextsField)) {
            if (contextField.binaryValue() == null && contextField.stringValue() == null) {
              continue;
=======
          final StorableField[] contextFields = doc.getFields(contextsField);
          for (StorableField contextField : contextFields) {
            if (contextField.binaryValue() != null) {
              tempContexts.add(contextField.binaryValue());
            } else if (contextField.stringValue() != null) {
              tempContexts.add(new BytesRef(contextField.stringValue()));
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
            } else {
              continue;
            }
Solution content
            } else {
        }

        if (hasContexts) {
          for (IndexableField contextField : doc.getFields(contextsField)) {
            if (contextField.binaryValue() != null) {
              tempContexts.add(contextField.binaryValue());
            } else if (contextField.stringValue() != null) {
              tempContexts.add(new BytesRef(contextField.stringValue()));
              continue;
            }
File
DocumentDictionary.java
Developer's decision
Combination
Kind of conflict
Other
Chunk
Conflicting content
          }
        }

<<<<<<< HEAD
        IndexableField fieldVal = doc.getField(field);
        if (fieldVal == null || (fieldVal.binaryValue() == null && fieldVal.stringValue() == null)) {
=======
        currentDocFields = doc.getFields(field);
        nextFieldsPosition = 0;
        if (currentDocFields.length == 0) { // no values in this document
          continue;
        }
        StorableField fieldValue = currentDocFields[nextFieldsPosition++];
        BytesRef tempTerm;
        if (fieldValue.binaryValue() != null) {
          tempTerm = fieldValue.binaryValue();
        } else if (fieldValue.stringValue() != null) {
          tempTerm = new BytesRef(fieldValue.stringValue());
        } else {
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
          continue;
        }
Solution content
          }
        }
        currentDocFields = doc.getFields(field);
        nextFieldsPosition = 0;
        if (currentDocFields.size() == 0) { // no values in this document
          continue;
        }
        IndexableField fieldValue = currentDocFields.get(nextFieldsPosition++);
        BytesRef tempTerm;
        if (fieldValue.binaryValue() != null) {
          tempTerm = fieldValue.binaryValue();
        } else if (fieldValue.stringValue() != null) {
          tempTerm = new BytesRef(fieldValue.stringValue());
        } else {
          continue;
        }
File
DocumentDictionary.java
Developer's decision
Manual
Kind of conflict
Comment
Chunk
Conflicting content
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
<<<<<<< HEAD
import org.apache.lucene.document.FieldTypes;
=======
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
Solution content
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
File
LineFileDocs.java
Developer's decision
Combination
Kind of conflict
Import
Chunk
Conflicting content
  private final static char SEP = '\t';

<<<<<<< HEAD
=======
  private static final class DocState {
    final Document doc;
    final Field titleTokenized;
    final Field title;
    final Field titleDV;
    final Field body;
    final Field id;
    final Field idNum;
    final Field idNumDV;
    final Field date;

    public DocState(boolean useDocValues) {
      doc = new Document();
      
      title = new StringField("title", "", Field.Store.NO);
      doc.add(title);

      FieldType ft = new FieldType(TextField.TYPE_STORED);
      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
      ft.setStoreTermVectors(true);
      ft.setStoreTermVectorOffsets(true);
      ft.setStoreTermVectorPositions(true);
      
      titleTokenized = new Field("titleTokenized", "", ft);
      doc.add(titleTokenized);

      body = new Field("body", "", ft);
      doc.add(body);

      id = new StringField("docid", "", Field.Store.YES);
      doc.add(id);

      idNum = new IntField("docid_int", 0, Field.Store.NO);
      doc.add(idNum);

      date = new StringField("date", "", Field.Store.YES);
      doc.add(date);

      if (useDocValues) {
        titleDV = new SortedDocValuesField("titleDV", new BytesRef());
        idNumDV = new NumericDocValuesField("docid_intDV", 0);
        doc.add(titleDV);
        doc.add(idNumDV);
      } else {
        titleDV = null;
        idNumDV = null;
      }
    }
  }

  private final ThreadLocal threadDocs = new ThreadLocal<>();

>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
  /** Note: Document instance is re-used per-thread */
  public Document2 nextDoc() throws IOException {
    String line;
Solution content
  private final static char SEP = '\t';

  /** Note: Document instance is re-used per-thread */
  public Document2 nextDoc() throws IOException {
    String line;
File
LineFileDocs.java
Developer's decision
Version 1
Kind of conflict
Attribute
Class declaration
Chunk
Conflicting content
    doc.addLargeText("body", line.substring(1+spot2, line.length()));

    final String title = line.substring(0, spot);
<<<<<<< HEAD
    doc.addLargeText("titleTokenized", title);
    doc.addAtom("title", title);
    doc.addShortText("titleDV", title);

    doc.addAtom("date", line.substring(1+spot, spot2));
    int i = id.getAndIncrement();
    doc.addAtom("docid", Integer.toString(i));
    doc.addInt("docid_int", i);
    return doc;
=======
    docState.title.setStringValue(title);
    if (docState.titleDV != null) {
      docState.titleDV.setBytesValue(new BytesRef(title));
    }
    docState.titleTokenized.setStringValue(title);
    docState.date.setStringValue(line.substring(1+spot, spot2));
    final int i = id.getAndIncrement();
    docState.id.setStringValue(Integer.toString(i));
    docState.idNum.setIntValue(i);
    if (docState.idNumDV != null) {
      docState.idNumDV.setLongValue(i);
    }
    return docState.doc;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
  }
}
Solution content
    doc.addLargeText("body", line.substring(1+spot2, line.length()));

    final String title = line.substring(0, spot);
    doc.addLargeText("titleTokenized", title);
    doc.addAtom("title", title);
    doc.addShortText("titleDV", title);

    doc.addAtom("date", line.substring(1+spot, spot2));
    int i = id.getAndIncrement();
    doc.addAtom("docid", Integer.toString(i));
    doc.addInt("docid_int", i);
    return doc;
  }
}
File
LineFileDocs.java
Developer's decision
Version 1
Kind of conflict
Attribute
If statement
Method invocation
Return statement
Variable
Chunk
Conflicting content
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
<<<<<<< HEAD
import org.apache.lucene.document.Document2;
=======
import org.apache.lucene.document.Document;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
Solution content
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document2;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
File
LuceneTestCase.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
    }
  }

<<<<<<< HEAD
  /** For debugging: tries to include br.utf8ToString(), but if that
   *  fails (because it's not valid utf8, which is fine!), just
   *  use ordinary toString. */
  public static String brToString(BytesRef br) {
    if (br == null) {
      return "(null)";
    } else {
      try {
        return br.utf8ToString() + " " + br.toString();
      } catch (Throwable t) {
        // If BytesRef isn't actually UTF8, or it's eg a
        // prefix of UTF8 that ends mid-unicode-char, we
        // fallback to hex:
        return br.toString();
=======
  /** Returns true if this is an FSDirectory backed by {@link WindowsFS}. */
  public static boolean isWindowsFS(Directory dir) {
    // First unwrap directory to see if there is an FSDir:
    while (true) {
      if (dir instanceof FSDirectory) {
        return isWindowsFS(((FSDirectory) dir).getDirectory());
      } else if (dir instanceof FilterDirectory) {
        dir = ((FilterDirectory) dir).getDelegate();
      } else {
        return false;
      }
    }
  }

  /** Returns true if this Path is backed by {@link WindowsFS}. */
  public static boolean isWindowsFS(Path path) {
    FileSystem fs = path.getFileSystem();
    while (true) {
      if (fs instanceof FilterFileSystem) {
        if (((FilterFileSystem) fs).getParent() instanceof WindowsFS) {
          return true;
        }
        fs = ((FilterFileSystem) fs).getDelegate();
      } else {
        return false;
>>>>>>> 3b6e1677a097dca1611cd7c25ca2b16aadb9b43f
      }
    }
  }
Solution content
    }
  }

  /** For debugging: tries to include br.utf8ToString(), but if that
   *  fails (because it's not valid utf8, which is fine!), just
   *  use ordinary toString. */
  public static String brToString(BytesRef br) {
    if (br == null) {
      return "(null)";
    } else {
      try {
        return br.utf8ToString() + " " + br.toString();
      } catch (Throwable t) {
        // If BytesRef isn't actually UTF8, or it's eg a
        // prefix of UTF8 that ends mid-unicode-char, we
        // fallback to hex:
        return br.toString();
      }
    }
  }

  /** Returns true if this is an FSDirectory backed by {@link WindowsFS}. */
  public static boolean isWindowsFS(Directory dir) {
    // First unwrap directory to see if there is an FSDir:
    while (true) {
      if (dir instanceof FSDirectory) {
        return isWindowsFS(((FSDirectory) dir).getDirectory());
      } else if (dir instanceof FilterDirectory) {
        dir = ((FilterDirectory) dir).getDelegate();
      } else {
        return false;
      }
    }
  }

  /** Returns true if this Path is backed by {@link WindowsFS}. */
  public static boolean isWindowsFS(Path path) {
    FileSystem fs = path.getFileSystem();
    while (true) {
      if (fs instanceof FilterFileSystem) {
        if (((FilterFileSystem) fs).getParent() instanceof WindowsFS) {
          return true;
        }
        fs = ((FilterFileSystem) fs).getDelegate();
      } else {
        return false;
      }
    }
  }
File
TestUtil.java
Developer's decision
Manual
Kind of conflict
Catch clause
Comment
If statement
Method declaration
Method invocation
Method signature
Return statement
Try statement
Variable
While statement