| reverted: |
| --- lucene/src/java/org/apache/lucene/index/LogMergePolicy.java 2011-04-29 15:01:09.334508900 +0200 |
| +++ lucene/src/java/org/apache/lucene/index/LogMergePolicy.java 2011-04-03 19:24:37.805755100 +0200 |
| @@ -75,7 +75,7 @@ |
| protected double noCFSRatio = DEFAULT_NO_CFS_RATIO; |
| |
| protected boolean calibrateSizeByDeletes = true; |
| + |
| - |
| protected boolean useCompoundFile = true; |
| |
| public LogMergePolicy() { |
| @@ -103,7 +103,7 @@ |
| } |
| this.noCFSRatio = noCFSRatio; |
| } |
| + |
| - |
| protected void message(String message) { |
| if (verbose()) |
| writer.get().message("LMP: " + message); |
| @@ -169,7 +169,7 @@ |
| this.calibrateSizeByDeletes = calibrateSizeByDeletes; |
| } |
| |
| + /** Returns true if the segment size should be calibrated |
| - /** Returns true if the segment size should be calibrated |
| * by the number of deletes when choosing segments for merge. */ |
| public boolean getCalibrateSizeByDeletes() { |
| return calibrateSizeByDeletes; |
| @@ -189,7 +189,7 @@ |
| return info.docCount; |
| } |
| } |
| + |
| - |
| protected long sizeBytes(SegmentInfo info) throws IOException { |
| long byteSize = info.sizeInBytes(true); |
| if (calibrateSizeByDeletes) { |
| @@ -201,7 +201,7 @@ |
| return byteSize; |
| } |
| } |
| + |
| - |
| protected boolean isOptimized(SegmentInfos infos, int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException { |
| final int numSegments = infos.size(); |
| int numToOptimize = 0; |
| @@ -273,7 +273,7 @@ |
| |
| return spec.merges.size() == 0 ? null : spec; |
| } |
| + |
| - |
| /** |
| * Returns the merges necessary to optimize the index. This method constrains |
| * the returned merges only by the {@code maxNumSegments} parameter, and |
| @@ -281,7 +281,7 @@ |
| */ |
| private MergeSpecification findMergesForOptimizeMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException { |
| MergeSpecification spec = new MergeSpecification(); |
| + |
| - |
| // First, enroll all "full" merges (size |
| // mergeFactor) to potentially be run concurrently: |
| while (last - maxNumSegments + 1 >= mergeFactor) { |
| @@ -331,7 +331,7 @@ |
| } |
| return spec.merges.size() == 0 ? null : spec; |
| } |
| + |
| - |
| /** Returns the merges necessary to optimize the index. |
| * This merge policy defines "optimized" to mean only the |
| * requested number of segments is left in the index, and |
| @@ -379,7 +379,7 @@ |
| } |
| return null; |
| } |
| + |
| - |
| // There is only one segment already, and it is optimized |
| if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) { |
| if (verbose()) { |
| @@ -397,7 +397,7 @@ |
| break; |
| } |
| } |
| + |
| - |
| if (anyTooLarge) { |
| return findMergesForOptimizeSizeLimit(infos, maxNumSegments, last); |
| } else { |
| @@ -409,7 +409,7 @@ |
| * Finds merges necessary to expunge all deletes from the |
| * index. We simply merge adjacent segments that have |
| * deletes, up to mergeFactor at a time. |
| + */ |
| - */ |
| @Override |
| public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos) |
| throws CorruptIndexException, IOException { |
| @@ -462,7 +462,7 @@ |
| SegmentInfo info; |
| float level; |
| int index; |
| + |
| - |
| public SegmentInfoAndLevel(SegmentInfo info, float level, int index) { |
| this.info = info; |
| this.level = level; |
| @@ -658,5 +658,5 @@ |
| sb.append("]"); |
| return sb.toString(); |
| } |
| + |
| - |
| } |
| reverted: |
| --- lucene/src/java/org/apache/lucene/index/SegmentWriteState.java 2011-04-29 15:01:16.446915700 +0200 |
| +++ lucene/src/java/org/apache/lucene/index/SegmentWriteState.java 2011-02-15 09:56:15.754035600 +0100 |
| @@ -65,7 +65,7 @@ |
| this.segmentCodecs = segmentCodecs; |
| codecId = ""; |
| } |
| + |
| - |
| /** |
| * Create a shallow {@link SegmentWriteState} copy with a new codec ID |
| */ |
| reverted: |
| --- lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java 2011-04-29 15:01:27.452545200 +0200 |
| +++ lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java 2011-01-26 17:07:29.588994300 +0100 |
| @@ -36,7 +36,7 @@ |
| * indexed terms (many pairs of CharSequence text + long |
| * fileOffset), and then this reader must be able to |
| * retrieve the nearest index term to a provided term |
| + * text. |
| - * text. |
| * @lucene.experimental */ |
| |
| public abstract class TermsIndexReaderBase implements Closeable { |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java 2011-04-29 14:59:44.189638900 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java 2011-04-29 13:14:03.856676100 +0200 |
| @@ -42,7 +42,7 @@ |
| import org.apache.lucene.util._TestUtil; |
| |
| public class TestAddIndexes extends LuceneTestCase { |
| + |
| - |
| public void testSimpleCase() throws IOException { |
| // main directory |
| Directory dir = newDirectory(); |
| @@ -204,9 +204,9 @@ |
| doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED)); |
| writer.updateDocument(new Term("id", "" + (i%10)), doc); |
| } |
| + |
| - |
| writer.addIndexes(aux); |
| + |
| - |
| // Deletes one of the 10 added docs, leaving 9: |
| PhraseQuery q = new PhraseQuery(); |
| q.add(new Term("content", "bbb")); |
| @@ -619,7 +619,7 @@ |
| doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| writer.addDocument(doc); |
| } |
| + |
| - |
| private abstract class RunAddIndexesThreads { |
| |
| Directory dir, dir2; |
| @@ -646,7 +646,7 @@ |
| writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| writer2.setInfoStream(VERBOSE ? System.out : null); |
| writer2.commit(); |
| + |
| - |
| |
| readers = new IndexReader[NUM_COPY]; |
| for(int i=0;i<NUM_COPY;i++) |
| @@ -754,7 +754,7 @@ |
| } |
| } |
| } |
| + |
| - |
| // LUCENE-1335: test simultaneous addIndexes & commits |
| // from multiple threads |
| public void testAddIndexesWithThreads() throws Throwable { |
| @@ -1069,9 +1069,9 @@ |
| w.addDocument(d); |
| w.close(); |
| } |
| + |
| - |
| IndexReader[] readers = new IndexReader[] { IndexReader.open(dirs[0]), IndexReader.open(dirs[1]) }; |
| + |
| - |
| Directory dir = new RAMDirectory(); |
| IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()); |
| LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); |
| @@ -1083,5 +1083,5 @@ |
| // we should now see segments_X, segments.gen, _Y.cfs, _Z.fnx |
| assertEquals("Only one compound segment should exist", 4, dir.listAll().length); |
| } |
| + |
| - |
| } |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestByteSlices.java 2011-04-29 14:59:38.754328000 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestByteSlices.java 2011-01-26 17:02:37.784304000 +0100 |
| @@ -39,7 +39,7 @@ |
| starts[stream] = -1; |
| counters[stream] = 0; |
| } |
| + |
| - |
| int num = 10000 * RANDOM_MULTIPLIER; |
| for (int iter = 0; iter < num; iter++) { |
| int stream = random.nextInt(NUM_STREAM); |
| @@ -67,7 +67,7 @@ |
| if (VERBOSE) |
| System.out.println(" addr now " + uptos[stream]); |
| } |
| + |
| - |
| for(int stream=0;stream<NUM_STREAM;stream++) { |
| if (VERBOSE) |
| System.out.println(" stream=" + stream + " count=" + counters[stream]); |
| @@ -76,7 +76,7 @@ |
| reader.init(pool, starts[stream], uptos[stream]); |
| for(int j=0;j<counters[stream];j++) { |
| reader.readVInt(); |
| + assertEquals(j, reader.readVInt()); |
| - assertEquals(j, reader.readVInt()); |
| } |
| } |
| } |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestCodecs.java 2011-04-29 14:59:36.405193600 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestCodecs.java 2011-04-14 09:38:41.284849400 +0200 |
| @@ -381,7 +381,7 @@ |
| this.register(new MockSepCodec()); |
| this.setDefaultFieldCodec("MockSep"); |
| } |
| + |
| - |
| } |
| |
| private class Verify extends Thread { |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java 2011-04-29 14:59:49.744956600 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java 2011-04-29 13:14:03.856676100 +0200 |
| @@ -40,7 +40,7 @@ |
| */ |
| |
| public class TestDeletionPolicy extends LuceneTestCase { |
| + |
| - |
| private void verifyCommitOrder(List<? extends IndexCommit> commits) throws IOException { |
| final IndexCommit firstCommit = commits.get(0); |
| long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName()); |
| @@ -135,7 +135,7 @@ |
| verifyCommitOrder(commits); |
| doDeletes(commits, true); |
| } |
| + |
| - |
| private void doDeletes(List<? extends IndexCommit> commits, boolean isCommit) { |
| |
| // Assert that we really are only called for each new |
| @@ -248,7 +248,7 @@ |
| // seconds of the last one's mod time, and that I can |
| // open a reader on each: |
| long gen = SegmentInfos.getCurrentSegmentGeneration(dir); |
| + |
| - |
| String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, |
| "", |
| gen); |
| @@ -276,7 +276,7 @@ |
| // OK |
| break; |
| } |
| + |
| - |
| dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen)); |
| gen--; |
| } |
| @@ -449,7 +449,7 @@ |
| |
| // Now 7 because we made another commit |
| assertEquals(7, IndexReader.listCommits(dir).size()); |
| + |
| - |
| r = IndexReader.open(dir, true); |
| // Not optimized because we rolled it back, and now only |
| // 10 docs |
| @@ -471,7 +471,7 @@ |
| // but this time keeping only the last commit: |
| writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit)); |
| assertEquals(10, writer.numDocs()); |
| + |
| - |
| // Reader still sees optimized index, because writer |
| // opened on the prior commit has not yet committed: |
| r = IndexReader.open(dir, true); |
| @@ -626,7 +626,7 @@ |
| } |
| IndexWriter writer = new IndexWriter(dir, conf); |
| writer.close(); |
| + Term searchTerm = new Term("content", "aaa"); |
| - Term searchTerm = new Term("content", "aaa"); |
| Query query = new TermQuery(searchTerm); |
| |
| for(int i=0;i<N+1;i++) { |
| @@ -731,7 +731,7 @@ |
| * around, through creates. |
| */ |
| public void testKeepLastNDeletionPolicyWithCreates() throws IOException { |
| + |
| - |
| final int N = 10; |
| |
| for(int pass=0;pass<2;pass++) { |
| @@ -751,7 +751,7 @@ |
| } |
| IndexWriter writer = new IndexWriter(dir, conf); |
| writer.close(); |
| + Term searchTerm = new Term("content", "aaa"); |
| - Term searchTerm = new Term("content", "aaa"); |
| Query query = new TermQuery(searchTerm); |
| |
| for(int i=0;i<N+1;i++) { |
| @@ -833,7 +833,7 @@ |
| } |
| gen--; |
| } |
| + |
| - |
| dir.close(); |
| } |
| } |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestDoc.java 2011-04-29 14:59:28.030714600 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestDoc.java 2011-04-22 21:46:44.015868900 +0200 |
| @@ -111,7 +111,7 @@ |
| public void testIndexAndMerge() throws Exception { |
| StringWriter sw = new StringWriter(); |
| PrintWriter out = new PrintWriter(sw, true); |
| + |
| - |
| Directory directory = newFSDirectory(indexDir); |
| IndexWriter writer = new IndexWriter( |
| directory, |
| @@ -136,7 +136,7 @@ |
| |
| SegmentInfo siMerge3 = merge(siMerge, siMerge2, "merge3", false); |
| printSegment(out, siMerge3); |
| + |
| - |
| directory.close(); |
| out.close(); |
| sw.close(); |
| @@ -170,7 +170,7 @@ |
| |
| siMerge3 = merge(siMerge, siMerge2, "merge3", true); |
| printSegment(out, siMerge3); |
| + |
| - |
| directory.close(); |
| out.close(); |
| sw.close(); |
| @@ -207,11 +207,11 @@ |
| final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, |
| false, fieldInfos.hasProx(), merger.getSegmentCodecs(), |
| fieldInfos.hasVectors(), fieldInfos); |
| + |
| - |
| if (useCompoundFile) { |
| Collection<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info); |
| info.setUseCompoundFile(true); |
| + for (final String fileToDelete : filesToDelete) |
| - for (final String fileToDelete : filesToDelete) |
| si1.dir.deleteFile(fileToDelete); |
| } |
| |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java 2011-04-29 14:59:34.785100900 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java 2011-04-14 09:38:41.220848000 +0200 |
| @@ -38,7 +38,7 @@ |
| */ |
| |
| public class TestIndexFileDeleter extends LuceneTestCase { |
| + |
| - |
| public void testDeleteLeftoverFiles() throws IOException { |
| MockDirectoryWrapper dir = newDirectory(); |
| dir.setPreventDoubleWrite(false); |
| @@ -124,7 +124,7 @@ |
| copyFile(dir, "_2_1." + normSuffix, "_1_1.f" + contentFieldIndex); |
| |
| // Create a bogus separate del file for a |
| + // segment that already has a separate del file: |
| - // segment that already has a separate del file: |
| copyFile(dir, "_0_1.del", "_0_2.del"); |
| |
| // Create a bogus separate del file for a |
| @@ -140,14 +140,14 @@ |
| |
| // Create a bogus fnm file when the CFS already exists: |
| copyFile(dir, "_0.cfs", "_0.fnm"); |
| + |
| - |
| // Create some old segments file: |
| copyFile(dir, "segments_2", "segments"); |
| copyFile(dir, "segments_2", "segments_1"); |
| |
| // Create a bogus cfs file shadowing a non-cfs segment: |
| copyFile(dir, "_1.cfs", "_2.cfs"); |
| + |
| - |
| String[] filesPre = dir.listAll(); |
| |
| // Open & close a writer: it should delete the above 4 |
| @@ -160,9 +160,9 @@ |
| |
| Arrays.sort(files); |
| Arrays.sort(files2); |
| + |
| - |
| Set<String> dif = difFiles(files, files2); |
| + |
| - |
| if (!Arrays.equals(files, files2)) { |
| fail("IndexFileDeleter failed to delete unreferenced extra files: should have deleted " + (filesPre.length-files.length) + " files but only deleted " + (filesPre.length - files2.length) + "; expected files:\n " + asString(files) + "\n actual files:\n " + asString(files2)+"\ndif: "+dif); |
| } |
| @@ -172,7 +172,7 @@ |
| Set<String> set1 = new HashSet<String>(); |
| Set<String> set2 = new HashSet<String>(); |
| Set<String> extra = new HashSet<String>(); |
| + |
| - |
| for (int x=0; x < files1.length; x++) { |
| set1.add(files1[x]); |
| } |
| @@ -195,7 +195,7 @@ |
| } |
| return extra; |
| } |
| + |
| - |
| private String asString(String[] l) { |
| String s = ""; |
| for(int i=0;i<l.length;i++) { |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestIndexReader.java 2011-04-29 14:59:30.314845300 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java 2011-04-22 21:46:44.024869400 +0200 |
| @@ -60,13 +60,13 @@ |
| |
| public class TestIndexReader extends LuceneTestCase |
| { |
| + |
| - |
| public void testCommitUserData() throws Exception { |
| Directory d = newDirectory(); |
| |
| Map<String,String> commitUserData = new HashMap<String,String>(); |
| commitUserData.put("foo", "fighters"); |
| + |
| - |
| // set up writer |
| IndexWriter writer = new IndexWriter(d, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random)) |
| @@ -74,12 +74,12 @@ |
| for(int i=0;i<27;i++) |
| addDocumentWithFields(writer); |
| writer.close(); |
| + |
| - |
| IndexReader r = IndexReader.open(d, false); |
| r.deleteDocument(5); |
| r.flush(commitUserData); |
| r.close(); |
| + |
| - |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(d); |
| IndexReader r2 = IndexReader.open(d, false); |
| @@ -115,10 +115,10 @@ |
| r3.close(); |
| d.close(); |
| } |
| + |
| - |
| public void testIsCurrent() throws Exception { |
| Directory d = newDirectory(); |
| + IndexWriter writer = new IndexWriter(d, newIndexWriterConfig( |
| - IndexWriter writer = new IndexWriter(d, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| addDocumentWithFields(writer); |
| writer.close(); |
| @@ -205,7 +205,7 @@ |
| doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| writer.addDocument(doc); |
| } |
| + |
| - |
| writer.close(); |
| // verify fields again |
| reader = IndexReader.open(d, false); |
| @@ -224,10 +224,10 @@ |
| assertTrue(fieldNames.contains("tvposition")); |
| assertTrue(fieldNames.contains("tvoffset")); |
| assertTrue(fieldNames.contains("tvpositionoffset")); |
| + |
| - |
| // verify that only indexed fields were returned |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED); |
| + assertEquals(11, fieldNames.size()); // 6 original + the 5 termvector fields |
| - assertEquals(11, fieldNames.size()); // 6 original + the 5 termvector fields |
| assertTrue(fieldNames.contains("keyword")); |
| assertTrue(fieldNames.contains("text")); |
| assertTrue(fieldNames.contains("unstored")); |
| @@ -239,26 +239,26 @@ |
| assertTrue(fieldNames.contains("tvposition")); |
| assertTrue(fieldNames.contains("tvoffset")); |
| assertTrue(fieldNames.contains("tvpositionoffset")); |
| + |
| - |
| // verify that only unindexed fields were returned |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED); |
| assertEquals(2, fieldNames.size()); // the following fields |
| assertTrue(fieldNames.contains("unindexed")); |
| assertTrue(fieldNames.contains("unindexed2")); |
| + |
| + // verify index term vector fields |
| - |
| - // verify index term vector fields |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR); |
| assertEquals(1, fieldNames.size()); // 1 field has term vector only |
| assertTrue(fieldNames.contains("termvector")); |
| + |
| - |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION); |
| assertEquals(1, fieldNames.size()); // 1 field is indexed with term vectors + positions |
| assertTrue(fieldNames.contains("tvposition")); |
| + |
| - |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET); |
| assertEquals(1, fieldNames.size()); // 1 field is indexed with term vectors + offsets |
| assertTrue(fieldNames.contains("tvoffset")); |
| + |
| - |
| fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET); |
| assertEquals(1, fieldNames.size()); // 1 field is indexed with term vectors + positions + offsets |
| assertTrue(fieldNames.contains("tvpositionoffset")); |
| @@ -366,13 +366,13 @@ |
| reader2.close(); |
| dir.close(); |
| } |
| + |
| - |
| public void testBinaryFields() throws IOException { |
| Directory dir = newDirectory(); |
| byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; |
| + |
| - |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); |
| + |
| - |
| for (int i = 0; i < 10; i++) { |
| addDoc(writer, "document number " + (i + 1)); |
| addDocumentWithFields(writer); |
| @@ -589,14 +589,14 @@ |
| reader = IndexReader.open(dir, false); |
| reader.setNorm(0, "content", sim.encodeNormValue(2.0f)); |
| reader.close(); |
| + |
| - |
| // now open reader again & set norm for doc 0 (writes to _0_2.s0) |
| reader = IndexReader.open(dir, false); |
| reader.setNorm(0, "content", sim.encodeNormValue(2.0f)); |
| reader.close(); |
| assertFalse("failed to remove first generation norms file on writing second generation", |
| dir.fileExists("_0_1.s0")); |
| + |
| - |
| dir.close(); |
| } |
| |
| @@ -619,7 +619,7 @@ |
| } |
| rmDir(fileDirName); |
| }*/ |
| + |
| - |
| public void testDeleteReaderWriterConflictOptimized() throws IOException{ |
| deleteReaderWriterConflict(true); |
| } |
| @@ -802,7 +802,7 @@ |
| // expected exception |
| } |
| try { |
| + IndexWriter.unlock(dir); // this should not be done in the real world! |
| - IndexWriter.unlock(dir); // this should not be done in the real world! |
| } catch (LockReleaseFailedException lrfe) { |
| writer.close(); |
| } |
| @@ -866,7 +866,7 @@ |
| public void testDeleteReaderReaderConflictUnoptimized() throws IOException{ |
| deleteReaderReaderConflict(false); |
| } |
| + |
| - |
| public void testDeleteReaderReaderConflictOptimized() throws IOException{ |
| deleteReaderReaderConflict(true); |
| } |
| @@ -880,7 +880,7 @@ |
| Term searchTerm = new Term("content", "aaa"); |
| int START_COUNT = 157; |
| int END_COUNT = 144; |
| + |
| - |
| // First build up a starting index: |
| MockDirectoryWrapper startDir = newDirectory(); |
| IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| @@ -1066,7 +1066,7 @@ |
| } |
| |
| public void testDocsOutOfOrderJIRA140() throws IOException { |
| + Directory dir = newDirectory(); |
| - Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| for(int i=0;i<11;i++) { |
| addDoc(writer, "aaa"); |
| @@ -1106,7 +1106,7 @@ |
| |
| public void testExceptionReleaseWriteLockJIRA768() throws IOException { |
| |
| + Directory dir = newDirectory(); |
| - Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| addDoc(writer, "aaa"); |
| writer.close(); |
| @@ -1157,7 +1157,7 @@ |
| } catch (FileNotFoundException e) { |
| // expected |
| } |
| + |
| - |
| dir.close(); |
| } |
| |
| @@ -1315,10 +1315,10 @@ |
| doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); |
| doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + |
| - |
| writer.addDocument(doc); |
| } |
| + |
| - |
| private void addDoc(IndexWriter writer, String value) throws IOException { |
| Document doc = new Document(); |
| doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED)); |
| @@ -1330,7 +1330,7 @@ |
| assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc()); |
| assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions()); |
| assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized()); |
| + |
| - |
| // check field names |
| Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL); |
| Collection<String> fields2 = index1.getFieldNames(FieldOption.ALL); |
| @@ -1340,7 +1340,7 @@ |
| while (it1.hasNext()) { |
| assertEquals("Different field names.", it1.next(), it2.next()); |
| } |
| + |
| - |
| // check norms |
| it1 = fields1.iterator(); |
| while (it1.hasNext()) { |
| @@ -1359,7 +1359,7 @@ |
| assertSame(norms1, norms2); |
| } |
| } |
| + |
| - |
| // check deletions |
| final Bits delDocs1 = MultiFields.getDeletedDocs(index1); |
| final Bits delDocs2 = MultiFields.getDeletedDocs(index2); |
| @@ -1368,7 +1368,7 @@ |
| delDocs1 == null || delDocs1.get(i), |
| delDocs2 == null || delDocs2.get(i)); |
| } |
| + |
| - |
| // check stored fields |
| for (int i = 0; i < index1.maxDoc(); i++) { |
| if (delDocs1 == null || !delDocs1.get(i)) { |
| @@ -1384,10 +1384,10 @@ |
| Field curField2 = (Field) itField2.next(); |
| assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name()); |
| assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue()); |
| + } |
| - } |
| } |
| } |
| + |
| - |
| // check dictionary and posting lists |
| FieldsEnum fenum1 = MultiFields.getFields(index1).iterator(); |
| FieldsEnum fenum2 = MultiFields.getFields(index1).iterator(); |
| @@ -1467,7 +1467,7 @@ |
| r.close(); |
| r2.close(); |
| d.close(); |
| + } |
| - } |
| |
| public void testReadOnly() throws Throwable { |
| Directory d = newDirectory(); |
| @@ -1518,7 +1518,7 @@ |
| IndexReader r3 = r2.reopen(); |
| assertFalse(r3 == r2); |
| r2.close(); |
| + |
| - |
| assertFalse(r == r2); |
| |
| try { |
| @@ -1602,7 +1602,7 @@ |
| public void testNoDupCommitFileNames() throws Throwable { |
| |
| Directory dir = newDirectory(); |
| + |
| - |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random)) |
| .setMaxBufferedDocs(2)); |
| @@ -1610,12 +1610,12 @@ |
| writer.addDocument(createDocument("a")); |
| writer.addDocument(createDocument("a")); |
| writer.close(); |
| + |
| - |
| Collection<IndexCommit> commits = IndexReader.listCommits(dir); |
| for (final IndexCommit commit : commits) { |
| Collection<String> files = commit.getFileNames(); |
| HashSet<String> seen = new HashSet<String>(); |
| + for (final String fileName : files) { |
| - for (final String fileName : files) { |
| assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName)); |
| seen.add(fileName); |
| } |
| @@ -1820,7 +1820,7 @@ |
| // LUCENE-2046 |
| public void testPrepareCommitIsCurrent() throws Throwable { |
| Directory dir = newDirectory(); |
| + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| writer.commit(); |
| Document doc = new Document(); |
| @@ -1838,12 +1838,12 @@ |
| r.close(); |
| dir.close(); |
| } |
| + |
| - |
| // LUCENE-2753 |
| public void testListCommits() throws Exception { |
| Directory dir = newDirectory(); |
| SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); |
| + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, null).setIndexDeletionPolicy(sdp)); |
| writer.addDocument(new Document()); |
| writer.commit(); |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java 2011-04-29 14:59:28.424737200 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java 2011-04-14 09:38:40.969639700 +0200 |
| @@ -34,7 +34,7 @@ |
| * implemented properly |
| */ |
| public class TestIndexReaderClone extends LuceneTestCase { |
| + |
| - |
| public void testCloneReadOnlySegmentReader() throws Exception { |
| final Directory dir1 = newDirectory(); |
| |
| @@ -67,7 +67,7 @@ |
| r2.close(); |
| dir1.close(); |
| } |
| + |
| - |
| // open non-readOnly reader1, clone to non-readOnly |
| // reader2, make sure we can change reader1 |
| public void testCloneWriteToOrig() throws Exception { |
| @@ -83,7 +83,7 @@ |
| r2.close(); |
| dir1.close(); |
| } |
| + |
| - |
| // open non-readOnly reader1, clone to non-readOnly |
| // reader2, make sure we can change reader2 |
| public void testCloneWriteToClone() throws Exception { |
| @@ -105,7 +105,7 @@ |
| |
| dir1.close(); |
| } |
| + |
| - |
| // create single-segment index, open non-readOnly |
| // SegmentReader, add docs, reopen to multireader, then do |
| // delete |
| @@ -116,7 +116,7 @@ |
| IndexReader reader1 = IndexReader.open(dir1, false); |
| |
| TestIndexReaderReopen.modifyIndex(5, dir1); |
| + |
| - |
| IndexReader reader2 = reader1.reopen(); |
| assertTrue(reader1 != reader2); |
| |
| @@ -208,7 +208,7 @@ |
| reader2.close(); |
| dir1.close(); |
| } |
| + |
| - |
| private static boolean deleteWorked(int doc, IndexReader r) { |
| boolean exception = false; |
| try { |
| @@ -219,7 +219,7 @@ |
| } |
| return !exception; |
| } |
| + |
| - |
| public void testCloneReadOnlyDirectoryReader() throws Exception { |
| final Directory dir1 = newDirectory(); |
| |
| @@ -268,7 +268,7 @@ |
| * are not the same on each reader 5. Verify the doc deleted is only in the |
| * cloned reader 6. Try to delete a document in the original reader, an |
| * exception should be thrown |
| + * |
| - * |
| * @param r1 IndexReader to perform tests on |
| * @throws Exception |
| */ |
| @@ -323,7 +323,7 @@ |
| // need to test norms? |
| dir1.close(); |
| } |
| + |
| - |
| public void testSegmentReaderCloseReferencing() throws Exception { |
| final Directory dir1 = newDirectory(); |
| TestIndexReaderReopen.createIndex(random, dir1, false); |
| @@ -343,7 +343,7 @@ |
| clonedSegmentReader.close(); |
| dir1.close(); |
| } |
| + |
| - |
| public void testSegmentReaderDelDocsReferenceCounting() throws Exception { |
| final Directory dir1 = newDirectory(); |
| TestIndexReaderReopen.createIndex(random, dir1, false); |
| @@ -454,16 +454,16 @@ |
| private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) { |
| assertEquals(refCount, reader.deletedDocsRef.get()); |
| } |
| + |
| - |
| public void testCloneSubreaders() throws Exception { |
| final Directory dir1 = newDirectory(); |
| + |
| - |
| TestIndexReaderReopen.createIndex(random, dir1, true); |
| IndexReader reader = IndexReader.open(dir1, false); |
| reader.deleteDocument(1); // acquire write lock |
| IndexReader[] subs = reader.getSequentialSubReaders(); |
| assert subs.length > 1; |
| + |
| - |
| IndexReader[] clones = new IndexReader[subs.length]; |
| for (int x=0; x < subs.length; x++) { |
| clones[x] = (IndexReader) subs[x].clone(); |
| @@ -483,9 +483,9 @@ |
| IndexReader r2 = r1.clone(false); |
| r1.deleteDocument(5); |
| r1.decRef(); |
| + |
| - |
| r1.incRef(); |
| + |
| - |
| r2.close(); |
| r1.decRef(); |
| r1.close(); |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java 2011-04-29 14:59:49.422938200 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java 2011-04-24 12:25:14.895465900 +0200 |
| @@ -37,7 +37,7 @@ |
| */ |
| public class TestIndexWriterOnJRECrash extends TestNRTThreads { |
| private File tempDir; |
| + |
| - |
| @Override |
| public void setUp() throws Exception { |
| super.setUp(); |
| @@ -45,13 +45,13 @@ |
| tempDir.delete(); |
| tempDir.mkdir(); |
| } |
| + |
| - |
| @Override |
| public void testNRTThreads() throws Exception { |
| String vendor = Constants.JAVA_VENDOR; |
| + assumeTrue(vendor + " JRE not supported.", |
| - assumeTrue(vendor + " JRE not supported.", |
| vendor.startsWith("Sun") || vendor.startsWith("Apple")); |
| + |
| - |
| // if we are not the fork |
| if (System.getProperty("tests.crashmode") == null) { |
| // try up to 10 times to create an index |
| @@ -81,11 +81,11 @@ |
| } |
| } |
| } |
| + |
| - |
| /** fork ourselves in a new jvm. sets -Dtests.crashmode=true */ |
| public void forkTest() throws Exception { |
| List<String> cmd = new ArrayList<String>(); |
| + cmd.add(System.getProperty("java.home") |
| - cmd.add(System.getProperty("java.home") |
| + System.getProperty("file.separator") |
| + "bin" |
| + System.getProperty("file.separator") |
| @@ -116,7 +116,7 @@ |
| if (VERBOSE) System.err.println("<<< End subprocess output"); |
| p.waitFor(); |
| } |
| + |
| - |
| /** |
| * Recursively looks for indexes underneath <code>file</code>, |
| * and runs checkindex on them. returns true if it found any indexes. |
| @@ -139,7 +139,7 @@ |
| } |
| return false; |
| } |
| + |
| - |
| /** |
| * currently, this only works on, and is tested on, Sun and IBM. |
| */ |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java 2011-04-29 14:59:51.683067500 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java 2011-04-14 09:38:41.833854900 +0200 |
| @@ -44,7 +44,7 @@ |
| public class TestLazyProxSkipping extends LuceneTestCase { |
| private IndexSearcher searcher; |
| private int seeksCounter = 0; |
| + |
| - |
| private String field = "tokens"; |
| private String term1 = "xx"; |
| private String term2 = "yy"; |
| @@ -64,12 +64,12 @@ |
| } |
| return ii; |
| } |
| + |
| - |
| } |
| + |
| - |
| private void createIndex(int numHits) throws IOException { |
| int numDocs = 500; |
| + |
| - |
| final Analyzer analyzer = new Analyzer() { |
| @Override |
| public TokenStream tokenStream(String fieldName, Reader reader) { |
| @@ -101,7 +101,7 @@ |
| doc.add(newField(this.field, content, Field.Store.YES, Field.Index.ANALYZED)); |
| writer.addDocument(doc); |
| } |
| + |
| - |
| // make sure the index has only a single segment |
| writer.optimize(); |
| writer.close(); |
| @@ -110,27 +110,27 @@ |
| |
| this.searcher = newSearcher(reader); |
| } |
| + |
| - |
| private ScoreDoc[] search() throws IOException { |
| // create PhraseQuery "term1 term2" and search |
| PhraseQuery pq = new PhraseQuery(); |
| pq.add(new Term(this.field, this.term1)); |
| pq.add(new Term(this.field, this.term2)); |
| + return this.searcher.search(pq, null, 1000).scoreDocs; |
| - return this.searcher.search(pq, null, 1000).scoreDocs; |
| } |
| + |
| - |
| private void performTest(int numHits) throws IOException { |
| createIndex(numHits); |
| this.seeksCounter = 0; |
| ScoreDoc[] hits = search(); |
| // verify that the right number of docs was found |
| assertEquals(numHits, hits.length); |
| + |
| - |
| // check that the number of calls to seek() does not exceed the number of hits |
| assertTrue(this.seeksCounter > 0); |
| assertTrue("seeksCounter=" + this.seeksCounter + " numHits=" + numHits, this.seeksCounter <= numHits + 1); |
| } |
| + |
| - |
| public void testLazySkipping() throws IOException { |
| assumeFalse("This test cannot run with SimpleText codec", CodecProvider.getDefault().getFieldCodec(this.field).equals("SimpleText")); |
| // test whether only the minimum amount of seeks() |
| @@ -140,7 +140,7 @@ |
| performTest(10); |
| searcher.close(); |
| } |
| + |
| - |
| public void testSeek() throws IOException { |
| Directory directory = newDirectory(); |
| IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| @@ -149,7 +149,7 @@ |
| doc.add(newField(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED)); |
| writer.addDocument(doc); |
| } |
| + |
| - |
| writer.close(); |
| IndexReader reader = IndexReader.open(directory, true); |
| |
| @@ -176,55 +176,55 @@ |
| } |
| reader.close(); |
| directory.close(); |
| + |
| - |
| } |
| + |
| - |
| |
| // Simply extends IndexInput in a way that we are able to count the number |
| // of invocations of seek() |
| class SeeksCountingStream extends IndexInput { |
| + private IndexInput input; |
| + |
| + |
| - private IndexInput input; |
| - |
| - |
| SeeksCountingStream(IndexInput input) { |
| this.input = input; |
| + } |
| + |
| - } |
| - |
| @Override |
| public byte readByte() throws IOException { |
| return this.input.readByte(); |
| } |
| + |
| - |
| @Override |
| public void readBytes(byte[] b, int offset, int len) throws IOException { |
| + this.input.readBytes(b, offset, len); |
| - this.input.readBytes(b, offset, len); |
| } |
| + |
| - |
| @Override |
| public void close() throws IOException { |
| this.input.close(); |
| } |
| + |
| - |
| @Override |
| public long getFilePointer() { |
| return this.input.getFilePointer(); |
| } |
| + |
| - |
| @Override |
| public void seek(long pos) throws IOException { |
| TestLazyProxSkipping.this.seeksCounter++; |
| this.input.seek(pos); |
| } |
| + |
| - |
| @Override |
| public long length() { |
| return this.input.length(); |
| } |
| + |
| - |
| @Override |
| public Object clone() { |
| return new SeeksCountingStream((IndexInput) this.input.clone()); |
| } |
| + |
| - |
| } |
| } |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java 2011-04-29 14:59:43.109577100 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java 2011-04-14 09:38:41.521854400 +0200 |
| @@ -44,7 +44,7 @@ |
| indexThreads[x] = new RunThread(x % 2, writer); |
| indexThreads[x].setName("Thread " + x); |
| indexThreads[x].start(); |
| + } |
| - } |
| long startTime = System.currentTimeMillis(); |
| long duration = 1000; |
| while ((System.currentTimeMillis() - startTime) < duration) { |
| @@ -78,7 +78,7 @@ |
| int addCount = 0; |
| int type; |
| final Random r = new Random(random.nextLong()); |
| + |
| - |
| public RunThread(int type, IndexWriter writer) { |
| this.type = type; |
| this.writer = writer; |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestNRTThreads.java 2011-04-29 14:59:40.105405200 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestNRTThreads.java 2011-04-14 09:38:41.349853200 +0200 |
| @@ -341,7 +341,7 @@ |
| if (VERBOSE) { |
| System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount); |
| } |
| + |
| - |
| final IndexReader r2 = writer.getReader(); |
| final IndexSearcher s = newSearcher(r2); |
| boolean doFail = false; |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java 2011-04-29 14:59:50.201982700 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java 2011-04-14 09:38:41.787054800 +0200 |
| @@ -43,7 +43,7 @@ |
| import org.junit.Test; |
| |
| /** |
| + * |
| - * |
| * |
| */ |
| public class TestPerFieldCodecSupport extends LuceneTestCase { |
| @@ -312,4 +312,4 @@ |
| } |
| dir.close(); |
| } |
| +} |
| -} |
| \ No newline at end of file |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java 2011-04-29 14:59:31.205896200 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java 2011-01-26 17:02:37.468285900 +0100 |
| @@ -33,7 +33,7 @@ |
| } |
| writer.commit(); |
| } |
| + |
| - |
| private static IndexWriterConfig newWriterConfig() throws IOException { |
| IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null); |
| conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); |
| @@ -42,7 +42,7 @@ |
| conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES); |
| return conf; |
| } |
| + |
| - |
| public void testByteSizeLimit() throws Exception { |
| // tests that the max merge size constraint is applied during optimize. |
| Directory dir = new RAMDirectory(); |
| @@ -65,7 +65,7 @@ |
| LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(); |
| lmp.setMaxMergeMBForOptimize((min + 1) / (1 << 20)); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| @@ -91,14 +91,14 @@ |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| @@ -119,14 +119,14 @@ |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 5); |
| + |
| - |
| writer.close(); |
| |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| @@ -135,120 +135,120 @@ |
| sis.read(dir); |
| assertEquals(2, sis.size()); |
| } |
| + |
| - |
| public void testFirstSegmentTooLarge() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 5); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(2, sis.size()); |
| } |
| + |
| - |
| public void testAllSegmentsSmall() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(1, sis.size()); |
| } |
| + |
| - |
| public void testAllSegmentsLarge() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(2); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(3, sis.size()); |
| } |
| + |
| - |
| public void testOneLargeOneSmall() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| addDocs(writer, 5); |
| addDocs(writer, 3); |
| addDocs(writer, 5); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(4, sis.size()); |
| } |
| + |
| - |
| public void testMergeFactor() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| @@ -256,78 +256,78 @@ |
| addDocs(writer, 5); |
| addDocs(writer, 3); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| lmp.setMergeFactor(2); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| // Should only be 4 segments in the index, because of the merge factor and |
| // max merge docs settings. |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(4, sis.size()); |
| } |
| + |
| - |
| public void testSingleNonOptimizedSegment() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| addDocs(writer, 5); |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| // delete the last document, so that the last segment is no longer optimized. |
| IndexReader r = IndexReader.open(dir, false); |
| r.deleteDocument(r.numDocs() - 1); |
| r.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| // Verify that the last segment does not have deletions. |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| assertEquals(3, sis.size()); |
| assertFalse(sis.info(2).hasDeletions()); |
| } |
| + |
| - |
| public void testSingleOptimizedSegment() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 3); |
| + |
| - |
| writer.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(3); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| // Verify that the last segment does not have deletions. |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| @@ -336,28 +336,28 @@ |
| |
| public void testSingleNonOptimizedTooLargeSegment() throws Exception { |
| Directory dir = new RAMDirectory(); |
| + |
| - |
| IndexWriterConfig conf = newWriterConfig(); |
| IndexWriter writer = new IndexWriter(dir, conf); |
| + |
| - |
| addDocs(writer, 5); |
| + |
| - |
| writer.close(); |
| + |
| - |
| // delete the last document |
| IndexReader r = IndexReader.open(dir, false); |
| r.deleteDocument(r.numDocs() - 1); |
| r.close(); |
| + |
| - |
| conf = newWriterConfig(); |
| LogMergePolicy lmp = new LogDocMergePolicy(); |
| lmp.setMaxMergeDocs(2); |
| conf.setMergePolicy(lmp); |
| + |
| - |
| writer = new IndexWriter(dir, conf); |
| writer.optimize(); |
| writer.close(); |
| + |
| - |
| // Verify that the last segment does not have deletions. |
| SegmentInfos sis = new SegmentInfos(); |
| sis.read(dir); |
| reverted: |
| --- lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java 2011-04-29 14:59:32.242955500 +0200 |
| +++ lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java 2011-01-26 17:02:37.489287100 +0100 |
| @@ -121,7 +121,7 @@ |
| |
| fieldInfos = new FieldInfos(dir, IndexFileNames.segmentFileName(seg, "", IndexFileNames.FIELD_INFOS_EXTENSION)); |
| } |
| + |
| - |
| @Override |
| public void tearDown() throws Exception { |
| dir.close(); |
| @@ -130,17 +130,17 @@ |
| |
| private class MyTokenStream extends TokenStream { |
| int tokenUpto; |
| + |
| - |
| CharTermAttribute termAtt; |
| PositionIncrementAttribute posIncrAtt; |
| OffsetAttribute offsetAtt; |
| + |
| - |
| public MyTokenStream() { |
| termAtt = addAttribute(CharTermAttribute.class); |
| posIncrAtt = addAttribute(PositionIncrementAttribute.class); |
| offsetAtt = addAttribute(OffsetAttribute.class); |
| } |
| + |
| - |
| @Override |
| public boolean incrementToken() { |
| if (tokenUpto >= tokens.length) |
| reverted: |
| --- lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java 2011-04-29 14:59:21.668350700 +0200 |
| +++ lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java 2011-04-14 09:38:37.350433300 +0200 |
| @@ -67,7 +67,7 @@ |
| |
| // ignore deletions |
| CachingSpanFilter filter = new CachingSpanFilter(startFilter, CachingWrapperFilter.DeletesMode.IGNORE); |
| + |
| - |
| docs = searcher.search(new MatchAllDocsQuery(), filter, 1); |
| assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits); |
| ConstantScoreQuery constantScore = new ConstantScoreQuery(filter); |
| @@ -97,7 +97,7 @@ |
| reader = refreshReader(reader); |
| searcher.close(); |
| searcher = newSearcher(reader); |
| + |
| - |
| docs = searcher.search(new MatchAllDocsQuery(), filter, 1); |
| assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits); |
| |
| reverted: |
| --- lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java 2011-04-29 14:59:54.524230000 +0200 |
| +++ lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java 2011-04-14 09:38:41.989855200 +0200 |
| @@ -38,12 +38,12 @@ |
| Set<String> fileExtensions = new HashSet<String>(); |
| fileExtensions.add(IndexFileNames.FIELDS_EXTENSION); |
| fileExtensions.add(IndexFileNames.FIELDS_INDEX_EXTENSION); |
| + |
| - |
| MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); |
| primaryDir.setCheckIndexOnClose(false); // only part of an index |
| MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); |
| secondaryDir.setCheckIndexOnClose(false); // only part of an index |
| + |
| - |
| FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true); |
| IndexWriter writer = new IndexWriter( |
| fsd, |
| reverted: |
| --- lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java 2011-04-29 15:02:07.248821400 +0200 |
| +++ lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java 2011-04-20 23:25:31.372789600 +0200 |
| @@ -330,14 +330,14 @@ |
| |
| @Override |
| public void files(Directory dir, SegmentInfo segmentInfo, String codecId, Set<String> files) throws IOException { |
| + final String seedFileName = IndexFileNames.segmentFileName(segmentInfo.name, codecId, SEED_EXT); |
| - final String seedFileName = IndexFileNames.segmentFileName(segmentInfo.name, codecId, SEED_EXT); |
| files.add(seedFileName); |
| SepPostingsReaderImpl.files(segmentInfo, codecId, files); |
| StandardPostingsReader.files(dir, segmentInfo, codecId, files); |
| BlockTermsReader.files(dir, segmentInfo, codecId, files); |
| FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files); |
| VariableGapTermsIndexReader.files(dir, segmentInfo, codecId, files); |
| + |
| - |
| // hackish! |
| Iterator<String> it = files.iterator(); |
| while(it.hasNext()) { |
| reverted: |
| --- lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java 2011-04-29 15:02:06.945804000 +0200 |
| +++ lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java 2011-03-02 19:39:28.248540600 +0100 |
| @@ -68,7 +68,7 @@ |
| return in.readVInt(); |
| } |
| } |
| + |
| - |
| class Index extends IntIndexInput.Index { |
| private long fp; |
| |
| reverted: |
| --- lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java 2011-04-29 15:02:06.066753800 +0200 |
| +++ lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java 2011-02-15 09:56:18.593240600 +0100 |
| @@ -25,7 +25,7 @@ |
| |
| /** Writes ints directly to the file (not in blocks) as |
| * vInt. |
| + * |
| - * |
| * @lucene.experimental |
| */ |
| public class MockSingleIntIndexOutput extends IntIndexOutput { |
| @@ -77,7 +77,7 @@ |
| } |
| lastFP = fp; |
| } |
| + |
| - |
| @Override |
| public String toString() { |
| return Long.toString(fp); |
| reverted: |
| --- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java 2011-04-29 15:02:11.340055400 +0200 |
| +++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java 2011-04-22 21:46:44.448893600 +0200 |
| @@ -79,23 +79,23 @@ |
| } |
| } |
| |
| + /** |
| + * Convenience method: unzip zipName under destDir, removing destDir first |
| - /** |
| - * Convenience method: unzip zipName under destDir, removing destDir first |
| */ |
| public static void unzip(File zipName, File destDir) throws IOException { |
| + |
| - |
| ZipFile zipFile = new ZipFile(zipName); |
| + |
| - |
| Enumeration<? extends ZipEntry> entries = zipFile.entries(); |
| + |
| - |
| rmDir(destDir); |
| + |
| - |
| destDir.mkdir(); |
| LuceneTestCase.tempDirs.add(destDir.getAbsolutePath()); |
| |
| while (entries.hasMoreElements()) { |
| ZipEntry entry = entries.nextElement(); |
| + |
| - |
| InputStream in = zipFile.getInputStream(entry); |
| File targetFile = new File(destDir, entry.getName()); |
| if (entry.isDirectory()) { |
| @@ -105,24 +105,24 @@ |
| if (targetFile.getParentFile()!=null) { |
| // be on the safe side: do not rely on directories always being extracted |
| // before their children (this makes sense, but is it guaranteed?) |
| + targetFile.getParentFile().mkdirs(); |
| - targetFile.getParentFile().mkdirs(); |
| } |
| OutputStream out = new BufferedOutputStream(new FileOutputStream(targetFile)); |
| + |
| - |
| byte[] buffer = new byte[8192]; |
| int len; |
| while((len = in.read(buffer)) >= 0) { |
| out.write(buffer, 0, len); |
| } |
| + |
| - |
| in.close(); |
| out.close(); |
| } |
| } |
| + |
| - |
| zipFile.close(); |
| } |
| + |
| - |
| public static void syncConcurrentMerges(IndexWriter writer) { |
| syncConcurrentMerges(writer.getConfig().getMergeScheduler()); |
| } |
| @@ -138,7 +138,7 @@ |
| public static CheckIndex.Status checkIndex(Directory dir) throws IOException { |
| return checkIndex(dir, CodecProvider.getDefault()); |
| } |
| + |
| - |
| /** This runs the CheckIndex tool on the index in the given Directory. If any |
| * issues are hit, a RuntimeException is thrown; else, |
| * true is returned. */ |
| @@ -245,7 +245,7 @@ |
| 0x1D200, 0x1D300, 0x1D360, 0x1D400, 0x1F000, 0x1F030, 0x1F100, 0x1F200, |
| 0x20000, 0x2A700, 0x2F800, 0xE0000, 0xE0100, 0xF0000, 0x100000 |
| }; |
| + |
| - |
| private static final int[] blockEnds = { |
| 0x007F, 0x00FF, 0x017F, 0x024F, 0x02AF, 0x02FF, 0x036F, 0x03FF, 0x04FF, |
| 0x052F, 0x058F, 0x05FF, 0x06FF, 0x074F, 0x077F, 0x07BF, 0x07FF, 0x083F, |
| @@ -271,12 +271,12 @@ |
| 0x1D24F, 0x1D35F, 0x1D37F, 0x1D7FF, 0x1F02F, 0x1F09F, 0x1F1FF, 0x1F2FF, |
| 0x2A6DF, 0x2B73F, 0x2FA1F, 0xE007F, 0xE01EF, 0xFFFFF, 0x10FFFF |
| }; |
| + |
| - |
| /** Returns a random string, with all codepoints within the same Unicode block. */ |
| public static String randomRealisticUnicodeString(Random r) { |
| return randomRealisticUnicodeString(r, 20); |
| } |
| + |
| - |
| /** Returns a random string, with all codepoints within the same Unicode block. */ |
| public static String randomRealisticUnicodeString(Random r, int maxLength) { |
| final int end = r.nextInt(maxLength); |