| Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
|
| ===================================================================
|
| --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1297029)
|
| +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
|
| @@ -50,6 +50,7 @@
|
| import org.apache.lucene.search.spans.*; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| +import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.util.automaton.BasicAutomata; |
| import org.apache.lucene.util.automaton.CharacterRunAutomaton; |
| import org.apache.lucene.util.automaton.RegExp; |
| @@ -1969,16 +1970,16 @@
|
| |
| String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, |
| fragmentSeparator); |
| - if (HighlighterTest.VERBOSE) System.out.println("\t" + result); |
| + if (LuceneTestCase.VERBOSE) System.out.println("\t" + result); |
| } |
| } |
| |
| abstract void run() throws Exception; |
| |
| void start() throws Exception { |
| - if (HighlighterTest.VERBOSE) System.out.println("Run QueryScorer"); |
| + if (LuceneTestCase.VERBOSE) System.out.println("Run QueryScorer"); |
| run(); |
| - if (HighlighterTest.VERBOSE) System.out.println("Run QueryTermScorer"); |
| + if (LuceneTestCase.VERBOSE) System.out.println("Run QueryTermScorer"); |
| mode = QUERY_TERM; |
| run(); |
| } |
| Index: lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
|
| ===================================================================
|
| --- lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java (revision 1297029)
|
| +++ lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java (working copy)
|
| @@ -18,6 +18,7 @@
|
| |
| import org.apache.lucene.index.*; |
| import org.apache.lucene.search.DocIdSet; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.Filter; |
| import org.apache.lucene.util.Bits; |
| import org.apache.lucene.util.BytesRef; |
| @@ -94,7 +95,7 @@
|
| } else { |
| docs = termsEnum.docs(acceptDocs, docs, false); |
| int doc = docs.nextDoc(); |
| - if (doc != DocsEnum.NO_MORE_DOCS) { |
| + if (doc != DocIdSetIterator.NO_MORE_DOCS) { |
| if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) { |
| bits.set(doc); |
| } else { |
| @@ -102,7 +103,7 @@
|
| while (true) { |
| lastDoc = doc; |
| doc = docs.nextDoc(); |
| - if (doc == DocsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| } |
| @@ -134,7 +135,7 @@
|
| // unset potential duplicates |
| docs = termsEnum.docs(acceptDocs, docs, false); |
| int doc = docs.nextDoc(); |
| - if (doc != DocsEnum.NO_MORE_DOCS) { |
| + if (doc != DocIdSetIterator.NO_MORE_DOCS) { |
| if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) { |
| doc = docs.nextDoc(); |
| } |
| @@ -145,7 +146,7 @@
|
| lastDoc = doc; |
| bits.clear(lastDoc); |
| doc = docs.nextDoc(); |
| - if (doc == DocsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| } |
| Index: lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
|
| ===================================================================
|
| --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (revision 1297029)
|
| +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (working copy)
|
| @@ -25,6 +25,7 @@
|
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.document.TextField; |
| import org.apache.lucene.index.*; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.ScoreDoc; |
| import org.apache.lucene.search.TermQuery; |
| @@ -142,7 +143,7 @@
|
| false); |
| |
| int lastDoc = 0; |
| - while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| lastDoc = td.docID(); |
| } |
| assertEquals("Duplicate urls should return last doc", lastDoc, hit.doc); |
| Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java (working copy)
|
| @@ -488,7 +488,7 @@
|
| |
| private Frame[] stack; |
| |
| - @SuppressWarnings("unchecked") private FST.Arc<BytesRef>[] arcs = new FST.Arc[5]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc<BytesRef>[] arcs = new FST.Arc[5]; |
| |
| private final RunAutomaton runAutomaton; |
| private final CompiledAutomaton compiledAutomaton; |
| @@ -821,7 +821,8 @@
|
| |
| private FST.Arc<BytesRef> getArc(int ord) { |
| if (ord >= arcs.length) { |
| - @SuppressWarnings("unchecked") final FST.Arc<BytesRef>[] next = new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next = |
| + new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(arcs, 0, next, 0, arcs.length); |
| for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) { |
| next[arcOrd] = new FST.Arc<BytesRef>(); |
| @@ -1198,7 +1199,8 @@
|
| final BytesRef term = new BytesRef(); |
| private final FST.BytesReader fstReader; |
| |
| - @SuppressWarnings("unchecked") private FST.Arc<BytesRef>[] arcs = new FST.Arc[1]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc<BytesRef>[] arcs = |
| + new FST.Arc[1]; |
| |
| public SegmentTermsEnum() throws IOException { |
| //if (DEBUG) System.out.println("BTTR.init seg=" + segment); |
| @@ -1354,7 +1356,8 @@
|
| |
| private FST.Arc<BytesRef> getArc(int ord) { |
| if (ord >= arcs.length) { |
| - @SuppressWarnings("unchecked") final FST.Arc<BytesRef>[] next = new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next = |
| + new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(arcs, 0, next, 0, arcs.length); |
| for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) { |
| next[arcOrd] = new FST.Arc<BytesRef>(); |
| Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java (working copy)
|
| @@ -639,7 +639,6 @@
|
| } |
| |
| // for debugging |
| - @SuppressWarnings("unused") |
| private String toString(BytesRef b) { |
| try { |
| return b.utf8ToString() + " " + b; |
| Index: lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java (working copy)
|
| @@ -30,6 +30,7 @@
|
| import org.apache.lucene.index.MergeState; |
| import org.apache.lucene.index.Terms; |
| import org.apache.lucene.index.TermsEnum; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.DataInput; |
| import org.apache.lucene.util.Bits; |
| import org.apache.lucene.util.BytesRef; |
| @@ -236,7 +237,7 @@
|
| |
| if (docsAndPositionsEnum != null) { |
| final int docID = docsAndPositionsEnum.nextDoc(); |
| - assert docID != DocsEnum.NO_MORE_DOCS; |
| + assert docID != DocIdSetIterator.NO_MORE_DOCS; |
| assert docsAndPositionsEnum.freq() == freq; |
| |
| for(int posUpto=0; posUpto<freq; posUpto++) { |
| Index: lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (working copy)
|
| @@ -401,7 +401,7 @@
|
| while (true) { |
| final int docID = docsEnum.nextDoc(); |
| //System.out.println(Thread.currentThread().getName() + " del term=" + term + " doc=" + docID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| // NOTE: there is no limit check on the docID |
| Index: lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (working copy)
|
| @@ -925,7 +925,7 @@
|
| final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8); |
| postings = termsEnum.docsAndPositions(liveDocs, postings, false); |
| final int docID = postings.advance(skipDocID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } else { |
| if (docID < skipDocID) { |
| @@ -948,7 +948,7 @@
|
| } |
| |
| final int nextDocID = postings.nextDoc(); |
| - if (nextDocID == DocsEnum.NO_MORE_DOCS) { |
| + if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| if (nextDocID <= docID) { |
| @@ -961,14 +961,14 @@
|
| final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8); |
| docs = termsEnum.docs(liveDocs, docs, false); |
| final int docID = docs.advance(skipDocID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } else { |
| if (docID < skipDocID) { |
| throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID); |
| } |
| final int nextDocID = docs.nextDoc(); |
| - if (nextDocID == DocsEnum.NO_MORE_DOCS) { |
| + if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| if (nextDocID <= docID) { |
| @@ -1067,7 +1067,7 @@
|
| throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]); |
| } |
| |
| - while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| totDocCount++; |
| } |
| } |
| Index: lucene/core/src/java/org/apache/lucene/search/ConjunctionTermScorer.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/ConjunctionTermScorer.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/ConjunctionTermScorer.java (working copy)
|
| @@ -49,7 +49,7 @@
|
| |
| private int doNext(int doc) throws IOException { |
| do { |
| - if (lead.doc == DocsEnum.NO_MORE_DOCS) { |
| + if (lead.doc == DocIdSetIterator.NO_MORE_DOCS) { |
| return NO_MORE_DOCS; |
| } |
| advanceHead: do { |
| Index: lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java (working copy)
|
| @@ -76,7 +76,7 @@
|
| // freq of rarest 2 terms is close: |
| final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq; |
| chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance); |
| - if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) { |
| + if (i > 0 && postings[i].postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { |
| noDocs = true; |
| return; |
| } |
| @@ -89,7 +89,7 @@
|
| |
| // first (rarest) term |
| final int doc = chunkStates[0].posEnum.nextDoc(); |
| - if (doc == DocsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| docID = doc; |
| return doc; |
| } |
| @@ -140,8 +140,8 @@
|
| |
| // first term |
| int doc = chunkStates[0].posEnum.advance(target); |
| - if (doc == DocsEnum.NO_MORE_DOCS) { |
| - docID = DocsEnum.NO_MORE_DOCS; |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| + docID = DocIdSetIterator.NO_MORE_DOCS; |
| return doc; |
| } |
| |
| @@ -171,7 +171,7 @@
|
| } |
| |
| doc = chunkStates[0].posEnum.nextDoc(); |
| - if (doc == DocsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| docID = doc; |
| return doc; |
| } |
| Index: lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java (working copy)
|
| @@ -367,7 +367,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -440,7 +440,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -544,7 +544,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -612,7 +612,7 @@
|
| // TODO: use bulk API |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| res.set(docID); |
| @@ -694,7 +694,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -782,7 +782,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -871,7 +871,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| retArray[docID] = termval; |
| @@ -1172,7 +1172,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| docToTermOrd.set(docID, termOrd); |
| @@ -1293,7 +1293,7 @@
|
| docs = termsEnum.docs(null, docs, false); |
| while (true) { |
| final int docID = docs.nextDoc(); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| docToOffset.set(docID, pointer); |
| Index: lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (working copy)
|
| @@ -459,6 +459,7 @@
|
| } |
| |
| @Override |
| + @SuppressWarnings({"unchecked","rawtypes"}) |
| public final boolean equals(Object o) { |
| if (this == o) return true; |
| if (!(o instanceof FieldCacheRangeFilter)) return false; |
| Index: lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/FieldComparator.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/FieldComparator.java (working copy)
|
| @@ -150,7 +150,7 @@
|
| * comparator across segments |
| * @throws IOException |
| */ |
| - public abstract FieldComparator setNextReader(AtomicReaderContext context) throws IOException; |
| + public abstract FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException; |
| |
| /** Sets the Scorer to use in case a document's score is |
| * needed. |
| @@ -201,7 +201,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException { |
| if (missingValue != null) { |
| docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field); |
| // optimization to remove unneeded checks on the bit interface: |
| @@ -258,7 +258,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Byte> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader(), field, parser, missingValue != null); |
| @@ -335,7 +335,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader(), field, parser, missingValue != null); |
| @@ -396,7 +396,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException { |
| final DocValues docValues = context.reader().docValues(field); |
| if (docValues != null) { |
| currentReaderValues = docValues.getSource(); |
| @@ -478,7 +478,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Float> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader(), field, parser, missingValue != null); |
| @@ -540,7 +540,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Short> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader(), field, parser, missingValue != null); |
| @@ -624,7 +624,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getInts(context.reader(), field, parser, missingValue != null); |
| @@ -689,7 +689,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException { |
| DocValues docValues = context.reader().docValues(field); |
| if (docValues != null) { |
| currentReaderValues = docValues.getSource(); |
| @@ -772,7 +772,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException { |
| // NOTE: must do this before calling super otherwise |
| // we compute the docsWithField Bits twice! |
| currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader(), field, parser, missingValue != null); |
| @@ -824,7 +824,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) { |
| + public FieldComparator<Float> setNextReader(AtomicReaderContext context) { |
| return this; |
| } |
| |
| @@ -887,7 +887,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) { |
| + public FieldComparator<Integer> setNextReader(AtomicReaderContext context) { |
| // TODO: can we "map" our docIDs to the current |
| // reader? saves having to then subtract on every |
| // compare call |
| @@ -1007,7 +1007,7 @@
|
| abstract class PerSegmentComparator extends FieldComparator<BytesRef> { |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| return TermOrdValComparator.this.setNextReader(context); |
| } |
| |
| @@ -1226,11 +1226,11 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| final int docBase = context.docBase; |
| termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field); |
| final PackedInts.Reader docToOrd = termsIndex.getDocToOrd(); |
| - FieldComparator perSegComp = null; |
| + FieldComparator<BytesRef> perSegComp = null; |
| if (docToOrd.hasArray()) { |
| final Object arr = docToOrd.getArray(); |
| if (arr instanceof byte[]) { |
| @@ -1397,7 +1397,7 @@
|
| abstract class PerSegmentComparator extends FieldComparator<BytesRef> { |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| return TermOrdValDocValuesComparator.this.setNextReader(context); |
| } |
| |
| @@ -1625,7 +1625,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| final int docBase = context.docBase; |
| |
| final DocValues dv = context.reader().docValues(field); |
| @@ -1646,7 +1646,7 @@
|
| |
| comp = termsIndex.getComparator(); |
| |
| - FieldComparator perSegComp = null; |
| + FieldComparator<BytesRef> perSegComp = null; |
| if (termsIndex.hasPackedDocToOrd()) { |
| final PackedInts.Reader docToOrd = termsIndex.getDocToOrd(); |
| if (docToOrd.hasArray()) { |
| @@ -1774,7 +1774,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| docTerms = FieldCache.DEFAULT.getTerms(context.reader(), field); |
| return this; |
| } |
| @@ -1843,7 +1843,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { |
| final DocValues dv = context.reader().docValues(field); |
| if (dv != null) { |
| docTerms = dv.getSource(); |
| Index: lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java (working copy)
|
| @@ -36,6 +36,6 @@
|
| * @throws IOException |
| * If an error occurs reading the index. |
| */ |
| - public abstract FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) |
| + public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) |
| throws IOException; |
| } |
| Index: lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java (working copy)
|
| @@ -129,6 +129,7 @@
|
| } |
| |
| // prevent instantiation and extension. |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| private FieldValueHitQueue(SortField[] fields, int size) { |
| super(size); |
| // When we get here, fields.length is guaranteed to be > 0, therefore no |
| @@ -169,7 +170,7 @@
|
| } |
| } |
| |
| - public FieldComparator[] getComparators() { |
| + public FieldComparator<?>[] getComparators() { |
| return comparators; |
| } |
| |
| @@ -177,15 +178,15 @@
|
| return reverseMul; |
| } |
| |
| - public void setComparator(int pos, FieldComparator comparator) { |
| + public void setComparator(int pos, FieldComparator<?> comparator) { |
| if (pos==0) firstComparator = comparator; |
| comparators[pos] = comparator; |
| } |
| |
| /** Stores the sort criteria being used. */ |
| protected final SortField[] fields; |
| - protected final FieldComparator[] comparators; // use setComparator to change this array |
| - protected FieldComparator firstComparator; // this must always be equal to comparators[0] |
| + protected final FieldComparator<?>[] comparators; // use setComparator to change this array |
| + protected FieldComparator<?> firstComparator; // this must always be equal to comparators[0] |
| protected final int[] reverseMul; |
| |
| @Override |
| Index: lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy)
|
| @@ -408,7 +408,7 @@
|
| Iterator<DocsAndPositionsEnum> i = docsEnums.iterator(); |
| while (i.hasNext()) { |
| DocsAndPositionsEnum postings = i.next(); |
| - if (postings.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) { |
| + if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| add(postings); |
| } |
| } |
| Index: lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (working copy)
|
| @@ -60,6 +60,7 @@
|
| } |
| |
| @Override |
| + @SuppressWarnings({"unchecked","rawtypes"}) |
| public final boolean equals(final Object o) { |
| if (o==this) return true; |
| if (o==null) return false; |
| Index: lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java (working copy)
|
| @@ -352,6 +352,7 @@
|
| } |
| |
| @Override |
| + @SuppressWarnings({"unchecked","rawtypes"}) |
| public final boolean equals(final Object o) { |
| if (o==this) return true; |
| if (!super.equals(o)) |
| Index: lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java (working copy)
|
| @@ -135,7 +135,8 @@
|
| } |
| } |
| |
| - @SuppressWarnings("unchecked") final List<Query>[] disjunctLists = new List[maxPosition + 1]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final List<Query>[] disjunctLists = |
| + new List[maxPosition + 1]; |
| int distinctPositions = 0; |
| |
| for (int i = 0; i < termArrays.size(); ++i) { |
| Index: lucene/core/src/java/org/apache/lucene/search/SortField.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/SortField.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/SortField.java (working copy)
|
| @@ -376,7 +376,7 @@
|
| * optimize themselves when they are the primary sort. |
| * @return {@link FieldComparator} to use when sorting |
| */ |
| - public FieldComparator getComparator(final int numHits, final int sortPos) throws IOException { |
| + public FieldComparator<?> getComparator(final int numHits, final int sortPos) throws IOException { |
| |
| switch (type) { |
| case SCORE: |
| Index: lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java (working copy)
|
| @@ -60,6 +60,7 @@
|
| * Be sure to not change the rewrite method on the wrapped query afterwards! Doing so will |
| * throw {@link UnsupportedOperationException} on rewriting this query! |
| */ |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| public SpanMultiTermQueryWrapper(Q query) { |
| this.query = query; |
| |
| @@ -123,6 +124,7 @@
|
| } |
| |
| @Override |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| public boolean equals(Object obj) { |
| if (this == obj) return true; |
| if (obj == null) return false; |
| Index: lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java (working copy)
|
| @@ -56,7 +56,7 @@
|
| return false; |
| } |
| doc = postings.nextDoc(); |
| - if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| return false; |
| } |
| freq = postings.freq(); |
| @@ -70,7 +70,7 @@
|
| @Override |
| public boolean skipTo(int target) throws IOException { |
| doc = postings.advance(target); |
| - if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) { |
| + if (doc == DocIdSetIterator.NO_MORE_DOCS) { |
| return false; |
| } |
| |
| Index: lucene/core/src/java/org/apache/lucene/search/TopDocs.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/TopDocs.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/TopDocs.java (working copy)
|
| @@ -116,10 +116,11 @@
|
| } |
| } |
| |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| private static class MergeSortQueue extends PriorityQueue<ShardRef> { |
| // These are really FieldDoc instances: |
| final ScoreDoc[][] shardHits; |
| - final FieldComparator[] comparators; |
| + final FieldComparator<?>[] comparators; |
| final int[] reverseMul; |
| |
| public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException { |
| @@ -155,7 +156,7 @@
|
| } |
| |
| // Returns true if first is < second |
| - @SuppressWarnings("unchecked") |
| + @SuppressWarnings({"unchecked","rawtypes"}) |
| public boolean lessThan(ShardRef first, ShardRef second) { |
| assert first != second; |
| final FieldDoc firstFD = (FieldDoc) shardHits[first.shardIndex][first.hitIndex]; |
| Index: lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java (working copy)
|
| @@ -46,7 +46,7 @@
|
| private static class OneComparatorNonScoringCollector extends |
| TopFieldCollector { |
| |
| - FieldComparator comparator; |
| + FieldComparator<?> comparator; |
| final int reverseMul; |
| final FieldValueHitQueue<Entry> queue; |
| |
| @@ -382,7 +382,7 @@
|
| */ |
| private static class MultiComparatorNonScoringCollector extends TopFieldCollector { |
| |
| - final FieldComparator[] comparators; |
| + final FieldComparator<?>[] comparators; |
| final int[] reverseMul; |
| final FieldValueHitQueue<Entry> queue; |
| public MultiComparatorNonScoringCollector(FieldValueHitQueue<Entry> queue, |
| Index: lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java (working copy)
|
| @@ -430,7 +430,7 @@
|
| } |
| } |
| // map<state, set<state>> |
| - @SuppressWarnings("unchecked") Set<State> map[] = new Set[states.length]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) Set<State> map[] = new Set[states.length]; |
| for (int i = 0; i < map.length; i++) |
| map[i] = new HashSet<State>(); |
| for (State s : states) { |
| Index: lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java (working copy)
|
| @@ -74,11 +74,11 @@
|
| final int[] sigma = a.getStartPoints(); |
| final State[] states = a.getNumberedStates(); |
| final int sigmaLen = sigma.length, statesLen = states.length; |
| - @SuppressWarnings("unchecked") final ArrayList<State>[][] reverse = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final ArrayList<State>[][] reverse = |
| (ArrayList<State>[][]) new ArrayList[statesLen][sigmaLen]; |
| - @SuppressWarnings("unchecked") final HashSet<State>[] partition = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final HashSet<State>[] partition = |
| (HashSet<State>[]) new HashSet[statesLen]; |
| - @SuppressWarnings("unchecked") final ArrayList<State>[] splitblock = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final ArrayList<State>[] splitblock = |
| (ArrayList<State>[]) new ArrayList[statesLen]; |
| final int[] block = new int[statesLen]; |
| final StateList[][] active = new StateList[statesLen][sigmaLen]; |
| Index: lucene/core/src/java/org/apache/lucene/util/fst/Builder.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/util/fst/Builder.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/util/fst/Builder.java (working copy)
|
| @@ -144,7 +144,8 @@
|
| } |
| NO_OUTPUT = outputs.getNoOutput(); |
| |
| - @SuppressWarnings("unchecked") final UnCompiledNode<T>[] f = (UnCompiledNode<T>[]) new UnCompiledNode[10]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T>[] f = |
| + (UnCompiledNode<T>[]) new UnCompiledNode[10]; |
| frontier = f; |
| for(int idx=0;idx<frontier.length;idx++) { |
| frontier[idx] = new UnCompiledNode<T>(this, idx); |
| @@ -239,7 +240,8 @@
|
| if (node.inputCount < minSuffixCount2 || (minSuffixCount2 == 1 && node.inputCount == 1 && idx > 1)) { |
| // drop all arcs |
| for(int arcIdx=0;arcIdx<node.numArcs;arcIdx++) { |
| - @SuppressWarnings("unchecked") final UnCompiledNode<T> target = (UnCompiledNode<T>) node.arcs[arcIdx].target; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T> target = |
| + (UnCompiledNode<T>) node.arcs[arcIdx].target; |
| target.clear(); |
| } |
| node.numArcs = 0; |
| @@ -356,7 +358,7 @@
|
| final int prefixLenPlus1 = pos1+1; |
| |
| if (frontier.length < input.length+1) { |
| - @SuppressWarnings("unchecked") final UnCompiledNode<T>[] next = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T>[] next = |
| new UnCompiledNode[ArrayUtil.oversize(input.length+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(frontier, 0, next, 0, frontier.length); |
| for(int idx=frontier.length;idx<next.length;idx++) { |
| @@ -458,7 +460,7 @@
|
| final Arc<T> arc = node.arcs[arcIdx]; |
| if (!arc.target.isCompiled()) { |
| // not yet compiled |
| - @SuppressWarnings("unchecked") final UnCompiledNode<T> n = (UnCompiledNode<T>) arc.target; |
| + @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T> n = (UnCompiledNode<T>) arc.target; |
| if (n.numArcs == 0) { |
| //System.out.println("seg=" + segment + " FORCE final arc=" + (char) arc.label); |
| arc.isFinal = n.isFinal = true; |
| @@ -512,7 +514,7 @@
|
| * LUCENE-2934 (node expansion based on conditions other than the |
| * fanout size). |
| */ |
| - @SuppressWarnings("unchecked") |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| public UnCompiledNode(Builder<T> owner, int depth) { |
| this.owner = owner; |
| arcs = (Arc<T>[]) new Arc[1]; |
| @@ -545,7 +547,7 @@
|
| assert label >= 0; |
| assert numArcs == 0 || label > arcs[numArcs-1].label: "arc[-1].label=" + arcs[numArcs-1].label + " new label=" + label + " numArcs=" + numArcs; |
| if (numArcs == arcs.length) { |
| - @SuppressWarnings("unchecked") final Arc<T>[] newArcs = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final Arc<T>[] newArcs = |
| new Arc[ArrayUtil.oversize(numArcs+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(arcs, 0, newArcs, 0, arcs.length); |
| for(int arcIdx=numArcs;arcIdx<newArcs.length;arcIdx++) { |
| Index: lucene/core/src/java/org/apache/lucene/util/fst/FST.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/util/fst/FST.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/util/fst/FST.java (working copy)
|
| @@ -376,7 +376,7 @@
|
| } |
| |
| // Caches first 128 labels |
| - @SuppressWarnings("unchecked") |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| private void cacheRootArcs() throws IOException { |
| cachedRootArcs = (Arc<T>[]) new Arc[0x80]; |
| final Arc<T> arc = new Arc<T>(); |
| Index: lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java
|
| ===================================================================
|
| --- lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java (revision 1297029)
|
| +++ lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java (working copy)
|
| @@ -30,9 +30,9 @@
|
| abstract class FSTEnum<T> { |
| protected final FST<T> fst; |
| |
| - @SuppressWarnings("unchecked") protected FST.Arc<T>[] arcs = new FST.Arc[10]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) protected FST.Arc<T>[] arcs = new FST.Arc[10]; |
| // outputs are cumulative |
| - @SuppressWarnings("unchecked") protected T[] output = (T[]) new Object[10]; |
| + @SuppressWarnings({"rawtypes","unchecked"}) protected T[] output = (T[]) new Object[10]; |
| |
| protected final T NO_OUTPUT; |
| protected final FST.Arc<T> scratchArc = new FST.Arc<T>(); |
| @@ -462,13 +462,13 @@
|
| upto++; |
| grow(); |
| if (arcs.length <= upto) { |
| - @SuppressWarnings("unchecked") final FST.Arc<T>[] newArcs = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<T>[] newArcs = |
| new FST.Arc[ArrayUtil.oversize(1+upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(arcs, 0, newArcs, 0, arcs.length); |
| arcs = newArcs; |
| } |
| if (output.length <= upto) { |
| - @SuppressWarnings("unchecked") final T[] newOutput = |
| + @SuppressWarnings({"rawtypes","unchecked"}) final T[] newOutput = |
| (T[]) new Object[ArrayUtil.oversize(1+upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; |
| System.arraycopy(output, 0, newOutput, 0, output.length); |
| output = newOutput; |
| Index: lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy)
|
| @@ -28,6 +28,7 @@
|
| import org.apache.lucene.index.MultiFields; |
| import org.apache.lucene.index.DocsAndPositionsEnum; |
| import org.apache.lucene.index.RandomIndexWriter; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| |
| @@ -76,7 +77,7 @@
|
| "preanalyzed", |
| new BytesRef("term1"), |
| false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, termPositions.freq()); |
| assertEquals(0, termPositions.nextPosition()); |
| |
| @@ -85,7 +86,7 @@
|
| "preanalyzed", |
| new BytesRef("term2"), |
| false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(2, termPositions.freq()); |
| assertEquals(1, termPositions.nextPosition()); |
| assertEquals(3, termPositions.nextPosition()); |
| @@ -95,7 +96,7 @@
|
| "preanalyzed", |
| new BytesRef("term3"), |
| false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, termPositions.freq()); |
| assertEquals(2, termPositions.nextPosition()); |
| reader.close(); |
| Index: lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (working copy)
|
| @@ -35,6 +35,7 @@
|
| import org.apache.lucene.index.TermsEnum.SeekStatus; |
| import org.apache.lucene.index.TermsEnum; |
| import org.apache.lucene.index.TieredMergePolicy; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.IOContext; |
| import org.apache.lucene.store.IndexOutput; |
| @@ -141,10 +142,10 @@
|
| assertEquals(SeekStatus.FOUND, te.seekCeil(new BytesRef("dog"))); |
| assertEquals(SeekStatus.FOUND, te.seekCeil(new BytesRef("the"))); |
| DocsEnum de = te.docs(null, null, true); |
| - assertTrue(de.advance(0) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(de.advance(0) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(2, de.freq()); |
| - assertTrue(de.advance(1) != DocsEnum.NO_MORE_DOCS); |
| - assertTrue(de.advance(2) == DocsEnum.NO_MORE_DOCS); |
| + assertTrue(de.advance(1) != DocIdSetIterator.NO_MORE_DOCS); |
| + assertTrue(de.advance(2) == DocIdSetIterator.NO_MORE_DOCS); |
| reader.close(); |
| } |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (working copy)
|
| @@ -81,7 +81,7 @@
|
| NUM_TEST_ITER = atLeast(20); |
| } |
| |
| - class FieldData implements Comparable { |
| + class FieldData implements Comparable<FieldData> { |
| final FieldInfo fieldInfo; |
| final TermData[] terms; |
| final boolean omitTF; |
| @@ -102,8 +102,8 @@
|
| Arrays.sort(terms); |
| } |
| |
| - public int compareTo(final Object other) { |
| - return fieldInfo.name.compareTo(((FieldData) other).fieldInfo.name); |
| + public int compareTo(final FieldData other) { |
| + return fieldInfo.name.compareTo(other.fieldInfo.name); |
| } |
| |
| public void write(final FieldsConsumer consumer) throws Throwable { |
| @@ -133,7 +133,7 @@
|
| } |
| } |
| |
| - class TermData implements Comparable { |
| + class TermData implements Comparable<TermData> { |
| String text2; |
| final BytesRef text; |
| int[] docs; |
| @@ -147,8 +147,8 @@
|
| this.positions = positions; |
| } |
| |
| - public int compareTo(final Object o) { |
| - return text.compareTo(((TermData) o).text); |
| + public int compareTo(final TermData o) { |
| + return text.compareTo(o.text); |
| } |
| |
| public long write(final TermsConsumer termsConsumer) throws Throwable { |
| @@ -281,7 +281,7 @@
|
| for(int iter=0;iter<2;iter++) { |
| docsEnum = _TestUtil.docs(random, termsEnum, null, docsEnum, false); |
| assertEquals(terms[i].docs[0], docsEnum.nextDoc()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc()); |
| } |
| } |
| assertNull(termsEnum.next()); |
| @@ -439,7 +439,7 @@
|
| assertEquals(positions[i].pos, pos); |
| if (positions[i].payload != null) { |
| assertTrue(posEnum.hasPayload()); |
| - if (TestCodecs.random.nextInt(3) < 2) { |
| + if (LuceneTestCase.random.nextInt(3) < 2) { |
| // Verify the payload bytes |
| final BytesRef otherPayload = posEnum.getPayload(); |
| assertTrue("expected=" + positions[i].payload.toString() + " got=" + otherPayload.toString(), positions[i].payload.equals(otherPayload)); |
| @@ -453,7 +453,7 @@
|
| public void _run() throws Throwable { |
| |
| for(int iter=0;iter<NUM_TEST_ITER;iter++) { |
| - final FieldData field = fields[TestCodecs.random.nextInt(fields.length)]; |
| + final FieldData field = fields[LuceneTestCase.random.nextInt(fields.length)]; |
| final TermsEnum termsEnum = termsDict.terms(field.fieldInfo.name).iterator(null); |
| if (si.getCodec() instanceof Lucene3xCodec) { |
| // code below expects unicode sort order |
| @@ -473,7 +473,7 @@
|
| assertEquals(upto, field.terms.length); |
| |
| // Test random seek: |
| - TermData term = field.terms[TestCodecs.random.nextInt(field.terms.length)]; |
| + TermData term = field.terms[LuceneTestCase.random.nextInt(field.terms.length)]; |
| TermsEnum.SeekStatus status = termsEnum.seekCeil(new BytesRef(term.text2)); |
| assertEquals(status, TermsEnum.SeekStatus.FOUND); |
| assertEquals(term.docs.length, termsEnum.docFreq()); |
| @@ -484,7 +484,7 @@
|
| } |
| |
| // Test random seek by ord: |
| - final int idx = TestCodecs.random.nextInt(field.terms.length); |
| + final int idx = LuceneTestCase.random.nextInt(field.terms.length); |
| term = field.terms[idx]; |
| boolean success = false; |
| try { |
| @@ -547,7 +547,7 @@
|
| upto = 0; |
| do { |
| term = field.terms[upto]; |
| - if (TestCodecs.random.nextInt(3) == 1) { |
| + if (LuceneTestCase.random.nextInt(3) == 1) { |
| final DocsEnum docs; |
| final DocsEnum docsAndFreqs; |
| final DocsAndPositionsEnum postings; |
| @@ -569,10 +569,10 @@
|
| // Maybe skip: |
| final int left = term.docs.length-upto2; |
| int doc; |
| - if (TestCodecs.random.nextInt(3) == 1 && left >= 1) { |
| - final int inc = 1+TestCodecs.random.nextInt(left-1); |
| + if (LuceneTestCase.random.nextInt(3) == 1 && left >= 1) { |
| + final int inc = 1+LuceneTestCase.random.nextInt(left-1); |
| upto2 += inc; |
| - if (TestCodecs.random.nextInt(2) == 1) { |
| + if (LuceneTestCase.random.nextInt(2) == 1) { |
| doc = docs.advance(term.docs[upto2]); |
| assertEquals(term.docs[upto2], doc); |
| } else { |
| @@ -597,7 +597,7 @@
|
| assertEquals(term.docs[upto2], doc); |
| if (!field.omitTF) { |
| assertEquals(term.positions[upto2].length, postings.freq()); |
| - if (TestCodecs.random.nextInt(2) == 1) { |
| + if (LuceneTestCase.random.nextInt(2) == 1) { |
| this.verifyPositions(term.positions[upto2], postings); |
| } |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy)
|
| @@ -110,7 +110,7 @@
|
| |
| // This should blow up if we forget to check that the TermEnum is from the same |
| // reader as the TermDocs. |
| - while (td.nextDoc() != td.NO_MORE_DOCS) ret += td.docID(); |
| + while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) ret += td.docID(); |
| |
| // really a dummy assert to ensure that we got some docs and to ensure that |
| // nothing is eliminated by hotspot |
| Index: lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java (working copy)
|
| @@ -84,7 +84,7 @@
|
| assertEquals(msg, 20, docsAndPosEnum.nextPosition()); |
| assertEquals(msg, 4, docsAndPosEnum.freq()); |
| assertEquals(msg, 30, docsAndPosEnum.nextPosition()); |
| - } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); |
| + } while (docsAndPosEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| } |
| } |
| reader.close(); |
| @@ -156,7 +156,7 @@
|
| // now run through the scorer and check if all positions are there... |
| do { |
| int docID = docsAndPosEnum.docID(); |
| - if (docID == DocsAndPositionsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| Integer[] pos = positionsInDoc[atomicReaderContext.docBase + docID]; |
| @@ -177,7 +177,7 @@
|
| .advance(docID + 1 + random.nextInt((maxDoc - docID))); |
| } |
| |
| - } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); |
| + } while (docsAndPosEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| } |
| |
| } |
| @@ -234,7 +234,7 @@
|
| int next = findNext(freqInDoc, context.docBase+j+1, context.docBase + maxDoc) - context.docBase; |
| int advancedTo = docsEnum.advance(next); |
| if (next >= maxDoc) { |
| - assertEquals(DocsEnum.NO_MORE_DOCS, advancedTo); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, advancedTo); |
| } else { |
| assertTrue("advanced to: " +advancedTo + " but should be <= " + next, next >= advancedTo); |
| } |
| @@ -243,7 +243,7 @@
|
| } |
| } |
| } |
| - assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocsEnum.NO_MORE_DOCS, docsEnum.docID()); |
| + assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, docsEnum.docID()); |
| } |
| |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy)
|
| @@ -30,6 +30,7 @@
|
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.document.TextField; |
| import org.apache.lucene.index.FieldInfo.IndexOptions; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.AttributeSource; |
| import org.apache.lucene.util.BytesRef; |
| @@ -129,7 +130,7 @@
|
| |
| DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), |
| "repeated", new BytesRef("repeated"), false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| int freq = termPositions.freq(); |
| assertEquals(2, freq); |
| assertEquals(0, termPositions.nextPosition()); |
| @@ -200,7 +201,7 @@
|
| SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); |
| |
| DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"), false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| int freq = termPositions.freq(); |
| assertEquals(3, freq); |
| assertEquals(0, termPositions.nextPosition()); |
| @@ -244,18 +245,18 @@
|
| SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); |
| |
| DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"), false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, termPositions.freq()); |
| assertEquals(0, termPositions.nextPosition()); |
| |
| termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term2"), false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(2, termPositions.freq()); |
| assertEquals(1, termPositions.nextPosition()); |
| assertEquals(3, termPositions.nextPosition()); |
| |
| termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term3"), false); |
| - assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); |
| + assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, termPositions.freq()); |
| assertEquals(2, termPositions.nextPosition()); |
| reader.close(); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java (working copy)
|
| @@ -816,7 +816,7 @@
|
| assertEquals(1, docFreq); |
| DocsEnum termDocsEnum = reader.termDocsEnum(null, term.field, term.bytes, false); |
| int nextDoc = termDocsEnum.nextDoc(); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, termDocsEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocsEnum.nextDoc()); |
| return nextDoc; |
| } |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java (working copy)
|
| @@ -24,6 +24,7 @@
|
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.util.Bits; |
| @@ -165,7 +166,7 @@
|
| |
| DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader), |
| null, false); |
| - while (positions.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| assertTrue((positions.docID() % 2) == 1); |
| } |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java (working copy)
|
| @@ -28,6 +28,7 @@
|
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.search.BooleanClause; |
| import org.apache.lucene.search.BooleanQuery; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.TermQuery; |
| import org.apache.lucene.search.TopDocs; |
| @@ -264,14 +265,14 @@
|
| assertEquals(new BytesRef(""+counter), termsEnum.next()); |
| assertEquals(1, termsEnum.totalTermFreq()); |
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(1, dpEnum.nextPosition()); |
| |
| assertEquals(new BytesRef("text"), termsEnum.next()); |
| assertEquals(1, termsEnum.totalTermFreq()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(0, dpEnum.nextPosition()); |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
|
| @@ -969,14 +969,14 @@
|
| assertNotNull(termsEnum.next()); |
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(100, dpEnum.nextPosition()); |
| |
| assertNotNull(termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(101, dpEnum.nextPosition()); |
| assertNull(termsEnum.next()); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy)
|
| @@ -31,6 +31,7 @@
|
| import org.apache.lucene.document.Field; |
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.Query; |
| import org.apache.lucene.search.TermQuery; |
| @@ -57,7 +58,7 @@
|
| false); |
| |
| if (td != null) { |
| - while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| td.docID(); |
| count++; |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (working copy)
|
| @@ -25,6 +25,7 @@
|
| import org.apache.lucene.document.Field; |
| import org.apache.lucene.document.FieldType; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.AlreadyClosedException; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| @@ -217,7 +218,7 @@
|
| null, |
| false); |
| int count = 0; |
| - while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| count++; |
| } |
| assertTrue(count > 0); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (working copy)
|
| @@ -29,6 +29,7 @@
|
| import org.apache.lucene.document.FieldType; |
| import org.apache.lucene.index.FieldInfo.IndexOptions; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -176,7 +177,7 @@
|
| final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term), false); |
| |
| int docID = -1; |
| - while(docID < DocsEnum.NO_MORE_DOCS) { |
| + while(docID < DocIdSetIterator.NO_MORE_DOCS) { |
| final int what = random.nextInt(3); |
| if (what == 0) { |
| if (VERBOSE) { |
| @@ -199,7 +200,7 @@
|
| System.out.println(" got docID=" + docID); |
| } |
| assertEquals(expected, docID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| |
| @@ -241,7 +242,7 @@
|
| System.out.println(" got docID=" + docID); |
| } |
| assertEquals(expected, docID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| |
| @@ -380,7 +381,7 @@
|
| assert docs != null; |
| |
| int docID = -1; |
| - while(docID < DocsEnum.NO_MORE_DOCS) { |
| + while(docID < DocIdSetIterator.NO_MORE_DOCS) { |
| final int what = random.nextInt(3); |
| if (what == 0) { |
| if (VERBOSE) { |
| @@ -403,7 +404,7 @@
|
| System.out.println(" got docID=" + docID); |
| } |
| assertEquals(expected, docID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| |
| @@ -439,7 +440,7 @@
|
| System.out.println(" got docID=" + docID); |
| } |
| assertEquals(expected, docID); |
| - if (docID == DocsEnum.NO_MORE_DOCS) { |
| + if (docID == DocIdSetIterator.NO_MORE_DOCS) { |
| break; |
| } |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java (working copy)
|
| @@ -17,6 +17,7 @@
|
| * limitations under the License. |
| */ |
| |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.*; |
| import org.apache.lucene.util.*; |
| import org.apache.lucene.document.*; |
| @@ -128,7 +129,7 @@
|
| assertEquals(docID, docsEnum.nextDoc()); |
| } |
| } |
| - assertEquals(docsEnum.NO_MORE_DOCS, docsEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc()); |
| } |
| |
| reader.close(); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java (working copy)
|
| @@ -22,6 +22,7 @@
|
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.Bits; |
| import org.apache.lucene.util.BytesRef; |
| @@ -79,9 +80,9 @@
|
| assertNotNull(b); |
| assertEquals(t, b.utf8ToString()); |
| DocsEnum td = _TestUtil.docs(random, te, liveDocs, null, false); |
| - assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(0, td.docID()); |
| - assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); |
| + assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS); |
| } |
| assertNull(te.next()); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (working copy)
|
| @@ -24,6 +24,7 @@
|
| import java.util.Random; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.store.RAMDirectory; |
| @@ -233,7 +234,7 @@
|
| |
| public static int[] toArray(DocsEnum docsEnum) throws IOException { |
| List<Integer> docs = new ArrayList<Integer>(); |
| - while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| int docID = docsEnum.docID(); |
| docs.add(docID); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java (working copy)
|
| @@ -88,7 +88,7 @@
|
| assertEquals(2, dp.nextPosition()); |
| assertEquals(9, dp.startOffset()); |
| assertEquals(17, dp.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc()); |
| |
| dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("b"), true); |
| assertNotNull(dp); |
| @@ -97,7 +97,7 @@
|
| assertEquals(1, dp.nextPosition()); |
| assertEquals(8, dp.startOffset()); |
| assertEquals(9, dp.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc()); |
| |
| dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("c"), true); |
| assertNotNull(dp); |
| @@ -106,7 +106,7 @@
|
| assertEquals(3, dp.nextPosition()); |
| assertEquals(19, dp.startOffset()); |
| assertEquals(50, dp.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc()); |
| |
| r.close(); |
| dir.close(); |
| @@ -156,7 +156,7 @@
|
| for (String term : terms) { |
| DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term), true); |
| int doc; |
| - while((doc = dp.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| String storedNumbers = reader.document(doc).get("numbers"); |
| int freq = dp.freq(); |
| for (int i = 0; i < freq; i++) { |
| @@ -304,7 +304,7 @@
|
| assertNotNull(docs); |
| int doc; |
| //System.out.println(" doc/freq"); |
| - while((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + while((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]); |
| //System.out.println(" doc=" + docIDToID[doc] + " docID=" + doc + " " + expected.size() + " freq"); |
| assertNotNull(expected); |
| @@ -314,7 +314,7 @@
|
| docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, false); |
| assertNotNull(docsAndPositions); |
| //System.out.println(" doc/freq/pos"); |
| - while((doc = docsAndPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]); |
| //System.out.println(" doc=" + docIDToID[doc] + " " + expected.size() + " freq"); |
| assertNotNull(expected); |
| @@ -329,7 +329,7 @@
|
| docsAndPositionsAndOffsets = termsEnum.docsAndPositions(null, docsAndPositions, true); |
| assertNotNull(docsAndPositionsAndOffsets); |
| //System.out.println(" doc/freq/pos/offs"); |
| - while((doc = docsAndPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]); |
| //System.out.println(" doc=" + docIDToID[doc] + " " + expected.size() + " freq"); |
| assertNotNull(expected); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy)
|
| @@ -23,6 +23,7 @@
|
| import org.apache.lucene.codecs.Codec; |
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.InfoStream; |
| @@ -105,7 +106,7 @@
|
| null, |
| false); |
| assertTrue(termDocs != null); |
| - assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| |
| int tvCount = 0; |
| for(FieldInfo fieldInfo : mergedReader.getFieldInfos()) { |
| Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy)
|
| @@ -23,6 +23,7 @@
|
| import java.util.List; |
| |
| import org.apache.lucene.document.Document; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.IOContext; |
| import org.apache.lucene.util.BytesRef; |
| @@ -133,7 +134,7 @@
|
| MultiFields.getLiveDocs(reader), |
| null, |
| false); |
| - assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| |
| termDocs = _TestUtil.docs(random, reader, |
| DocHelper.NO_NORMS_KEY, |
| @@ -142,7 +143,7 @@
|
| null, |
| false); |
| |
| - assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| |
| |
| DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, |
| @@ -152,7 +153,7 @@
|
| false); |
| // NOTE: prior rev of this test was failing to first |
| // call next here: |
| - assertTrue(positions.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertTrue(positions.docID() == 0); |
| assertTrue(positions.nextPosition() >= 0); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy)
|
| @@ -22,6 +22,7 @@
|
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -63,7 +64,7 @@
|
| TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null); |
| terms.seekCeil(new BytesRef("field")); |
| DocsEnum termDocs = _TestUtil.docs(random, terms, reader.getLiveDocs(), null, true); |
| - if (termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| int docId = termDocs.docID(); |
| assertTrue(docId == 0); |
| int freq = termDocs.freq(); |
| @@ -142,19 +143,19 @@
|
| // without optimization (assumption skipInterval == 16) |
| |
| // with next |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(0, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.advance(0) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(0) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(2, tdocs.docID()); |
| - assertTrue(tdocs.advance(4) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(4) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(4, tdocs.docID()); |
| - assertTrue(tdocs.advance(9) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(9) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(9, tdocs.docID()); |
| - assertFalse(tdocs.advance(10) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| // without next |
| tdocs = _TestUtil.docs(random, reader, |
| @@ -164,13 +165,13 @@
|
| null, |
| false); |
| |
| - assertTrue(tdocs.advance(0) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(0) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(0, tdocs.docID()); |
| - assertTrue(tdocs.advance(4) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(4) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(4, tdocs.docID()); |
| - assertTrue(tdocs.advance(9) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(9) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(9, tdocs.docID()); |
| - assertFalse(tdocs.advance(10) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| // exactly skipInterval documents and therefore with optimization |
| |
| @@ -182,21 +183,21 @@
|
| null, |
| true); |
| |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(10, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(11, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(12, tdocs.docID()); |
| - assertTrue(tdocs.advance(15) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(15) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(15, tdocs.docID()); |
| - assertTrue(tdocs.advance(24) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(24) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(24, tdocs.docID()); |
| - assertTrue(tdocs.advance(25) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(25) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(25, tdocs.docID()); |
| - assertFalse(tdocs.advance(26) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| // without next |
| tdocs = _TestUtil.docs(random, reader, |
| @@ -206,15 +207,15 @@
|
| null, |
| true); |
| |
| - assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(10, tdocs.docID()); |
| - assertTrue(tdocs.advance(15) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(15) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(15, tdocs.docID()); |
| - assertTrue(tdocs.advance(24) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(24) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(24, tdocs.docID()); |
| - assertTrue(tdocs.advance(25) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(25) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(25, tdocs.docID()); |
| - assertFalse(tdocs.advance(26) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| // much more than skipInterval documents and therefore with optimization |
| |
| @@ -226,23 +227,23 @@
|
| null, |
| true); |
| |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(26, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(27, tdocs.docID()); |
| assertEquals(4, tdocs.freq()); |
| - assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(28, tdocs.docID()); |
| - assertTrue(tdocs.advance(40) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(40) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(40, tdocs.docID()); |
| - assertTrue(tdocs.advance(57) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(57) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(57, tdocs.docID()); |
| - assertTrue(tdocs.advance(74) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(74) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(74, tdocs.docID()); |
| - assertTrue(tdocs.advance(75) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(75) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(75, tdocs.docID()); |
| - assertFalse(tdocs.advance(76) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| //without next |
| tdocs = _TestUtil.docs(random, reader, |
| @@ -251,17 +252,17 @@
|
| MultiFields.getLiveDocs(reader), |
| null, |
| false); |
| - assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(26, tdocs.docID()); |
| - assertTrue(tdocs.advance(40) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(40) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(40, tdocs.docID()); |
| - assertTrue(tdocs.advance(57) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(57) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(57, tdocs.docID()); |
| - assertTrue(tdocs.advance(74) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(74) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(74, tdocs.docID()); |
| - assertTrue(tdocs.advance(75) != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tdocs.advance(75) != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(75, tdocs.docID()); |
| - assertFalse(tdocs.advance(76) != DocsEnum.NO_MORE_DOCS); |
| + assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS); |
| |
| reader.close(); |
| dir.close(); |
| Index: lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java (working copy)
|
| @@ -22,6 +22,7 @@
|
| import java.util.Set; |
| |
| import org.apache.lucene.util.*; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.*; |
| import org.apache.lucene.document.*; |
| |
| @@ -117,14 +118,14 @@
|
| } |
| if (upto == expected.size()) { |
| if (VERBOSE) { |
| - System.out.println(" expect docID=" + DocsEnum.NO_MORE_DOCS + " actual=" + docID); |
| + System.out.println(" expect docID=" + DocIdSetIterator.NO_MORE_DOCS + " actual=" + docID); |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, docID); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, docID); |
| } else { |
| if (VERBOSE) { |
| System.out.println(" expect docID=" + expected.get(upto) + " actual=" + docID); |
| } |
| - assertTrue(docID != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(expected.get(upto).intValue(), docID); |
| } |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
|
| @@ -34,6 +34,7 @@
|
| import org.apache.lucene.document.FieldType; |
| import org.apache.lucene.document.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.TermQuery; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.*; |
| @@ -336,7 +337,7 @@
|
| DocsEnum docs = null; |
| while(termsEnum.next() != null) { |
| docs = _TestUtil.docs(random, termsEnum, null, docs, false); |
| - while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| fail("r1 is not empty but r2 is"); |
| } |
| } |
| @@ -362,18 +363,18 @@
|
| termDocs2 = null; |
| } |
| |
| - if (termDocs1.nextDoc() == DocsEnum.NO_MORE_DOCS) { |
| + if (termDocs1.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { |
| // This doc is deleted and wasn't replaced |
| - assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocsEnum.NO_MORE_DOCS); |
| + assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocIdSetIterator.NO_MORE_DOCS); |
| continue; |
| } |
| |
| int id1 = termDocs1.docID(); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, termDocs1.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs1.nextDoc()); |
| |
| - assertTrue(termDocs2.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(termDocs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| int id2 = termDocs2.docID(); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, termDocs2.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs2.nextDoc()); |
| |
| r2r1[id2] = id1; |
| |
| @@ -409,7 +410,7 @@
|
| System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq()); |
| dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false); |
| if (dpEnum != null) { |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| final int freq = dpEnum.freq(); |
| System.out.println(" doc=" + dpEnum.docID() + " freq=" + freq); |
| for(int posUpto=0;posUpto<freq;posUpto++) { |
| @@ -418,7 +419,7 @@
|
| } else { |
| dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true); |
| assertNotNull(dEnum); |
| - assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| final int freq = dEnum.freq(); |
| System.out.println(" doc=" + dEnum.docID() + " freq=" + freq); |
| } |
| @@ -443,7 +444,7 @@
|
| System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq()); |
| dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false); |
| if (dpEnum != null) { |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| final int freq = dpEnum.freq(); |
| System.out.println(" doc=" + dpEnum.docID() + " freq=" + freq); |
| for(int posUpto=0;posUpto<freq;posUpto++) { |
| @@ -452,7 +453,7 @@
|
| } else { |
| dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true); |
| assertNotNull(dEnum); |
| - assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| final int freq = dEnum.freq(); |
| System.out.println(" doc=" + dEnum.docID() + " freq=" + freq); |
| } |
| @@ -508,7 +509,7 @@
|
| |
| //System.out.println("TEST: term1=" + term1); |
| docs1 = _TestUtil.docs(random, termsEnum1, liveDocs1, docs1, true); |
| - while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| int d = docs1.docID(); |
| int f = docs1.freq(); |
| info1[len1] = (((long)d)<<32) | f; |
| @@ -542,7 +543,7 @@
|
| |
| //System.out.println("TEST: term1=" + term1); |
| docs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, docs2, true); |
| - while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| int d = r2r1[docs2.docID()]; |
| int f = docs2.freq(); |
| info2[len2] = (((long)d)<<32) | f; |
| @@ -640,7 +641,7 @@
|
| // docIDs are not supposed to be equal |
| //int docID2 = dpEnum2.nextDoc(); |
| //assertEquals(docID1, docID2); |
| - assertTrue(docID1 != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS); |
| |
| int freq1 = dpEnum1.freq(); |
| int freq2 = dpEnum2.freq(); |
| @@ -665,8 +666,8 @@
|
| offsetAtt2.endOffset()); |
| } |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum1.nextDoc()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum2.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc()); |
| } else { |
| dEnum1 = _TestUtil.docs(random, termsEnum1, null, dEnum1, true); |
| dEnum2 = _TestUtil.docs(random, termsEnum2, null, dEnum2, true); |
| @@ -677,12 +678,12 @@
|
| // docIDs are not supposed to be equal |
| //int docID2 = dEnum2.nextDoc(); |
| //assertEquals(docID1, docID2); |
| - assertTrue(docID1 != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS); |
| int freq1 = dEnum1.freq(); |
| int freq2 = dEnum2.freq(); |
| assertEquals(freq1, freq2); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dEnum1.nextDoc()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dEnum2.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum1.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum2.nextDoc()); |
| } |
| } |
| |
| Index: lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (working copy)
|
| @@ -27,6 +27,7 @@
|
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -123,7 +124,7 @@
|
| for (int i=0; i<iter; i++) { |
| tenum.seekCeil(new BytesRef("val")); |
| tdocs = _TestUtil.docs(random, tenum, MultiFields.getLiveDocs(reader), tdocs, false); |
| - while (tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| ret += tdocs.docID(); |
| } |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (working copy)
|
| @@ -35,6 +35,7 @@
|
| import org.apache.lucene.document.IntField; |
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.FieldCache; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| @@ -333,7 +334,7 @@
|
| assertEquals(1, te.docFreq()); |
| docsEnum = _TestUtil.docs(random, te, null, docsEnum, false); |
| final int docID = docsEnum.nextDoc(); |
| - assertTrue(docID != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(docIDToID[docID], termToID.get(expected).intValue()); |
| do { |
| loc++; |
| Index: lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java (working copy)
|
| @@ -237,7 +237,7 @@
|
| assertNotNull(docsEnum); |
| int doc = docsEnum.docID(); |
| assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS); |
| - assertTrue(docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc()); |
| } |
| assertNull(termsEnum.next()); |
| @@ -264,17 +264,17 @@
|
| assertNotNull(dpEnum); |
| int doc = dpEnum.docID(); |
| assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(dpEnum.freq(), positions[i].length); |
| for (int j = 0; j < positions[i].length; j++) { |
| assertEquals(positions[i][j], dpEnum.nextPosition()); |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| doc = dpEnum.docID(); |
| assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertNotNull(dpEnum); |
| assertEquals(dpEnum.freq(), positions[i].length); |
| for (int j = 0; j < positions[i].length; j++) { |
| @@ -282,7 +282,7 @@
|
| assertEquals(j*10, dpEnum.startOffset()); |
| assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset()); |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| } |
| |
| Terms freqVector = reader.get(0).terms(testFields[1]); //no pos, no offset |
| @@ -316,15 +316,15 @@
|
| |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(dpEnum.freq(), positions[i].length); |
| for (int j = 0; j < positions[i].length; j++) { |
| assertEquals(positions[i][j], dpEnum.nextPosition()); |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertNotNull(dpEnum); |
| assertEquals(dpEnum.freq(), positions[i].length); |
| for (int j = 0; j < positions[i].length; j++) { |
| @@ -332,7 +332,7 @@
|
| assertEquals(j*10, dpEnum.startOffset()); |
| assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset()); |
| } |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| } |
| reader.close(); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (working copy)
|
| @@ -31,6 +31,7 @@
|
| import org.apache.lucene.document.FieldType; |
| import org.apache.lucene.document.StringField; |
| import org.apache.lucene.document.TextField; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.store.RAMDirectory; |
| @@ -69,18 +70,18 @@
|
| assertEquals(1, termsEnum.totalTermFreq()); |
| |
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(8, dpEnum.startOffset()); |
| assertEquals(8, dpEnum.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| // Token "abcd" occurred three times |
| assertEquals(new BytesRef("abcd"), termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| assertEquals(3, termsEnum.totalTermFreq()); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| @@ -93,7 +94,7 @@
|
| assertEquals(8, dpEnum.startOffset()); |
| assertEquals(12, dpEnum.endOffset()); |
| |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| assertNull(termsEnum.next()); |
| r.close(); |
| dir.close(); |
| @@ -120,7 +121,7 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| assertEquals(2, termsEnum.totalTermFreq()); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| @@ -128,7 +129,7 @@
|
| dpEnum.nextPosition(); |
| assertEquals(5, dpEnum.startOffset()); |
| assertEquals(9, dpEnum.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| r.close(); |
| dir.close(); |
| @@ -155,7 +156,7 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| assertEquals(2, termsEnum.totalTermFreq()); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| @@ -163,7 +164,7 @@
|
| dpEnum.nextPosition(); |
| assertEquals(8, dpEnum.startOffset()); |
| assertEquals(12, dpEnum.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| r.close(); |
| dir.close(); |
| @@ -194,7 +195,7 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| assertEquals(2, termsEnum.totalTermFreq()); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| @@ -202,7 +203,7 @@
|
| dpEnum.nextPosition(); |
| assertEquals(8, dpEnum.startOffset()); |
| assertEquals(12, dpEnum.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| r.close(); |
| dir.close(); |
| @@ -230,7 +231,7 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| assertEquals(2, termsEnum.totalTermFreq()); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| @@ -238,7 +239,7 @@
|
| dpEnum.nextPosition(); |
| assertEquals(9, dpEnum.startOffset()); |
| assertEquals(13, dpEnum.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc()); |
| |
| r.close(); |
| dir.close(); |
| @@ -266,21 +267,21 @@
|
| assertNotNull(termsEnum.next()); |
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| |
| assertNotNull(termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(11, dpEnum.startOffset()); |
| assertEquals(17, dpEnum.endOffset()); |
| |
| assertNotNull(termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(18, dpEnum.startOffset()); |
| assertEquals(21, dpEnum.endOffset()); |
| @@ -312,14 +313,14 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| |
| assertEquals(1, (int) termsEnum.totalTermFreq()); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(1, dpEnum.startOffset()); |
| assertEquals(7, dpEnum.endOffset()); |
| |
| assertNotNull(termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(8, dpEnum.startOffset()); |
| assertEquals(11, dpEnum.endOffset()); |
| @@ -355,14 +356,14 @@
|
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true); |
| |
| assertEquals(1, (int) termsEnum.totalTermFreq()); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(0, dpEnum.startOffset()); |
| assertEquals(4, dpEnum.endOffset()); |
| |
| assertNotNull(termsEnum.next()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| dpEnum.nextPosition(); |
| assertEquals(6, dpEnum.startOffset()); |
| assertEquals(12, dpEnum.endOffset()); |
| Index: lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java (working copy)
|
| @@ -129,7 +129,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) |
| + public FieldComparator<Object> setNextReader(AtomicReaderContext context) |
| throws IOException { |
| throw new UnsupportedOperationException(UNSUPPORTED_MSG); |
| } |
| @@ -144,7 +144,7 @@
|
| static final class JustCompileFieldComparatorSource extends FieldComparatorSource { |
| |
| @Override |
| - public FieldComparator newComparator(String fieldname, int numHits, |
| + public FieldComparator<?> newComparator(String fieldname, int numHits, |
| int sortPos, boolean reversed) throws IOException { |
| throw new UnsupportedOperationException(UNSUPPORTED_MSG); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java (working copy)
|
| @@ -28,6 +28,7 @@
|
| import org.apache.lucene.index.DocsEnum; |
| import org.apache.lucene.index.IndexReaderContext; |
| import org.apache.lucene.index.Term; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.util.ReaderUtil; |
| import org.apache.lucene.util.TermContext; |
| |
| @@ -121,7 +122,7 @@
|
| @Override |
| public int doc() { |
| if (current == null) { |
| - return DocsEnum.NO_MORE_DOCS; |
| + return DocIdSetIterator.NO_MORE_DOCS; |
| } |
| return current.doc() + leaves[leafOrd].docBase; |
| } |
| @@ -129,7 +130,7 @@
|
| @Override |
| public int start() { |
| if (current == null) { |
| - return DocsEnum.NO_MORE_DOCS; |
| + return DocIdSetIterator.NO_MORE_DOCS; |
| } |
| return current.start(); |
| } |
| @@ -137,7 +138,7 @@
|
| @Override |
| public int end() { |
| if (current == null) { |
| - return DocsEnum.NO_MORE_DOCS; |
| + return DocIdSetIterator.NO_MORE_DOCS; |
| } |
| return current.end(); |
| } |
| Index: lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy)
|
| @@ -51,7 +51,7 @@
|
| INDEX_SIZE = atLeast(2000); |
| index = newDirectory(); |
| RandomIndexWriter writer = new RandomIndexWriter(random, index); |
| - RandomGen random = new RandomGen(this.random); |
| + RandomGen random = new RandomGen(LuceneTestCase.random); |
| for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if to low the |
| // problem doesn't show up |
| Document doc = new Document(); |
| Index: lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java (working copy)
|
| @@ -139,7 +139,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException { |
| + public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException { |
| return new FieldComparator<Integer>() { |
| |
| FieldCache.DocTermsIndex idIndex; |
| @@ -179,7 +179,7 @@
|
| } |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException { |
| idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldname); |
| return this; |
| } |
| Index: lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy)
|
| @@ -218,7 +218,7 @@
|
| false); |
| |
| int count = 0; |
| - assertTrue(tp.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); |
| + assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| // "a" occurs 4 times |
| assertEquals(4, tp.freq()); |
| int expected = 0; |
| @@ -228,7 +228,7 @@
|
| assertEquals(6, tp.nextPosition()); |
| |
| // only one doc has "a" |
| - assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, tp.nextDoc()); |
| |
| IndexSearcher is = newSearcher(readerFromWriter); |
| |
| Index: lucene/core/src/test/org/apache/lucene/search/TestSort.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestSort.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestSort.java (working copy)
|
| @@ -699,7 +699,7 @@
|
| }; |
| |
| @Override |
| - public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { |
| + public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException { |
| docValues = FieldCache.DEFAULT.getInts(context.reader(), "parser", testIntParser, false); |
| return this; |
| } |
| @@ -712,7 +712,7 @@
|
| |
| static class MyFieldComparatorSource extends FieldComparatorSource { |
| @Override |
| - public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { |
| + public FieldComparator<Integer> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { |
| return new MyFieldComparator(numHits); |
| } |
| } |
| Index: lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java (working copy)
|
| @@ -161,7 +161,7 @@
|
| query.add(inner, Occur.MUST); |
| query.add(aQuery, Occur.MUST); |
| query.add(dQuery, Occur.MUST); |
| - Set<String>[] occurList = new Set[] { |
| + @SuppressWarnings({"rawtypes","unchecked"}) Set<String>[] occurList = new Set[] { |
| Collections.singleton(Occur.MUST.toString()), |
| new HashSet<String>(Arrays.asList(Occur.MUST.toString(), Occur.SHOULD.toString())) |
| }; |
| Index: lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java (working copy)
|
| @@ -135,19 +135,19 @@
|
| TermsEnum termsEnum = terms.iterator(null); |
| assertEquals("content", termsEnum.next().utf8ToString()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(expectedPositions[0], dpEnum.nextPosition()); |
| |
| assertEquals("here", termsEnum.next().utf8ToString()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(expectedPositions[1], dpEnum.nextPosition()); |
| |
| assertEquals("some", termsEnum.next().utf8ToString()); |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, false); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, dpEnum.freq()); |
| assertEquals(expectedPositions[2], dpEnum.nextPosition()); |
| |
| @@ -178,7 +178,7 @@
|
| while(true) { |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, shouldBeOffVector); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| |
| dpEnum.nextPosition(); |
| |
| @@ -263,7 +263,7 @@
|
| String text = termsEnum.term().utf8ToString(); |
| docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); |
| |
| - while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| int docId = docs.docID(); |
| int freq = docs.freq(); |
| //System.out.println("Doc Id: " + docId + " freq " + freq); |
| @@ -428,7 +428,7 @@
|
| assertEquals(5, termsEnum.totalTermFreq()); |
| DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(5, dpEnum.freq()); |
| for(int i=0;i<5;i++) { |
| assertEquals(i, dpEnum.nextPosition()); |
| @@ -436,7 +436,7 @@
|
| |
| dpEnum = termsEnum.docsAndPositions(null, dpEnum, true); |
| assertNotNull(dpEnum); |
| - assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(5, dpEnum.freq()); |
| for(int i=0;i<5;i++) { |
| dpEnum.nextPosition(); |
| Index: lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (working copy)
|
| @@ -57,6 +57,7 @@
|
| import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec; |
| import org.apache.lucene.util.UnicodeUtil; |
| import org.apache.lucene.util._TestUtil; |
| +import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput; |
| import org.apache.lucene.util.fst.FST.Arc; |
| import org.apache.lucene.util.fst.FST.BytesReader; |
| import org.apache.lucene.util.fst.PairOutputs.Pair; |
| @@ -494,7 +495,7 @@
|
| |
| if (random.nextBoolean() && fst != null && !willRewrite) { |
| TestFSTs t = new TestFSTs(); |
| - IOContext context = t.newIOContext(random); |
| + IOContext context = LuceneTestCase.newIOContext(random); |
| IndexOutput out = dir.createOutput("fst.bin", context); |
| fst.save(out); |
| out.close(); |
| @@ -984,7 +985,7 @@
|
| if (VERBOSE) { |
| System.out.println(" fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output)); |
| } |
| - final CountMinOutput cmo = prefixes.get(current.input); |
| + final CountMinOutput<T> cmo = prefixes.get(current.input); |
| assertNotNull(cmo); |
| assertTrue(cmo.isLeaf || cmo.isFinal); |
| //if (cmo.isFinal && !cmo.isLeaf) { |
| @@ -1183,7 +1184,7 @@
|
| } |
| |
| final TermsEnum.SeekStatus seekResult = termsEnum.seekCeil(randomTerm); |
| - final BytesRefFSTEnum.InputOutput fstSeekResult = fstEnum.seekCeil(randomTerm); |
| + final InputOutput<Long> fstSeekResult = fstEnum.seekCeil(randomTerm); |
| |
| if (seekResult == TermsEnum.SeekStatus.END) { |
| assertNull("got " + (fstSeekResult == null ? "null" : fstSeekResult.input.utf8ToString()) + " but expected null", fstSeekResult); |
| @@ -1224,7 +1225,7 @@
|
| dir.close(); |
| } |
| |
| - private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum fstEnum, boolean storeOrd) throws Exception { |
| + private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum<?> fstEnum, boolean storeOrd) throws Exception { |
| if (termsEnum.term() == null) { |
| assertNull(fstEnum.current()); |
| } else { |
| @@ -1829,7 +1830,7 @@
|
| |
| public int verifyStateAndBelow(FST<Object> fst, Arc<Object> arc, int depth) |
| throws IOException { |
| - if (fst.targetHasArcs(arc)) { |
| + if (FST.targetHasArcs(arc)) { |
| int childCount = 0; |
| for (arc = fst.readFirstTargetArc(arc, arc);; |
| arc = fst.readNextArc(arc), childCount++) |
| Index: lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java (working copy)
|
| @@ -126,7 +126,7 @@
|
| src.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl); |
| } |
| |
| - @SuppressWarnings("unchecked") |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| public void testInvalidArguments() throws Exception { |
| try { |
| AttributeSource src = new AttributeSource(); |
| Index: lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java
|
| ===================================================================
|
| --- lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java (revision 1297029)
|
| +++ lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java (working copy)
|
| @@ -69,7 +69,7 @@
|
| assertEquals(0, VirtualMethod.compareImplementationDistance(TestClass5.class, publicTestMethod, protectedTestMethod)); |
| } |
| |
| - @SuppressWarnings("unchecked") |
| + @SuppressWarnings({"rawtypes","unchecked"}) |
| public void testExceptions() { |
| try { |
| // cast to Class to remove generics: |
| Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
|
| ===================================================================
|
| --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (revision 1297029)
|
| +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (working copy)
|
| @@ -12,6 +12,7 @@
|
| import org.apache.lucene.index.IndexWriterConfig; |
| import org.apache.lucene.index.MultiFields; |
| import org.apache.lucene.index.Term; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.RAMDirectory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.Version; |
| @@ -285,7 +286,7 @@
|
| "content", |
| new BytesRef("another"), |
| false); |
| - assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(1, tps.freq()); |
| assertEquals(3, tps.nextPosition()); |
| |
| Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
|
| ===================================================================
|
| --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (revision 1297029)
|
| +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (working copy)
|
| @@ -31,6 +31,7 @@
|
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.IndexWriterConfig; |
| import org.apache.lucene.index.MultiFields; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.RAMDirectory; |
| @@ -102,7 +103,7 @@
|
| MultiFields.getLiveDocs(reader), |
| null, |
| false); |
| - assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| td = _TestUtil.docs(random, |
| reader, |
| "partnum", |
| @@ -110,7 +111,7 @@
|
| MultiFields.getLiveDocs(reader), |
| null, |
| false); |
| - assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| } |
| |
| // LUCENE-1441 |
| Index: modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
|
| ===================================================================
|
| --- modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (revision 1297029)
|
| +++ modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (working copy)
|
| @@ -59,7 +59,7 @@
|
| termAtt.copyBuffer(t.buffer(), 0, t.length()); |
| offsetAtt.setOffset(t.startOffset(), t.endOffset()); |
| posIncrAtt.setPositionIncrement(t.getPositionIncrement()); |
| - typeAtt.setType(TypeAttributeImpl.DEFAULT_TYPE); |
| + typeAtt.setType(TypeAttribute.DEFAULT_TYPE); |
| return true; |
| } else { |
| return false; |
| @@ -1018,14 +1018,14 @@
|
| assertTokenStreamContents(filter, |
| new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"}, |
| new int[]{0,0,7,7,14,14,19}, new int[]{6,13,13,18,18,27,27}, |
| - new String[]{TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE}, |
| + new String[]{TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE}, |
| new int[]{1,0,1,0,1,0,1} |
| ); |
| wsTokenizer.reset(new StringReader("please divide this sentence")); |
| assertTokenStreamContents(filter, |
| new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"}, |
| new int[]{0,0,7,7,14,14,19}, new int[]{6,13,13,18,18,27,27}, |
| - new String[]{TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE}, |
| + new String[]{TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE}, |
| new int[]{1,0,1,0,1,0,1} |
| ); |
| } |
| Index: modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
|
| ===================================================================
|
| --- modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (revision 1297029)
|
| +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (working copy)
|
| @@ -35,6 +35,7 @@
|
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.Terms; |
| import org.apache.lucene.index.TermsEnum; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.AttributeSource; |
| import org.apache.lucene.util.English; |
| @@ -110,7 +111,7 @@
|
| termsEnum.next(); |
| assertEquals(2, termsEnum.totalTermFreq()); |
| DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null, true); |
| - assertTrue(positions.nextDoc() != DocsEnum.NO_MORE_DOCS); |
| + assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); |
| assertEquals(2, positions.freq()); |
| positions.nextPosition(); |
| assertEquals(0, positions.startOffset()); |
| @@ -118,7 +119,7 @@
|
| positions.nextPosition(); |
| assertEquals(8, positions.startOffset()); |
| assertEquals(12, positions.endOffset()); |
| - assertEquals(DocsEnum.NO_MORE_DOCS, positions.nextDoc()); |
| + assertEquals(DocIdSetIterator.NO_MORE_DOCS, positions.nextDoc()); |
| r.close(); |
| dir.close(); |
| } |
| Index: modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java
|
| ===================================================================
|
| --- modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java (revision 1297029)
|
| +++ modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java (working copy)
|
| @@ -29,6 +29,7 @@
|
| */ |
| |
| import com.ibm.icu.lang.UCharacter; |
| +import com.ibm.icu.lang.UCharacterEnums.ECharacterCategory; |
| import com.ibm.icu.lang.UScript; |
| import com.ibm.icu.text.UTF16; |
| |
| @@ -110,7 +111,7 @@
|
| * value — should inherit the script value of its base character. |
| */ |
| if (isSameScript(scriptCode, sc) |
| - || UCharacter.getType(ch) == UCharacter.NON_SPACING_MARK) { |
| + || UCharacter.getType(ch) == ECharacterCategory.NON_SPACING_MARK) { |
| index += UTF16.getCharCount(ch); |
| |
| /* |
| Index: modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java
|
| ===================================================================
|
| --- modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java (revision 1297029)
|
| +++ modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java (working copy)
|
| @@ -85,7 +85,7 @@
|
| * @return unicode String |
| */ |
| public String getCCByGB2312Id(int ccid) { |
| - if (ccid < 0 || ccid > WordDictionary.GB2312_CHAR_NUM) |
| + if (ccid < 0 || ccid > AbstractDictionary.GB2312_CHAR_NUM) |
| return ""; |
| int cc1 = ccid / 94 + 161; |
| int cc2 = ccid % 94 + 161; |
| Index: modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java
|
| ===================================================================
|
| --- modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java (revision 1297029)
|
| +++ modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java (working copy)
|
| @@ -21,6 +21,7 @@
|
| import org.apache.lucene.benchmark.quality.QualityQueryParser; |
| import org.apache.lucene.queryparser.classic.ParseException; |
| import org.apache.lucene.queryparser.classic.QueryParser; |
| +import org.apache.lucene.queryparser.classic.QueryParserBase; |
| import org.apache.lucene.search.BooleanClause; |
| import org.apache.lucene.search.BooleanQuery; |
| import org.apache.lucene.search.Query; |
| @@ -66,7 +67,7 @@
|
| } |
| BooleanQuery bq = new BooleanQuery(); |
| for (int i = 0; i < qqNames.length; i++) |
| - bq.add(qp.parse(QueryParser.escape(qq.getValue(qqNames[i]))), BooleanClause.Occur.SHOULD); |
| + bq.add(qp.parse(QueryParserBase.escape(qq.getValue(qqNames[i]))), BooleanClause.Occur.SHOULD); |
| |
| return bq; |
| } |
| Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
|
| ===================================================================
|
| --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 1297029)
|
| +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (working copy)
|
| @@ -55,6 +55,7 @@
|
| import org.apache.lucene.index.SlowCompositeReaderWrapper; |
| import org.apache.lucene.index.Terms; |
| import org.apache.lucene.index.TermsEnum; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.FieldCache.DocTermsIndex; |
| import org.apache.lucene.search.FieldCache; |
| import org.apache.lucene.store.Directory; |
| @@ -497,7 +498,7 @@
|
| DocsEnum docs = null; |
| while(termsEnum.next() != null) { |
| docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(reader), docs, true); |
| - while(docs.nextDoc() != docs.NO_MORE_DOCS) { |
| + while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| totalTokenCount2 += docs.freq(); |
| } |
| } |
| Index: modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParentArray.java
|
| ===================================================================
|
| --- modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParentArray.java (revision 1297029)
|
| +++ modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/ParentArray.java (working copy)
|
| @@ -6,6 +6,7 @@
|
| import org.apache.lucene.index.DocsAndPositionsEnum; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.MultiFields; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.util.Bits; |
| import org.apache.lucene.util.BytesRef; |
| |
| @@ -106,7 +107,7 @@
|
| DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(indexReader, liveDocs, |
| Consts.FIELD_PAYLOADS, new BytesRef(Consts.PAYLOAD_PARENT), |
| false); |
| - if ((positions == null || positions.advance(first) == DocsAndPositionsEnum.NO_MORE_DOCS) && first < num) { |
| + if ((positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) && first < num) { |
| throw new CorruptIndexException("Missing parent data for category " + first); |
| } |
| for (int i=first; i<num; i++) { |
| @@ -124,7 +125,7 @@
|
| // increment we added originally, so we get here the right numbers: |
| prefetchParentOrdinal[i] = positions.nextPosition(); |
| |
| - if (positions.nextDoc() == DocsAndPositionsEnum.NO_MORE_DOCS) { |
| + if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { |
| if ( i+1 < num ) { |
| throw new CorruptIndexException( |
| "Missing parent data for category "+(i+1)); |
| Index: modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
|
| ===================================================================
|
| --- modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java (revision 1297029)
|
| +++ modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java (working copy)
|
| @@ -69,7 +69,7 @@
|
| public BoostedWeight(IndexSearcher searcher) throws IOException { |
| this.searcher = searcher; |
| this.qWeight = q.createWeight(searcher); |
| - this.fcontext = boostVal.newContext(searcher); |
| + this.fcontext = ValueSource.newContext(searcher); |
| boostVal.createWeight(fcontext,searcher); |
| } |
| |
| Index: modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
|
| ===================================================================
|
| --- modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java (revision 1297029)
|
| +++ modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java (working copy)
|
| @@ -67,7 +67,7 @@
|
| |
| public FunctionWeight(IndexSearcher searcher) throws IOException { |
| this.searcher = searcher; |
| - this.context = func.newContext(searcher); |
| + this.context = ValueSource.newContext(searcher); |
| func.createWeight(context, searcher); |
| } |
| |
| Index: modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java
|
| ===================================================================
|
| --- modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java (revision 1297029)
|
| +++ modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java (working copy)
|
| @@ -19,6 +19,7 @@
|
| |
| import org.apache.lucene.index.*; |
| import org.apache.lucene.search.DocIdSet; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.Filter; |
| import org.apache.lucene.util.Bits; |
| import org.apache.lucene.util.BytesRef; |
| @@ -81,7 +82,7 @@
|
| br.copyBytes(term.bytes()); |
| if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) { |
| docs = termsEnum.docs(acceptDocs, docs, false); |
| - while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { |
| + while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
| result.set(docs.docID()); |
| } |
| } |
| Index: modules/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java
|
| ===================================================================
|
| --- modules/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java (revision 1297029)
|
| +++ modules/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java (working copy)
|
| @@ -17,6 +17,7 @@
|
| * limitations under the License. |
| */ |
| import org.apache.lucene.queryparser.classic.QueryParser; |
| +import org.apache.lucene.queryparser.classic.QueryParserBase; |
| |
| import java.util.HashMap; |
| import java.util.Map; |
| @@ -139,7 +140,7 @@
|
| * a backslash character. |
| */ |
| public String escapeExtensionField(String extfield) { |
| - return QueryParser.escape(extfield); |
| + return QueryParserBase.escape(extfield); |
| } |
| |
| /** |
| Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java
|
| ===================================================================
|
| --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java (revision 1297029)
|
| +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java (working copy)
|
| @@ -88,7 +88,7 @@
|
| qp.setPhraseSlop(0); |
| |
| // non-default operator: |
| - qp.setDefaultOperator(QueryParser.AND_OPERATOR); |
| + qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); |
| assertEquals("+(multi multi2) +foo", qp.parse("multi foo").toString()); |
| |
| } |
| Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
|
| ===================================================================
|
| --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (revision 1297029)
|
| +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (working copy)
|
| @@ -117,7 +117,7 @@
|
| assertEquals("(b:one t:one) f:two", q.toString()); |
| |
| // AND mode: |
| - mfqp.setDefaultOperator(QueryParser.AND_OPERATOR); |
| + mfqp.setDefaultOperator(QueryParserBase.AND_OPERATOR); |
| q = mfqp.parse("one two"); |
| assertEquals("+(b:one t:one) +(b:two t:two)", q.toString()); |
| q = mfqp.parse("\"aa bb cc\" \"dd ee\""); |
| Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
|
| ===================================================================
|
| --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java (revision 1297029)
|
| +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java (working copy)
|
| @@ -32,7 +32,7 @@
|
| if (a == null) |
| a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); |
| QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); |
| - qp.setDefaultOperator(QueryParser.OR_OPERATOR); |
| + qp.setDefaultOperator(QueryParserBase.OR_OPERATOR); |
| return qp; |
| } |
| } |
| Index: modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java
|
| ===================================================================
|
| --- modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java (revision 1297029)
|
| +++ modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java (working copy)
|
| @@ -22,6 +22,7 @@
|
| import org.apache.lucene.analysis.MockTokenizer; |
| import org.apache.lucene.queryparser.classic.ParseException; |
| import org.apache.lucene.queryparser.classic.QueryParser; |
| +import org.apache.lucene.queryparser.classic.QueryParserBase; |
| import org.apache.lucene.queryparser.util.QueryParserTestBase; |
| import org.apache.lucene.search.BooleanClause; |
| import org.apache.lucene.search.BooleanQuery; |
| @@ -47,7 +48,7 @@
|
| QueryParser qp = extensions == null ? new ExtendableQueryParser( |
| TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser( |
| TEST_VERSION_CURRENT, "field", a, extensions); |
| - qp.setDefaultOperator(QueryParser.OR_OPERATOR); |
| + qp.setDefaultOperator(QueryParserBase.OR_OPERATOR); |
| return qp; |
| } |
| |
| Index: modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
|
| ===================================================================
|
| --- modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (revision 1297029)
|
| +++ modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (working copy)
|
| @@ -38,6 +38,7 @@
|
| import org.apache.lucene.queryparser.classic.CharStream; |
| import org.apache.lucene.queryparser.classic.ParseException; |
| import org.apache.lucene.queryparser.classic.QueryParser; |
| +import org.apache.lucene.queryparser.classic.QueryParserBase; |
| import org.apache.lucene.queryparser.classic.QueryParserTokenManager; |
| import org.apache.lucene.search.*; |
| import org.apache.lucene.search.BooleanClause.Occur; |
| @@ -160,7 +161,7 @@
|
| |
| public void assertEscapedQueryEquals(String query, Analyzer a, String result) |
| throws Exception { |
| - String escapedQuery = QueryParser.escape(query); |
| + String escapedQuery = QueryParserBase.escape(query); |
| if (!escapedQuery.equals(result)) { |
| fail("Query /" + query + "/ yielded /" + escapedQuery |
| + "/, expecting /" + result + "/"); |
| @@ -200,7 +201,7 @@
|
| if (a == null) |
| a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); |
| QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); |
| - qp.setDefaultOperator(QueryParser.AND_OPERATOR); |
| + qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); |
| return qp.parse(query); |
| } |
| |
| @@ -382,11 +383,11 @@
|
| |
| QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random)); |
| // make sure OR is the default: |
| - assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator()); |
| - qp.setDefaultOperator(QueryParser.AND_OPERATOR); |
| - assertEquals(QueryParser.AND_OPERATOR, qp.getDefaultOperator()); |
| - qp.setDefaultOperator(QueryParser.OR_OPERATOR); |
| - assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator()); |
| + assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator()); |
| + qp.setDefaultOperator(QueryParserBase.AND_OPERATOR); |
| + assertEquals(QueryParserBase.AND_OPERATOR, qp.getDefaultOperator()); |
| + qp.setDefaultOperator(QueryParserBase.OR_OPERATOR); |
| + assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator()); |
| } |
| |
| public void testPunct() throws Exception { |
| Index: modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
|
| ===================================================================
|
| --- modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (revision 1297029)
|
| +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (working copy)
|
| @@ -412,7 +412,7 @@
|
| assertEquals(4, searchers.size()); |
| int num_field2 = this.numdoc(); |
| assertEquals(num_field2, num_field1 + 1); |
| - int numThreads = 5 + this.random.nextInt(5); |
| + int numThreads = 5 + LuceneTestCase.random.nextInt(5); |
| ExecutorService executor = Executors.newFixedThreadPool(numThreads); |
| SpellCheckWorker[] workers = new SpellCheckWorker[numThreads]; |
| for (int i = 0; i < numThreads; i++) { |
| Index: solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
|
| ===================================================================
|
| --- solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java (revision 1297029)
|
| +++ solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java (working copy)
|
| @@ -19,7 +19,9 @@
|
| import com.sun.mail.imap.IMAPMessage; |
| |
| import org.apache.tika.Tika; |
| +import org.apache.tika.metadata.HttpHeaders; |
| import org.apache.tika.metadata.Metadata; |
| +import org.apache.tika.metadata.TikaMetadataKeys; |
| import org.slf4j.Logger; |
| import org.slf4j.LoggerFactory; |
| |
| @@ -169,8 +171,8 @@
|
| InputStream is = part.getInputStream(); |
| String fileName = part.getFileName(); |
| Metadata md = new Metadata(); |
| - md.set(Metadata.CONTENT_TYPE, ctype.getBaseType().toLowerCase(Locale.ENGLISH)); |
| - md.set(Metadata.RESOURCE_NAME_KEY, fileName); |
| + md.set(HttpHeaders.CONTENT_TYPE, ctype.getBaseType().toLowerCase(Locale.ENGLISH)); |
| + md.set(TikaMetadataKeys.RESOURCE_NAME_KEY, fileName); |
| String content = tika.parseToString(is, md); |
| if (disp != null && disp.equalsIgnoreCase(Part.ATTACHMENT)) { |
| if (row.get(ATTACHMENT) == null) |
| Index: solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
|
| ===================================================================
|
| --- solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java (revision 1297029)
|
| +++ solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java (working copy)
|
| @@ -34,6 +34,7 @@
|
| |
| import javax.xml.transform.OutputKeys; |
| import javax.xml.transform.TransformerConfigurationException; |
| +import javax.xml.transform.TransformerFactory; |
| import javax.xml.transform.sax.SAXTransformerFactory; |
| import javax.xml.transform.sax.TransformerHandler; |
| import javax.xml.transform.stream.StreamResult; |
| @@ -142,7 +143,7 @@
|
| private static ContentHandler getHtmlHandler(Writer writer) |
| throws TransformerConfigurationException { |
| SAXTransformerFactory factory = (SAXTransformerFactory) |
| - SAXTransformerFactory.newInstance(); |
| + TransformerFactory.newInstance(); |
| TransformerHandler handler = factory.newTransformerHandler(); |
| handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "html"); |
| handler.setResult(new StreamResult(writer)); |
| @@ -185,7 +186,7 @@
|
| private static ContentHandler getXmlContentHandler(Writer writer) |
| throws TransformerConfigurationException { |
| SAXTransformerFactory factory = (SAXTransformerFactory) |
| - SAXTransformerFactory.newInstance(); |
| + TransformerFactory.newInstance(); |
| TransformerHandler handler = factory.newTransformerHandler(); |
| handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml"); |
| handler.setResult(new StreamResult(writer)); |
| Index: solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java
|
| ===================================================================
|
| --- solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java (revision 1297029)
|
| +++ solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java (working copy)
|
| @@ -211,7 +211,7 @@
|
| tmpdir.delete(); |
| tmpdir.mkdir(); |
| tmpdir.deleteOnExit(); |
| - TestFileListEntityProcessor.createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"), |
| + AbstractDataImportHandlerTestCase.createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"), |
| false); |
| Map entityAttrs = createMap("name", "e", |
| XPathEntityProcessor.USE_SOLR_ADD_SCHEMA, "true", "xsl", "" |
| Index: solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java
|
| ===================================================================
|
| --- solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java (revision 1297029)
|
| +++ solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java (working copy)
|
| @@ -36,7 +36,9 @@
|
| import org.apache.solr.update.processor.UpdateRequestProcessor; |
| import org.apache.tika.config.TikaConfig; |
| import org.apache.tika.exception.TikaException; |
| +import org.apache.tika.metadata.HttpHeaders; |
| import org.apache.tika.metadata.Metadata; |
| +import org.apache.tika.metadata.TikaMetadataKeys; |
| import org.apache.tika.mime.MediaType; |
| import org.apache.tika.parser.AutoDetectParser; |
| import org.apache.tika.parser.DefaultParser; |
| @@ -150,11 +152,11 @@
|
| // then Tika can make use of it in guessing the appropriate MIME type: |
| String resourceName = req.getParams().get(ExtractingParams.RESOURCE_NAME, null); |
| if (resourceName != null) { |
| - metadata.add(Metadata.RESOURCE_NAME_KEY, resourceName); |
| + metadata.add(TikaMetadataKeys.RESOURCE_NAME_KEY, resourceName); |
| } |
| // Provide stream's content type as hint for auto detection |
| if(stream.getContentType() != null) { |
| - metadata.add(Metadata.CONTENT_TYPE, stream.getContentType()); |
| + metadata.add(HttpHeaders.CONTENT_TYPE, stream.getContentType()); |
| } |
| |
| InputStream inputStream = null; |
| @@ -167,7 +169,7 @@
|
| // HtmlParser and TXTParser regard Metadata.CONTENT_ENCODING in metadata |
| String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType()); |
| if(charset != null){ |
| - metadata.add(Metadata.CONTENT_ENCODING, charset); |
| + metadata.add(HttpHeaders.CONTENT_ENCODING, charset); |
| } |
| |
| String xpathExpr = params.get(ExtractingParams.XPATH_EXPRESSION); |
| Index: solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java
|
| ===================================================================
|
| --- solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java (revision 1297029)
|
| +++ solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java (working copy)
|
| @@ -24,6 +24,7 @@
|
| import org.apache.solr.schema.IndexSchema; |
| import org.apache.solr.schema.SchemaField; |
| import org.apache.tika.metadata.Metadata; |
| +import org.apache.tika.metadata.TikaMetadataKeys; |
| import org.slf4j.Logger; |
| import org.slf4j.LoggerFactory; |
| import org.xml.sax.Attributes; |
| @@ -191,7 +192,7 @@
|
| if (sf==null && unknownFieldPrefix.length() > 0) { |
| name = unknownFieldPrefix + name; |
| sf = schema.getFieldOrNull(name); |
| - } else if (sf == null && defaultField.length() > 0 && name.equals(Metadata.RESOURCE_NAME_KEY) == false /*let the fall through below handle this*/){ |
| + } else if (sf == null && defaultField.length() > 0 && name.equals(TikaMetadataKeys.RESOURCE_NAME_KEY) == false /*let the fall through below handle this*/){ |
| name = defaultField; |
| sf = schema.getFieldOrNull(name); |
| } |
| @@ -201,7 +202,7 @@
|
| // ExtractingDocumentLoader.load(). You shouldn't have to define a mapping for this |
| // field just because you specified a resource.name parameter to the handler, should |
| // you? |
| - if (sf == null && unknownFieldPrefix.length()==0 && name == Metadata.RESOURCE_NAME_KEY) { |
| + if (sf == null && unknownFieldPrefix.length()==0 && name == TikaMetadataKeys.RESOURCE_NAME_KEY) { |
| return; |
| } |
| |
| Index: solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
|
| ===================================================================
|
| --- solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java (revision 1297029)
|
| +++ solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java (working copy)
|
| @@ -25,6 +25,7 @@
|
| import org.apache.velocity.Template; |
| import org.apache.velocity.VelocityContext; |
| import org.apache.velocity.app.VelocityEngine; |
| +import org.apache.velocity.runtime.RuntimeConstants; |
| import org.apache.velocity.tools.generic.*; |
| |
| import java.io.*; |
| @@ -117,14 +118,14 @@
|
| if (template_root != null) { |
| baseDir = new File(template_root); |
| } |
| - engine.setProperty(VelocityEngine.FILE_RESOURCE_LOADER_PATH, baseDir.getAbsolutePath()); |
| + engine.setProperty(RuntimeConstants.FILE_RESOURCE_LOADER_PATH, baseDir.getAbsolutePath()); |
| engine.setProperty("params.resource.loader.instance", new SolrParamResourceLoader(request)); |
| SolrVelocityResourceLoader resourceLoader = |
| new SolrVelocityResourceLoader(request.getCore().getSolrConfig().getResourceLoader()); |
| engine.setProperty("solr.resource.loader.instance", resourceLoader); |
| |
| // TODO: Externalize Velocity properties |
| - engine.setProperty(VelocityEngine.RESOURCE_LOADER, "params,file,solr"); |
| + engine.setProperty(RuntimeConstants.RESOURCE_LOADER, "params,file,solr"); |
| String propFile = request.getParams().get("v.properties"); |
| try { |
| if (propFile == null) |
| Index: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy)
|
| @@ -27,6 +27,7 @@
|
| import org.apache.lucene.document.Field; |
| import org.apache.lucene.index.*; |
| import org.apache.lucene.index.FieldInfo.IndexOptions; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.CharsRef; |
| @@ -399,7 +400,7 @@
|
| false); |
| if (docsEnum != null) { |
| int docId; |
| - if ((docId = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + if ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| return reader.document(docId); |
| } |
| } |
| Index: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java (working copy)
|
| @@ -150,7 +150,7 @@
|
| // the CMD_GET_FILE_LIST command. |
| // |
| core.getDeletionPolicy().setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration); |
| - rsp.add(CMD_INDEX_VERSION, core.getDeletionPolicy().getCommitTimestamp(commitPoint)); |
| + rsp.add(CMD_INDEX_VERSION, IndexDeletionPolicyWrapper.getCommitTimestamp(commitPoint)); |
| rsp.add(GENERATION, commitPoint.getGeneration()); |
| } else { |
| // This happens when replication is not configured to happen after startup and no commit/optimize |
| @@ -229,7 +229,7 @@
|
| for (IndexCommit c : commits.values()) { |
| try { |
| NamedList<Object> nl = new NamedList<Object>(); |
| - nl.add("indexVersion", core.getDeletionPolicy().getCommitTimestamp(c)); |
| + nl.add("indexVersion", IndexDeletionPolicyWrapper.getCommitTimestamp(c)); |
| nl.add(GENERATION, c.getGeneration()); |
| nl.add(CMD_GET_FILE_LIST, c.getFileNames()); |
| l.add(nl); |
| Index: solr/core/src/java/org/apache/solr/request/SimpleFacets.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/request/SimpleFacets.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/request/SimpleFacets.java (working copy)
|
| @@ -821,7 +821,7 @@
|
| } |
| |
| final String gap = required.getFieldParam(f,FacetParams.FACET_DATE_GAP); |
| - final DateMathParser dmp = new DateMathParser(ft.UTC, Locale.US); |
| + final DateMathParser dmp = new DateMathParser(DateField.UTC, Locale.US); |
| |
| final int minCount = params.getFieldInt(f,FacetParams.FACET_MINCOUNT, 0); |
| |
| Index: solr/core/src/java/org/apache/solr/schema/LatLonType.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/schema/LatLonType.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/schema/LatLonType.java (working copy)
|
| @@ -349,8 +349,8 @@
|
| |
| public SpatialWeight(IndexSearcher searcher) throws IOException { |
| this.searcher = searcher; |
| - this.latContext = latSource.newContext(searcher); |
| - this.lonContext = lonSource.newContext(searcher); |
| + this.latContext = ValueSource.newContext(searcher); |
| + this.lonContext = ValueSource.newContext(searcher); |
| latSource.createWeight(latContext, searcher); |
| lonSource.createWeight(lonContext, searcher); |
| } |
| Index: solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java (working copy)
|
| @@ -103,25 +103,25 @@
|
| final String minShouldMatch = |
| DisMaxQParser.parseMinShouldMatch(req.getSchema(), solrParams); |
| |
| - queryFields = U.parseFieldBoosts(solrParams.getParams(DMP.QF)); |
| + queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF)); |
| if (0 == queryFields.size()) { |
| queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f); |
| } |
| |
| // Boosted phrase of the full query string |
| Map<String,Float> phraseFields = |
| - U.parseFieldBoosts(solrParams.getParams(DMP.PF)); |
| + SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.PF)); |
| // Boosted Bi-Term Shingles from the query string |
| Map<String,Float> phraseFields2 = |
| - U.parseFieldBoosts(solrParams.getParams("pf2")); |
| + SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf2")); |
| // Boosted Tri-Term Shingles from the query string |
| Map<String,Float> phraseFields3 = |
| - U.parseFieldBoosts(solrParams.getParams("pf3")); |
| + SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf3")); |
| |
| - float tiebreaker = solrParams.getFloat(DMP.TIE, 0.0f); |
| + float tiebreaker = solrParams.getFloat(DisMaxParams.TIE, 0.0f); |
| |
| - int pslop = solrParams.getInt(DMP.PS, 0); |
| - int qslop = solrParams.getInt(DMP.QS, 0); |
| + int pslop = solrParams.getInt(DisMaxParams.PS, 0); |
| + int qslop = solrParams.getInt(DisMaxParams.QS, 0); |
| |
| // remove stopwords from mandatory "matching" component? |
| boolean stopwords = solrParams.getBool("stopwords", true); |
| @@ -137,7 +137,7 @@
|
| altUserQuery = null; |
| if( userQuery == null || userQuery.length() < 1 ) { |
| // If no query is specified, we may have an alternate |
| - String altQ = solrParams.get( DMP.ALTQ ); |
| + String altQ = solrParams.get( DisMaxParams.ALTQ ); |
| if (altQ != null) { |
| altQParser = subQuery(altQ, null); |
| altUserQuery = altQParser.getQuery(); |
| @@ -248,7 +248,7 @@
|
| |
| if (parsedUserQuery != null && doMinMatched) { |
| if (parsedUserQuery instanceof BooleanQuery) { |
| - U.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch); |
| + SolrPluginUtils.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch); |
| } |
| } |
| |
| @@ -285,8 +285,8 @@
|
| |
| if (parsedUserQuery instanceof BooleanQuery) { |
| BooleanQuery t = new BooleanQuery(); |
| - U.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery); |
| - U.setMinShouldMatch(t, minShouldMatch); |
| + SolrPluginUtils.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery); |
| + SolrPluginUtils.setMinShouldMatch(t, minShouldMatch); |
| parsedUserQuery = t; |
| } |
| } |
| @@ -326,7 +326,7 @@
|
| |
| |
| /* * * Boosting Query * * */ |
| - boostParams = solrParams.getParams(DMP.BQ); |
| + boostParams = solrParams.getParams(DisMaxParams.BQ); |
| //List<Query> boostQueries = U.parseQueryStrings(req, boostParams); |
| boostQueries=null; |
| if (boostParams!=null && boostParams.length>0) { |
| @@ -345,7 +345,7 @@
|
| |
| /* * * Boosting Functions * * */ |
| |
| - String[] boostFuncs = solrParams.getParams(DMP.BF); |
| + String[] boostFuncs = solrParams.getParams(DisMaxParams.BF); |
| if (null != boostFuncs && 0 != boostFuncs.length) { |
| for (String boostFunc : boostFuncs) { |
| if(null == boostFunc || "".equals(boostFunc)) continue; |
| Index: solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java
|
| ===================================================================
|
| --- solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java (revision 1297029)
|
| +++ solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java (working copy)
|
| @@ -36,6 +36,7 @@
|
| import org.apache.lucene.queries.function.FunctionValues; |
| import org.apache.lucene.queries.function.ValueSource; |
| import org.apache.lucene.queries.function.docvalues.FloatDocValues; |
| +import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.ReaderUtil; |
| import org.apache.solr.core.SolrCore; |
| @@ -278,7 +279,7 @@
|
| |
| docsEnum = termsEnum.docs(null, docsEnum, false); |
| int doc; |
| - while ((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) { |
| + while ((doc = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { |
| vals[doc] = fval; |
| } |
| } |
| Index: solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java (working copy)
|
| @@ -21,6 +21,7 @@
|
| import java.util.HashMap; |
| import java.util.Map; |
| |
| +import org.apache.lucene.analysis.BaseTokenStreamTestCase; |
| import org.apache.lucene.analysis.MockTokenizer; |
| import org.apache.lucene.analysis.TokenStream; |
| import org.apache.solr.SolrTestCaseJ4; |
| @@ -211,12 +212,12 @@
|
| |
| TokenStream ts = factoryDefault.create( |
| new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false)); |
| - BaseTokenTestCase.assertTokenStreamContents(ts, |
| + BaseTokenStreamTestCase.assertTokenStreamContents(ts, |
| new String[] { "I", "borrowed", "5", "400", "00", "540000", "at", "25", "interest", "rate", "interestrate" }); |
| |
| ts = factoryDefault.create( |
| new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false)); |
| - BaseTokenTestCase.assertTokenStreamContents(ts, |
| + BaseTokenStreamTestCase.assertTokenStreamContents(ts, |
| new String[] { "foo", "bar", "foobar" }); |
| |
| |
| @@ -229,13 +230,13 @@
|
| |
| ts = factoryCustom.create( |
| new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false)); |
| - BaseTokenTestCase.assertTokenStreamContents(ts, |
| + BaseTokenStreamTestCase.assertTokenStreamContents(ts, |
| new String[] { "I", "borrowed", "$5,400.00", "at", "25%", "interest", "rate", "interestrate" }); |
| |
| /* test custom behavior with a char > 0x7F, because we had to make a larger byte[] */ |
| ts = factoryCustom.create( |
| new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false)); |
| - BaseTokenTestCase.assertTokenStreamContents(ts, |
| + BaseTokenStreamTestCase.assertTokenStreamContents(ts, |
| new String[] { "foo\u200Dbar" }); |
| } |
| } |
| Index: solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java (working copy)
|
| @@ -20,6 +20,7 @@
|
| import java.io.File; |
| import java.util.concurrent.atomic.AtomicInteger; |
| |
| +import junit.framework.Assert; |
| import junit.framework.TestCase; |
| |
| import org.apache.solr.common.cloud.SolrZkClient; |
| @@ -91,7 +92,7 @@
|
| |
| try { |
| zkClient.makePath("collections/collection2", false); |
| - TestCase.fail("Server should be down here"); |
| + Assert.fail("Server should be down here"); |
| } catch (KeeperException.ConnectionLossException e) { |
| |
| } |
| Index: solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java (working copy)
|
| @@ -71,7 +71,7 @@
|
| + System.getProperty("file.separator") + "data"); |
| dataDir.mkdirs(); |
| |
| - solrConfig = h.createConfig("solrconfig.xml"); |
| + solrConfig = TestHarness.createConfig("solrconfig.xml"); |
| h = new TestHarness( dataDir.getAbsolutePath(), |
| solrConfig, |
| "schema12.xml"); |
| Index: solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java (working copy)
|
| @@ -17,12 +17,14 @@
|
| * limitations under the License. |
| */ |
| |
| +import junit.framework.Assert; |
| import junit.framework.TestCase; |
| |
| import org.apache.solr.BaseDistributedSearchTestCase; |
| import org.apache.solr.client.solrj.SolrServer; |
| import org.apache.solr.client.solrj.response.QueryResponse; |
| import org.apache.solr.common.params.ModifiableSolrParams; |
| +import org.apache.solr.common.params.SpellingParams; |
| import org.apache.solr.common.util.NamedList; |
| |
| /** |
| @@ -85,7 +87,7 @@
|
| NamedList sc = (NamedList) nl.get("spellcheck"); |
| NamedList sug = (NamedList) sc.get("suggestions"); |
| if(sug.size()==0) { |
| - TestCase.fail("Control data did not return any suggestions."); |
| + Assert.fail("Control data did not return any suggestions."); |
| } |
| } |
| |
| @@ -124,16 +126,16 @@
|
| // we care only about the spellcheck results |
| handle.put("response", SKIP); |
| |
| - q("q", "*:*", "spellcheck", "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "qt", "spellCheckCompRH", "shards.qt", "spellCheckCompRH"); |
| + q("q", "*:*", "spellcheck", "true", SpellingParams.SPELLCHECK_BUILD, "true", "qt", "spellCheckCompRH", "shards.qt", "spellCheckCompRH"); |
| |
| query("q", "*:*", "fl", "id,lowerfilt", "spellcheck.q","toyata", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName); |
| - query("q", "*:*", "fl", "id,lowerfilt", "spellcheck.q","toyata", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true"); |
| - query("q", "*:*", "fl", "id,lowerfilt", "spellcheck.q","bluo", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_COUNT, "4"); |
| - query("q", "The quick reb fox jumped over the lazy brown dogs", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_COUNT, "4", SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| + query("q", "*:*", "fl", "id,lowerfilt", "spellcheck.q","toyata", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true"); |
| + query("q", "*:*", "fl", "id,lowerfilt", "spellcheck.q","bluo", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_COUNT, "4"); |
| + query("q", "The quick reb fox jumped over the lazy brown dogs", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_COUNT, "4", SpellingParams.SPELLCHECK_COLLATE, "true"); |
| |
| - query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_COUNT, "10", SpellCheckComponent.SPELLCHECK_COLLATE, "true", SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "10", SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); |
| - query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_COUNT, "10", SpellCheckComponent.SPELLCHECK_COLLATE, "true", SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "10", SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "false"); |
| - query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_COUNT, "10", SpellCheckComponent.SPELLCHECK_COLLATE, "true", SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "0", SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1", SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "false"); |
| + query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_COUNT, "10", SpellingParams.SPELLCHECK_COLLATE, "true", SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10", SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10", SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); |
| + query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_COUNT, "10", SpellingParams.SPELLCHECK_COLLATE, "true", SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10", SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10", SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "false"); |
| + query("q", "lowerfilt:(+quock +reb)", "fl", "id,lowerfilt", "spellcheck", "true", "qt", requestHandlerName, "shards.qt", requestHandlerName, SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_COUNT, "10", SpellingParams.SPELLCHECK_COLLATE, "true", SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "0", SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1", SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "false"); |
| |
| } |
| } |
| Index: solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java (working copy)
|
| @@ -32,6 +32,7 @@
|
| import org.apache.solr.request.SolrRequestHandler; |
| import org.apache.solr.response.SolrQueryResponse; |
| import org.apache.solr.spelling.AbstractLuceneSpellChecker; |
| +import org.apache.solr.spelling.SolrSpellChecker; |
| import org.junit.BeforeClass; |
| import org.junit.Test; |
| |
| @@ -76,12 +77,12 @@
|
| |
| @Test |
| public void testExtendedResultsCount() throws Exception { |
| - assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"false") |
| + assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_BUILD, "true", "q","bluo", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"false") |
| ,"/spellcheck/suggestions/[0]=='bluo'" |
| ,"/spellcheck/suggestions/[1]/numFound==5" |
| ); |
| |
| - assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"3", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true") |
| + assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellingParams.SPELLCHECK_COUNT,"3", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true") |
| ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'blud','freq':1}, {'word':'blue','freq':1}, {'word':'blee','freq':1}]" |
| ); |
| } |
| @@ -96,7 +97,7 @@
|
| |
| @Test |
| public void testPerDictionary() throws Exception { |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","documemt" |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_BUILD, "true", "q","documemt" |
| , SpellingParams.SPELLCHECK_DICT, "perDict", SpellingParams.SPELLCHECK_PREFIX + ".perDict.foo", "bar", SpellingParams.SPELLCHECK_PREFIX + ".perDict.bar", "foo") |
| ,"/spellcheck/suggestions/bar=={'numFound':1, 'startOffset':0, 'endOffset':1, 'suggestion':['foo']}" |
| ,"/spellcheck/suggestions/foo=={'numFound':1, 'startOffset':2, 'endOffset':3, 'suggestion':['bar']}" |
| @@ -105,16 +106,16 @@
|
| |
| @Test |
| public void testCollate() throws Exception { |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","documemt", SpellCheckComponent.SPELLCHECK_COLLATE, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_BUILD, "true", "q","documemt", SpellingParams.SPELLCHECK_COLLATE, "true") |
| ,"/spellcheck/suggestions/collation=='document'" |
| ); |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documemt lowerfilt:broen^4", SpellCheckComponent.SPELLCHECK_COLLATE, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documemt lowerfilt:broen^4", SpellingParams.SPELLCHECK_COLLATE, "true") |
| ,"/spellcheck/suggestions/collation=='document lowerfilt:brown^4'" |
| ); |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documemtsss broens", SpellCheckComponent.SPELLCHECK_COLLATE, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documemtsss broens", SpellingParams.SPELLCHECK_COLLATE, "true") |
| ,"/spellcheck/suggestions/collation=='document brown'" |
| ); |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","pixma", SpellCheckComponent.SPELLCHECK_COLLATE, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","pixma", SpellingParams.SPELLCHECK_COLLATE, "true") |
| ,"/spellcheck/suggestions/collation=='pixmaa'" |
| ); |
| } |
| @@ -123,10 +124,10 @@
|
| @Test |
| public void testCorrectSpelling() throws Exception { |
| // Make sure correct spellings are signaled in the response |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","lowerfilt:lazy lowerfilt:brown", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","lowerfilt:lazy lowerfilt:brown", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true") |
| ,"/spellcheck/suggestions=={'correctlySpelled':true}" |
| ); |
| - assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","lakkle", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true") |
| + assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","lakkle", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true") |
| ,"/spellcheck/suggestions/correctlySpelled==false" |
| ); |
| } |
| @@ -156,7 +157,7 @@
|
| |
| NamedList args = new NamedList(); |
| NamedList spellchecker = new NamedList(); |
| - spellchecker.add(AbstractLuceneSpellChecker.DICTIONARY_NAME, "default"); |
| + spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "default"); |
| spellchecker.add(AbstractLuceneSpellChecker.FIELD, "lowerfilt"); |
| spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, "spellchecker1"); |
| args.add("spellchecker", spellchecker); |
| @@ -201,11 +202,11 @@
|
| //So with a threshold of 29%, "another" is absent from the dictionary |
| //while "document" is present. |
| |
| - assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellCheckComponent.SPELLCHECK_DICT, "threshold", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true") |
| + assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true") |
| ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]" |
| ); |
| |
| - assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellCheckComponent.SPELLCHECK_DICT, "threshold_direct", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true") |
| + assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold_direct", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true") |
| ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]" |
| ); |
| |
| @@ -216,9 +217,9 @@
|
| |
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_DICT, "threshold"); |
| - params.add(SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_DICT, "threshold"); |
| + params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true"); |
| params.add(CommonParams.Q, "anotheq"); |
| |
| SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); |
| @@ -233,8 +234,8 @@
|
| assertTrue(suggestions.get("suggestion")==null); |
| assertTrue((Boolean) suggestions.get("correctlySpelled")==false); |
| |
| - params.remove(SpellCheckComponent.SPELLCHECK_DICT); |
| - params.add(SpellCheckComponent.SPELLCHECK_DICT, "threshold_direct"); |
| + params.remove(SpellingParams.SPELLCHECK_DICT); |
| + params.add(SpellingParams.SPELLCHECK_DICT, "threshold_direct"); |
| rsp = new SolrQueryResponse(); |
| rsp.add("responseHeader", new SimpleOrderedMap()); |
| req = new LocalSolrQueryRequest(core, params); |
| Index: solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java (working copy)
|
| @@ -35,6 +35,7 @@
|
| import org.apache.lucene.search.TopDocs; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.SimpleFSDirectory; |
| +import org.apache.solr.BaseDistributedSearchTestCase; |
| import org.apache.solr.SolrTestCaseJ4; |
| import org.apache.solr.TestDistributedSearch; |
| import org.apache.solr.client.solrj.SolrServer; |
| @@ -389,7 +390,7 @@
|
| assertEquals(nDocs, slaveQueryResult.getNumFound()); |
| |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| //start config files replication test |
| @@ -447,7 +448,7 @@
|
| assertEquals(nDocs, slaveQueryResult.getNumFound()); |
| |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| // start stop polling test |
| @@ -527,7 +528,7 @@
|
| SolrDocumentList slaveQueryResult = (SolrDocumentList) slaveQueryRsp.get("response"); |
| assertEquals(nDocs, slaveQueryResult.getNumFound()); |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| System.out.println("replicate slave to master"); |
| @@ -599,7 +600,7 @@
|
| assertEquals(nDocs, slaveQueryResult.getNumFound()); |
| |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| // NOTE: the master only replicates after startup now! |
| @@ -654,7 +655,7 @@
|
| assertEquals(10, slaveQueryResult.getNumFound()); |
| |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| Object version = getIndexVersion(masterClient).get("indexversion"); |
| @@ -714,7 +715,7 @@
|
| assertEquals(nDocs, slaveQueryResult.getNumFound()); |
| |
| //compare results |
| - String cmp = TestDistributedSearch.compare(masterQueryResult, slaveQueryResult, 0, null); |
| + String cmp = BaseDistributedSearchTestCase.compare(masterQueryResult, slaveQueryResult, 0, null); |
| assertEquals(null, cmp); |
| |
| //start config files replication test |
| Index: solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (working copy)
|
| @@ -34,6 +34,7 @@
|
| import org.apache.solr.update.UpdateHandler; |
| import org.apache.solr.update.UpdateLog; |
| import org.apache.solr.update.VersionInfo; |
| +import org.apache.solr.util.TestHarness; |
| import org.junit.BeforeClass; |
| import org.junit.Test; |
| |
| @@ -355,7 +356,7 @@
|
| |
| if (rand.nextInt(100) < softCommitPercent) { |
| verbose("softCommit start"); |
| - assertU(h.commit("softCommit","true")); |
| + assertU(TestHarness.commit("softCommit","true")); |
| verbose("softCommit end"); |
| } else { |
| verbose("hardCommit start"); |
| @@ -577,7 +578,7 @@
|
| |
| if (rand.nextInt(100) < softCommitPercent) { |
| verbose("softCommit start"); |
| - assertU(h.commit("softCommit","true")); |
| + assertU(TestHarness.commit("softCommit","true")); |
| verbose("softCommit end"); |
| } else { |
| verbose("hardCommit start"); |
| @@ -815,7 +816,7 @@
|
| |
| if (rand.nextInt(100) < softCommitPercent) { |
| verbose("softCommit start"); |
| - assertU(h.commit("softCommit","true")); |
| + assertU(TestHarness.commit("softCommit","true")); |
| verbose("softCommit end"); |
| } else { |
| verbose("hardCommit start"); |
| @@ -1085,7 +1086,7 @@
|
| if (uLog.getState() != UpdateLog.State.ACTIVE) version = -1; |
| if (rand.nextInt(100) < softCommitPercent) { |
| verbose("softCommit start"); |
| - assertU(h.commit("softCommit","true")); |
| + assertU(TestHarness.commit("softCommit","true")); |
| verbose("softCommit end"); |
| } else { |
| verbose("hardCommit start"); |
| Index: solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java (working copy)
|
| @@ -23,6 +23,7 @@
|
| import org.apache.lucene.analysis.Token; |
| import org.apache.solr.SolrTestCaseJ4; |
| import org.apache.solr.common.params.CommonParams; |
| +import org.apache.solr.common.params.SpellingParams; |
| import org.apache.solr.common.util.NamedList; |
| import org.apache.solr.core.SolrCore; |
| import org.apache.solr.handler.component.SpellCheckComponent; |
| @@ -59,7 +60,7 @@
|
| DirectSolrSpellChecker checker = new DirectSolrSpellChecker(); |
| NamedList spellchecker = new NamedList(); |
| spellchecker.add("classname", DirectSolrSpellChecker.class.getName()); |
| - spellchecker.add(DirectSolrSpellChecker.FIELD, "teststop"); |
| + spellchecker.add(SolrSpellChecker.FIELD, "teststop"); |
| spellchecker.add(DirectSolrSpellChecker.MINQUERYLENGTH, 2); // we will try "fob" |
| |
| SolrCore core = h.getCore(); |
| @@ -85,7 +86,7 @@
|
| |
| @Test |
| public void testOnlyMorePopularWithExtendedResults() throws Exception { |
| - assertQ(req("q", "teststop:fox", "qt", "spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_DICT, "direct", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", SpellCheckComponent.SPELLCHECK_ONLY_MORE_POPULAR, "true"), |
| + assertQ(req("q", "teststop:fox", "qt", "spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_ONLY_MORE_POPULAR, "true"), |
| "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/int[@name='origFreq']=1", |
| "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/str[@name='word']='foo'", |
| "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/int[@name='freq']=2", |
| Index: solr/core/src/test/org/apache/solr/spelling/FileBasedSpellCheckerTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/spelling/FileBasedSpellCheckerTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/spelling/FileBasedSpellCheckerTest.java (working copy)
|
| @@ -66,11 +66,11 @@
|
| |
| spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external"); |
| spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt"); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "teststop"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop"); |
| spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8"); |
| File indexDir = new File(TEMP_DIR, "spellingIdx" + new Date().getTime()); |
| indexDir.mkdirs(); |
| - spellchecker.add(FileBasedSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| + spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| SolrCore core = h.getCore(); |
| String dictName = checker.init(spellchecker, core); |
| assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true); |
| @@ -102,12 +102,12 @@
|
| spellchecker.add("classname", FileBasedSpellChecker.class.getName()); |
| spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external"); |
| spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt"); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "teststop"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop"); |
| spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8"); |
| File indexDir = new File(TEMP_DIR, "spellingIdx" + new Date().getTime()); |
| indexDir.mkdirs(); |
| - spellchecker.add(FileBasedSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| - spellchecker.add(FileBasedSpellChecker.FIELD_TYPE, "teststop"); |
| + spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| + spellchecker.add(SolrSpellChecker.FIELD_TYPE, "teststop"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| SolrCore core = h.getCore(); |
| String dictName = checker.init(spellchecker, core); |
| @@ -149,8 +149,8 @@
|
| spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external"); |
| spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt"); |
| spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8"); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "teststop"); |
| - spellchecker.add(FileBasedSpellChecker.FIELD_TYPE, "teststop"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop"); |
| + spellchecker.add(SolrSpellChecker.FIELD_TYPE, "teststop"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| |
| SolrCore core = h.getCore(); |
| Index: solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java (working copy)
|
| @@ -110,7 +110,7 @@
|
| File indexDir = new File(TEMP_DIR, "spellingIdx" + new Date().getTime()); |
| indexDir.mkdirs(); |
| spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "title"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| SolrCore core = h.getCore(); |
| |
| @@ -186,7 +186,7 @@
|
| File indexDir = new File(TEMP_DIR, "spellingIdx" + new Date().getTime()); |
| indexDir.mkdirs(); |
| spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "title"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| SolrCore core = h.getCore(); |
| String dictName = checker.init(spellchecker, core); |
| @@ -243,7 +243,7 @@
|
| File indexDir = new File(TEMP_DIR, "spellingIdx" + new Date().getTime()); |
| indexDir.mkdirs(); |
| spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "title"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| spellchecker.add(AbstractLuceneSpellChecker.STRING_DISTANCE, JaroWinklerDistance.class.getName()); |
| SolrCore core = h.getCore(); |
| @@ -299,7 +299,7 @@
|
| indexDir.mkdirs(); |
| spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath()); |
| spellchecker.add(AbstractLuceneSpellChecker.LOCATION, altIndexDir.getAbsolutePath()); |
| - spellchecker.add(IndexBasedSpellChecker.FIELD, "title"); |
| + spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title"); |
| spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker); |
| SolrCore core = h.getCore(); |
| String dictName = checker.init(spellchecker, core); |
| Index: solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java (working copy)
|
| @@ -24,6 +24,7 @@
|
| import org.apache.solr.common.params.CommonParams; |
| import org.apache.solr.common.params.GroupParams; |
| import org.apache.solr.common.params.ModifiableSolrParams; |
| +import org.apache.solr.common.params.SpellingParams; |
| import org.apache.solr.common.util.NamedList; |
| import org.apache.solr.common.util.SimpleOrderedMap; |
| import org.apache.solr.core.SolrCore; |
| @@ -59,9 +60,9 @@
|
| |
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); |
| |
| params.add(CommonParams.Q, "lowerfilt:(hypenated-wotd)"); |
| { |
| @@ -111,11 +112,11 @@
|
| |
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "10"); |
| + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10"); |
| params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); |
| params.add(CommonParams.FQ, "NOT(id:1)"); |
| |
| @@ -146,12 +147,12 @@
|
| |
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_DICT, "multipleFields"); |
| - params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "1"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1"); |
| + params.add(SpellingParams.SPELLCHECK_DICT, "multipleFields"); |
| + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "1"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); |
| params.add(CommonParams.Q, "peac"); |
| |
| //SpellCheckCompRH has no "qf" defined. It will not find "peace" from "peac" despite it being in the dictionary |
| @@ -170,7 +171,7 @@
|
| |
| //SpellCheckCompRH1 has "lowerfilt1" defined in the "qf" param. It will find "peace" from "peac" because |
| //requrying field "lowerfilt1" returns the hit. |
| - params.remove(SpellCheckComponent.SPELLCHECK_BUILD); |
| + params.remove(SpellingParams.SPELLCHECK_BUILD); |
| handler = core.getRequestHandler("spellCheckCompRH1"); |
| rsp = new SolrQueryResponse(); |
| rsp.add("responseHeader", new SimpleOrderedMap()); |
| @@ -193,11 +194,11 @@
|
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(CommonParams.QT, "spellCheckCompRH"); |
| params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); |
| - params.add(SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true"); |
| + params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true"); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); |
| |
| // Testing backwards-compatible behavior. |
| // Returns 1 collation as a single string. |
| @@ -217,9 +218,9 @@
|
| |
| // Testing backwards-compatible response format but will only return a |
| // collation that would return results. |
| - params.remove(SpellCheckComponent.SPELLCHECK_BUILD); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "5"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1"); |
| + params.remove(SpellingParams.SPELLCHECK_BUILD); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); |
| handler = core.getRequestHandler("spellCheckCompRH"); |
| rsp = new SolrQueryResponse(); |
| rsp.add("responseHeader", new SimpleOrderedMap()); |
| @@ -234,10 +235,10 @@
|
| |
| // Testing returning multiple collations if more than one valid |
| // combination exists. |
| - params.remove(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES); |
| - params.remove(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "2"); |
| + params.remove(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES); |
| + params.remove(SpellingParams.SPELLCHECK_MAX_COLLATIONS); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "2"); |
| handler = core.getRequestHandler("spellCheckCompRH"); |
| rsp = new SolrQueryResponse(); |
| rsp.add("responseHeader", new SimpleOrderedMap()); |
| @@ -256,7 +257,7 @@
|
| |
| // Testing return multiple collations with expanded collation response |
| // format. |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); |
| handler = core.getRequestHandler("spellCheckCompRH"); |
| rsp = new SolrQueryResponse(); |
| rsp.add("responseHeader", new SimpleOrderedMap()); |
| @@ -300,11 +301,11 @@
|
| |
| ModifiableSolrParams params = new ModifiableSolrParams(); |
| params.add(SpellCheckComponent.COMPONENT_NAME, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10"); |
| - params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "5"); |
| - params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1"); |
| + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); |
| + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); |
| + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); |
| + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); |
| params.add(CommonParams.Q, "lowerfilt:(+fauth)"); |
| params.add(GroupParams.GROUP, "true"); |
| params.add(GroupParams.GROUP_FIELD, "id"); |
| Index: solr/core/src/test/org/apache/solr/util/DateMathParserTest.java
|
| ===================================================================
|
| --- solr/core/src/test/org/apache/solr/util/DateMathParserTest.java (revision 1297029)
|
| +++ solr/core/src/test/org/apache/solr/util/DateMathParserTest.java (working copy)
|
| @@ -167,7 +167,7 @@
|
| Date trash = p.parseMath("+7YEARS"); |
| trash = p.parseMath("/MONTH"); |
| trash = p.parseMath("-5DAYS+20MINUTES"); |
| - Thread.currentThread().sleep(5); |
| + Thread.sleep(5); |
| |
| String a = fmt.format(p.parseMath("")); |
| assertEquals("State of DateMathParser changed", e, a); |
| Index: solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java
|
| ===================================================================
|
| --- solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java (revision 1297029)
|
| +++ solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java (working copy)
|
| @@ -56,7 +56,7 @@
|
| super.setUp(); |
| System.setProperty("solr.solr.home", ExternalPaths.EXAMPLE_HOME); |
| |
| - File dataDir = new File(SolrTestCaseJ4.TEMP_DIR, |
| + File dataDir = new File(LuceneTestCase.TEMP_DIR, |
| getClass().getName() + "-" + System.currentTimeMillis()); |
| dataDir.mkdirs(); |
| System.setProperty("solr.data.dir", dataDir.getCanonicalPath()); |
| Index: solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java
|
| ===================================================================
|
| --- solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java (revision 1297029)
|
| +++ solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java (working copy)
|
| @@ -248,7 +248,7 @@
|
| } |
| |
| public void setUp() throws Exception { |
| - File home = new File(SolrTestCaseJ4.TEMP_DIR, |
| + File home = new File(LuceneTestCase.TEMP_DIR, |
| getClass().getName() + "-" + System.currentTimeMillis()); |
| |
| |
| Index: solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
|
| ===================================================================
|
| --- solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java (revision 1297029)
|
| +++ solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java (working copy)
|
| @@ -30,6 +30,7 @@
|
| import java.util.Random; |
| import java.util.Set; |
| |
| +import junit.framework.Assert; |
| import junit.framework.TestCase; |
| |
| import org.apache.solr.client.solrj.SolrServer; |
| @@ -652,7 +653,7 @@
|
| cmp = compare(a.getResponse(), b.getResponse(), flags, handle); |
| if (cmp != null) { |
| log.error("Mismatched responses:\n" + a + "\n" + b); |
| - TestCase.fail(cmp); |
| + Assert.fail(cmp); |
| } |
| } |
| |
| Index: solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
|
| ===================================================================
|
| --- solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java (revision 1297029)
|
| +++ solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java (working copy)
|
| @@ -347,7 +347,7 @@
|
| } |
| |
| public static void createCore() throws Exception { |
| - solrConfig = h.createConfig(getSolrConfigFile()); |
| + solrConfig = TestHarness.createConfig(getSolrConfigFile()); |
| h = new TestHarness( dataDir.getAbsolutePath(), |
| solrConfig, |
| getSchemaFile()); |
| @@ -612,13 +612,13 @@
|
| * @see TestHarness#optimize |
| */ |
| public static String optimize(String... args) { |
| - return h.optimize(args); |
| + return TestHarness.optimize(args); |
| } |
| /** |
| * @see TestHarness#commit |
| */ |
| public static String commit(String... args) { |
| - return h.commit(args); |
| + return TestHarness.commit(args); |
| } |
| |
| /** |
| @@ -683,7 +683,7 @@
|
| * @see TestHarness#deleteById |
| */ |
| public static String delI(String id) { |
| - return h.deleteById(id); |
| + return TestHarness.deleteById(id); |
| } |
| /** |
| * Generates a <delete>... XML string for an query |
| @@ -691,7 +691,7 @@
|
| * @see TestHarness#deleteByQuery |
| */ |
| public static String delQ(String q) { |
| - return h.deleteByQuery(q); |
| + return TestHarness.deleteByQuery(q); |
| } |
| |
| /** |
| @@ -702,7 +702,7 @@
|
| */ |
| public static XmlDoc doc(String... fieldsAndValues) { |
| XmlDoc d = new XmlDoc(); |
| - d.xml = h.makeSimpleDoc(fieldsAndValues).toString(); |
| + d.xml = TestHarness.makeSimpleDoc(fieldsAndValues).toString(); |
| return d; |
| } |
| |
| Index: solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java
|
| ===================================================================
|
| --- solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java (revision 1297029)
|
| +++ solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java (working copy)
|
| @@ -155,7 +155,7 @@
|
| System.setProperty("solr.solr.home", getSolrHome()); |
| if (configFile != null) { |
| |
| - solrConfig = h.createConfig(getSolrConfigFile()); |
| + solrConfig = TestHarness.createConfig(getSolrConfigFile()); |
| h = new TestHarness( dataDir.getAbsolutePath(), |
| solrConfig, |
| getSchemaFile()); |
| @@ -312,13 +312,13 @@
|
| * @see TestHarness#optimize |
| */ |
| public String optimize(String... args) { |
| - return h.optimize(args); |
| + return TestHarness.optimize(args); |
| } |
| /** |
| * @see TestHarness#commit |
| */ |
| public String commit(String... args) { |
| - return h.commit(args); |
| + return TestHarness.commit(args); |
| } |
| |
| /** |
| @@ -397,7 +397,7 @@
|
| * @see TestHarness#deleteById |
| */ |
| public String delI(String id, String... args) { |
| - return h.deleteById(id, args); |
| + return TestHarness.deleteById(id, args); |
| } |
| |
| /** |
| @@ -406,7 +406,7 @@
|
| * @see TestHarness#deleteByQuery |
| */ |
| public String delQ(String q, String... args) { |
| - return h.deleteByQuery(q, args); |
| + return TestHarness.deleteByQuery(q, args); |
| } |
| |
| /** |
| @@ -417,7 +417,7 @@
|
| */ |
| public Doc doc(String... fieldsAndValues) { |
| Doc d = new Doc(); |
| - d.xml = h.makeSimpleDoc(fieldsAndValues).toString(); |
| + d.xml = TestHarness.makeSimpleDoc(fieldsAndValues).toString(); |
| return d; |
| } |
| |