blob: bff116a97e2b70e4369ba53a1e809520b81e72e0 [file] [log] [blame]
Index: src/java/org/apache/lucene/analysis/ASCIIFoldingFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/ASCIIFoldingFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/ASCIIFoldingFilter.java (working copy)
@@ -67,6 +67,7 @@
private int outputPos;
private TermAttribute termAtt;
+ @Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
final char[] buffer = termAtt.termBuffer();
Index: src/java/org/apache/lucene/analysis/BaseCharFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/BaseCharFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/BaseCharFilter.java (working copy)
@@ -44,6 +44,7 @@
* recently added position, as it's a simple linear
* search backwards through all offset corrections added
* by {@link #addOffCorrectMap}. */
+ @Override
protected int correct(int currentOff) {
if (pcmList == null || pcmList.isEmpty()) {
return currentOff;
@@ -78,6 +79,7 @@
this.cumulativeDiff = cumulativeDiff;
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('(');
Index: src/java/org/apache/lucene/analysis/CachingTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/CachingTokenFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/CachingTokenFilter.java (working copy)
@@ -42,6 +42,7 @@
super(input);
}
+ @Override
public final boolean incrementToken() throws IOException {
if (cache == null) {
// fill cache lazily
@@ -59,12 +60,14 @@
return true;
}
+ @Override
public final void end() throws IOException {
if (finalState != null) {
restoreState(finalState);
}
}
+ @Override
public void reset() throws IOException {
if(cache != null) {
iterator = cache.iterator();
Index: src/java/org/apache/lucene/analysis/CharArraySet.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharArraySet.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/CharArraySet.java (working copy)
@@ -223,14 +223,17 @@
}
+ @Override
public int size() {
return count;
}
+ @Override
public boolean isEmpty() {
return count==0;
}
+ @Override
public boolean contains(Object o) {
if (o instanceof char[]) {
final char[] text = (char[])o;
@@ -239,6 +242,7 @@
return contains(o.toString());
}
+ @Override
public boolean add(Object o) {
if (o instanceof char[]) {
return add((char[])o);
@@ -309,6 +313,7 @@
}
/** returns an iterator of new allocated Strings, this method violates the Set interface */
+ @Override
@SuppressWarnings("unchecked")
public Iterator<Object> iterator() {
return (Iterator) stringIterator();
@@ -328,22 +333,27 @@
super(entries, ignoreCase, count);
}
+ @Override
public boolean add(Object o){
throw new UnsupportedOperationException();
}
+ @Override
public boolean addAll(Collection<? extends Object> coll) {
throw new UnsupportedOperationException();
}
+ @Override
public boolean add(char[] text) {
throw new UnsupportedOperationException();
}
+ @Override
public boolean add(CharSequence text) {
throw new UnsupportedOperationException();
}
+ @Override
public boolean add(String text) {
throw new UnsupportedOperationException();
}
Index: src/java/org/apache/lucene/analysis/CharFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/CharFilter.java (working copy)
@@ -50,26 +50,32 @@
* Chains the corrected offset through the input
* CharFilter.
*/
+ @Override
public final int correctOffset(int currentOff) {
return input.correctOffset(correct(currentOff));
}
+ @Override
public void close() throws IOException {
input.close();
}
+ @Override
public int read(char[] cbuf, int off, int len) throws IOException {
return input.read(cbuf, off, len);
}
+ @Override
public boolean markSupported(){
return input.markSupported();
}
+ @Override
public void mark( int readAheadLimit ) throws IOException {
input.mark(readAheadLimit);
}
+ @Override
public void reset() throws IOException {
input.reset();
}
Index: src/java/org/apache/lucene/analysis/CharReader.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharReader.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/CharReader.java (working copy)
@@ -39,26 +39,32 @@
input = in;
}
+ @Override
public int correctOffset(int currentOff) {
return currentOff;
}
+ @Override
public void close() throws IOException {
input.close();
}
+ @Override
public int read(char[] cbuf, int off, int len) throws IOException {
return input.read(cbuf, off, len);
}
+ @Override
public boolean markSupported(){
return input.markSupported();
}
+ @Override
public void mark( int readAheadLimit ) throws IOException {
input.mark(readAheadLimit);
}
+ @Override
public void reset() throws IOException {
input.reset();
}
Index: src/java/org/apache/lucene/analysis/CharTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/CharTokenizer.java (working copy)
@@ -65,6 +65,7 @@
return c;
}
+ @Override
public final boolean incrementToken() throws IOException {
clearAttributes();
int length = 0;
@@ -108,12 +109,14 @@
return true;
}
+ @Override
public final void end() {
// set final offset
int finalOffset = correctOffset(offset);
offsetAtt.setOffset(finalOffset, finalOffset);
}
+ @Override
public void reset(Reader input) throws IOException {
super.reset(input);
bufferIndex = 0;
Index: src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java (working copy)
@@ -41,6 +41,7 @@
private int outputPos;
private TermAttribute termAtt;
+ @Override
public final boolean incrementToken() throws java.io.IOException {
if (input.incrementToken()) {
final char[] buffer = termAtt.termBuffer();
Index: src/java/org/apache/lucene/analysis/KeywordAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/KeywordAnalyzer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/KeywordAnalyzer.java (working copy)
@@ -28,10 +28,12 @@
public KeywordAnalyzer() {
setOverridesTokenStreamMethod(KeywordAnalyzer.class);
}
+ @Override
public TokenStream tokenStream(String fieldName,
final Reader reader) {
return new KeywordTokenizer(reader);
}
+ @Override
public TokenStream reusableTokenStream(String fieldName,
final Reader reader) throws IOException {
if (overridesTokenStreamMethod) {
Index: src/java/org/apache/lucene/analysis/KeywordTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/KeywordTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/KeywordTokenizer.java (working copy)
@@ -62,6 +62,7 @@
termAtt.resizeTermBuffer(bufferSize);
}
+ @Override
public final boolean incrementToken() throws IOException {
if (!done) {
clearAttributes();
@@ -83,11 +84,13 @@
return false;
}
+ @Override
public final void end() {
// set final offset
offsetAtt.setOffset(finalOffset, finalOffset);
}
+ @Override
public void reset(Reader input) throws IOException {
super.reset(input);
this.done = false;
Index: src/java/org/apache/lucene/analysis/LengthFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/LengthFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/LengthFilter.java (working copy)
@@ -46,6 +46,7 @@
/**
* Returns the next input Token whose term() is the right len
*/
+ @Override
public final boolean incrementToken() throws IOException {
// return the first non-stop word found
while (input.incrementToken()) {
Index: src/java/org/apache/lucene/analysis/LetterTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/LetterTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/LetterTokenizer.java (working copy)
@@ -46,6 +46,7 @@
/** Collects only characters which satisfy
* {@link Character#isLetter(char)}.*/
+ @Override
protected boolean isTokenChar(char c) {
return Character.isLetter(c);
}
Index: src/java/org/apache/lucene/analysis/LowerCaseFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/LowerCaseFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/LowerCaseFilter.java (working copy)
@@ -32,6 +32,7 @@
private TermAttribute termAtt;
+ @Override
public final boolean incrementToken() throws IOException {
if (input.incrementToken()) {
Index: src/java/org/apache/lucene/analysis/LowerCaseTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/LowerCaseTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/LowerCaseTokenizer.java (working copy)
@@ -49,6 +49,7 @@
/** Converts char to lower case
* {@link Character#toLowerCase(char)}.*/
+ @Override
protected char normalize(char c) {
return Character.toLowerCase(c);
}
Index: src/java/org/apache/lucene/analysis/MappingCharFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/MappingCharFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/MappingCharFilter.java (working copy)
@@ -47,6 +47,7 @@
this.normMap = normMap;
}
+ @Override
public int read() throws IOException {
while(true) {
if (replacement != null && charPointer < replacement.length()) {
@@ -116,6 +117,7 @@
return result;
}
+ @Override
public int read(char[] cbuf, int off, int len) throws IOException {
char[] tmp = new char[len];
int l = input.read(tmp, 0, len);
Index: src/java/org/apache/lucene/analysis/PerFieldAnalyzerWrapper.java
===================================================================
--- src/java/org/apache/lucene/analysis/PerFieldAnalyzerWrapper.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/PerFieldAnalyzerWrapper.java (working copy)
@@ -86,6 +86,7 @@
analyzerMap.put(fieldName, analyzer);
}
+ @Override
public TokenStream tokenStream(String fieldName, Reader reader) {
Analyzer analyzer = analyzerMap.get(fieldName);
if (analyzer == null) {
@@ -95,6 +96,7 @@
return analyzer.tokenStream(fieldName, reader);
}
+ @Override
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
if (overridesTokenStreamMethod) {
// LUCENE-1678: force fallback to tokenStream() if we
@@ -110,6 +112,7 @@
}
/** Return the positionIncrementGap from the analyzer assigned to fieldName */
+ @Override
public int getPositionIncrementGap(String fieldName) {
Analyzer analyzer = analyzerMap.get(fieldName);
if (analyzer == null)
@@ -117,6 +120,7 @@
return analyzer.getPositionIncrementGap(fieldName);
}
+ @Override
public String toString() {
return "PerFieldAnalyzerWrapper(" + analyzerMap + ", default=" + defaultAnalyzer + ")";
}
Index: src/java/org/apache/lucene/analysis/PorterStemFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/PorterStemFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/PorterStemFilter.java (working copy)
@@ -49,6 +49,7 @@
termAtt = addAttribute(TermAttribute.class);
}
+ @Override
public final boolean incrementToken() throws IOException {
if (!input.incrementToken())
return false;
Index: src/java/org/apache/lucene/analysis/PorterStemmer.java
===================================================================
--- src/java/org/apache/lucene/analysis/PorterStemmer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/PorterStemmer.java (working copy)
@@ -94,6 +94,7 @@
* or a reference to the internal buffer can be retrieved by getResultBuffer
* and getResultLength (which is generally more efficient.)
*/
+ @Override
public String toString() { return new String(b,0,i); }
/**
Index: src/java/org/apache/lucene/analysis/SimpleAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/SimpleAnalyzer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/SimpleAnalyzer.java (working copy)
@@ -24,10 +24,12 @@
* with {@link LowerCaseFilter} */
public final class SimpleAnalyzer extends Analyzer {
+ @Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new LowerCaseTokenizer(reader);
}
+ @Override
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
Tokenizer tokenizer = (Tokenizer) getPreviousTokenStream();
if (tokenizer == null) {
Index: src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (working copy)
@@ -95,6 +95,7 @@
/** Constructs a {@link StandardTokenizer} filtered by a {@link
StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. */
+ @Override
public TokenStream tokenStream(String fieldName, Reader reader) {
StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
tokenStream.setMaxTokenLength(maxTokenLength);
@@ -132,6 +133,7 @@
return maxTokenLength;
}
+ @Override
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
if (overridesTokenStreamMethod) {
// LUCENE-1678: force fallback to tokenStream() if we
Index: src/java/org/apache/lucene/analysis/standard/StandardFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/standard/StandardFilter.java (working copy)
@@ -45,6 +45,7 @@
* <p>Removes <tt>'s</tt> from the end of words.
* <p>Removes dots from acronyms.
*/
+ @Override
public final boolean incrementToken() throws java.io.IOException {
if (!input.incrementToken()) {
return false;
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (working copy)
@@ -170,6 +170,7 @@
*
* @see org.apache.lucene.analysis.TokenStream#next()
*/
+ @Override
public final boolean incrementToken() throws IOException {
clearAttributes();
int posIncr = 1;
@@ -207,6 +208,7 @@
}
}
+ @Override
public final void end() {
// set final offset
int finalOffset = correctOffset(scanner.yychar() + scanner.yylength());
@@ -218,11 +220,13 @@
*
* @see org.apache.lucene.analysis.TokenStream#reset()
*/
+ @Override
public void reset() throws IOException {
super.reset();
scanner.yyreset(input);
}
+ @Override
public void reset(Reader reader) throws IOException {
super.reset(reader);
reset();
Index: src/java/org/apache/lucene/analysis/StopAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopAnalyzer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/StopAnalyzer.java (working copy)
@@ -93,6 +93,7 @@
}
/** Filters LowerCaseTokenizer with StopFilter. */
+ @Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords);
}
@@ -102,6 +103,7 @@
Tokenizer source;
TokenStream result;
};
+ @Override
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
SavedStreams streams = (SavedStreams) getPreviousTokenStream();
if (streams == null) {
Index: src/java/org/apache/lucene/analysis/StopFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/StopFilter.java (working copy)
@@ -134,6 +134,7 @@
/**
* Returns the next input Token whose term() is not a stop word.
*/
+ @Override
public final boolean incrementToken() throws IOException {
// return the first non-stop word found
int skippedPositions = 0;
Index: src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java (working copy)
@@ -127,6 +127,7 @@
while (incrementToken());
}
+ @Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
// capture state lazily - maybe no SinkFilter accepts this state
@@ -148,6 +149,7 @@
return false;
}
+ @Override
public final void end() throws IOException {
super.end();
AttributeSource.State finalState = captureState();
@@ -204,6 +206,7 @@
this.finalState = finalState;
}
+ @Override
public final boolean incrementToken() throws IOException {
// lazy init the iterator
if (it == null) {
@@ -219,18 +222,21 @@
return true;
}
+ @Override
public final void end() throws IOException {
if (finalState != null) {
restoreState(finalState);
}
}
+ @Override
public final void reset() {
it = cachedStates.iterator();
}
}
private static final SinkFilter ACCEPT_ALL_FILTER = new SinkFilter() {
+ @Override
public boolean accept(AttributeSource source) {
return true;
}
Index: src/java/org/apache/lucene/analysis/Token.java
===================================================================
--- src/java/org/apache/lucene/analysis/Token.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/Token.java (working copy)
@@ -487,6 +487,7 @@
this.payload = payload;
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('(');
@@ -507,6 +508,7 @@
/** Resets the term text, payload, flags, and positionIncrement,
* startOffset, endOffset and token type to default.
*/
+ @Override
public void clear() {
payload = null;
// Leave termBuffer to allow re-use
@@ -517,6 +519,7 @@
type = DEFAULT_TYPE;
}
+ @Override
public Object clone() {
Token t = (Token)super.clone();
// Do a deep clone
@@ -544,6 +547,7 @@
return t;
}
+ @Override
public boolean equals(Object obj) {
if (obj == this)
return true;
@@ -578,6 +582,7 @@
return o1.equals(o2);
}
+ @Override
public int hashCode() {
initTermBuffer();
int code = termLength;
@@ -739,6 +744,7 @@
payload = prototype.payload;
}
+ @Override
public void copyTo(AttributeImpl target) {
if (target instanceof Token) {
final Token to = (Token) target;
@@ -780,11 +786,13 @@
this.delegate = delegate;
}
+ @Override
public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
return attClass.isAssignableFrom(Token.class)
? new Token() : delegate.createAttributeInstance(attClass);
}
+ @Override
public boolean equals(Object other) {
if (this == other) return true;
if (other instanceof TokenAttributeFactory) {
@@ -794,6 +802,7 @@
return false;
}
+ @Override
public int hashCode() {
return delegate.hashCode() ^ 0x0a45aa31;
}
Index: src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java (working copy)
@@ -49,10 +49,12 @@
this.flags = flags;
}
+ @Override
public void clear() {
flags = 0;
}
+ @Override
public boolean equals(Object other) {
if (this == other) {
return true;
@@ -65,10 +67,12 @@
return false;
}
+ @Override
public int hashCode() {
return flags;
}
+ @Override
public void copyTo(AttributeImpl target) {
FlagsAttribute t = (FlagsAttribute) target;
t.setFlags(flags);
Index: src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java (working copy)
@@ -55,11 +55,13 @@
}
+ @Override
public void clear() {
startOffset = 0;
endOffset = 0;
}
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -73,12 +75,14 @@
return false;
}
+ @Override
public int hashCode() {
int code = startOffset;
code = code * 31 + endOffset;
return code;
}
+ @Override
public void copyTo(AttributeImpl target) {
OffsetAttribute t = (OffsetAttribute) target;
t.setOffset(startOffset, endOffset);
Index: src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java (working copy)
@@ -54,10 +54,12 @@
this.payload = payload;
}
+ @Override
public void clear() {
payload = null;
}
+ @Override
public Object clone() {
PayloadAttributeImpl clone = (PayloadAttributeImpl) super.clone();
if (payload != null) {
@@ -66,6 +68,7 @@
return clone;
}
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -83,10 +86,12 @@
return false;
}
+ @Override
public int hashCode() {
return (payload == null) ? 0 : payload.hashCode();
}
+ @Override
public void copyTo(AttributeImpl target) {
PayloadAttribute t = (PayloadAttribute) target;
t.setPayload((payload == null) ? null : (Payload) payload.clone());
Index: src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java (working copy)
@@ -67,10 +67,12 @@
return positionIncrement;
}
+ @Override
public void clear() {
this.positionIncrement = 1;
}
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -83,10 +85,12 @@
return false;
}
+ @Override
public int hashCode() {
return positionIncrement;
}
+ @Override
public void copyTo(AttributeImpl target) {
PositionIncrementAttribute t = (PositionIncrementAttribute) target;
t.setPositionIncrement(positionIncrement);
Index: src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java (working copy)
@@ -164,6 +164,7 @@
termLength = length;
}
+ @Override
public int hashCode() {
initTermBuffer();
int code = termLength;
@@ -171,10 +172,12 @@
return code;
}
+ @Override
public void clear() {
termLength = 0;
}
+ @Override
public Object clone() {
TermAttributeImpl t = (TermAttributeImpl)super.clone();
// Do a deep clone
@@ -184,6 +187,7 @@
return t;
}
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -207,11 +211,13 @@
return false;
}
+ @Override
public String toString() {
initTermBuffer();
return "term=" + new String(termBuffer, 0, termLength);
}
+ @Override
public void copyTo(AttributeImpl target) {
initTermBuffer();
TermAttribute t = (TermAttribute) target;
Index: src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java (working copy)
@@ -47,10 +47,12 @@
this.type = type;
}
+ @Override
public void clear() {
type = DEFAULT_TYPE;
}
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -63,10 +65,12 @@
return false;
}
+ @Override
public int hashCode() {
return type.hashCode();
}
+ @Override
public void copyTo(AttributeImpl target) {
TypeAttribute t = (TypeAttribute) target;
t.setType(type);
Index: src/java/org/apache/lucene/analysis/TokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenFilter.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/TokenFilter.java (working copy)
@@ -37,16 +37,19 @@
/** Performs end-of-stream operations, if any, and calls then <code>end()</code> on the
* input TokenStream.<p/>
* <b>NOTE:</b> Be sure to call <code>super.end()</code> first when overriding this method.*/
+ @Override
public void end() throws IOException {
input.end();
}
/** Close the input TokenStream. */
+ @Override
public void close() throws IOException {
input.close();
}
/** Reset the filter as well as the input TokenStream. */
+ @Override
public void reset() throws IOException {
input.reset();
}
Index: src/java/org/apache/lucene/analysis/Tokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/Tokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/Tokenizer.java (working copy)
@@ -67,6 +67,7 @@
}
/** By default, closes the input Reader. */
+ @Override
public void close() throws IOException {
input.close();
}
Index: src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java (working copy)
@@ -23,10 +23,12 @@
/** An Analyzer that uses {@link WhitespaceTokenizer}. */
public final class WhitespaceAnalyzer extends Analyzer {
+ @Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new WhitespaceTokenizer(reader);
}
+ @Override
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
Tokenizer tokenizer = (Tokenizer) getPreviousTokenStream();
if (tokenizer == null) {
Index: src/java/org/apache/lucene/analysis/WhitespaceTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/WhitespaceTokenizer.java (revision 830378)
+++ src/java/org/apache/lucene/analysis/WhitespaceTokenizer.java (working copy)
@@ -42,6 +42,7 @@
/** Collects only characters which do not satisfy
* {@link Character#isWhitespace(char)}.*/
+ @Override
protected boolean isTokenChar(char c) {
return !Character.isWhitespace(c);
}
Index: src/java/org/apache/lucene/document/AbstractField.java
===================================================================
--- src/java/org/apache/lucene/document/AbstractField.java (revision 830378)
+++ src/java/org/apache/lucene/document/AbstractField.java (working copy)
@@ -235,6 +235,7 @@
}
/** Prints a Field for human consumption. */
+ @Override
public final String toString() {
StringBuilder result = new StringBuilder();
if (isStored) {
Index: src/java/org/apache/lucene/document/DateTools.java
===================================================================
--- src/java/org/apache/lucene/document/DateTools.java (revision 830378)
+++ src/java/org/apache/lucene/document/DateTools.java (working copy)
@@ -246,6 +246,7 @@
this.resolution = resolution;
}
+ @Override
public String toString() {
return resolution;
}
Index: src/java/org/apache/lucene/document/Document.java
===================================================================
--- src/java/org/apache/lucene/document/Document.java (revision 830378)
+++ src/java/org/apache/lucene/document/Document.java (working copy)
@@ -289,6 +289,7 @@
}
/** Prints the fields of a document for human consumption. */
+ @Override
public final String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("Document<");
Index: src/java/org/apache/lucene/document/Field.java
===================================================================
--- src/java/org/apache/lucene/document/Field.java (revision 830378)
+++ src/java/org/apache/lucene/document/Field.java (working copy)
@@ -43,12 +43,13 @@
* stored.
*/
YES {
+ @Override
public boolean isStored() { return true; }
},
/** Do not store the field value in the index. */
- NO
- {
+ NO {
+ @Override
public boolean isStored() { return false; }
};
@@ -62,8 +63,11 @@
* but one can still access its contents provided it is
* {@link Field.Store stored}. */
NO {
+ @Override
public boolean isIndexed() { return false; }
+ @Override
public boolean isAnalyzed() { return false; }
+ @Override
public boolean omitNorms() { return true; }
},
@@ -71,8 +75,11 @@
* value through an Analyzer. This is useful for
* common text. */
ANALYZED {
+ @Override
public boolean isIndexed() { return true; }
+ @Override
public boolean isAnalyzed() { return true; }
+ @Override
public boolean omitNorms() { return false; }
},
@@ -81,8 +88,11 @@
* useful for unique Ids like product numbers.
*/
NOT_ANALYZED {
+ @Override
public boolean isIndexed() { return true; }
+ @Override
public boolean isAnalyzed() { return false; }
+ @Override
public boolean omitNorms() { return false; }
},
@@ -101,8 +111,11 @@
* that field must be indexed with NOT_ANALYZED_NO_NORMS
* from the beginning. */
NOT_ANALYZED_NO_NORMS {
+ @Override
public boolean isIndexed() { return true; }
+ @Override
public boolean isAnalyzed() { return false; }
+ @Override
public boolean omitNorms() { return true; }
},
@@ -112,8 +125,11 @@
* {@link #NOT_ANALYZED_NO_NORMS} for what norms are
* and why you may want to disable them. */
ANALYZED_NO_NORMS {
+ @Override
public boolean isIndexed() { return true; }
+ @Override
public boolean isAnalyzed() { return true; }
+ @Override
public boolean omitNorms() { return true; }
};
@@ -156,17 +172,23 @@
/** Do not store term vectors.
*/
NO {
- public boolean isStored() { return false; }
- public boolean withPositions() { return false; }
- public boolean withOffsets() { return false; }
+ @Override
+ public boolean isStored() { return false; }
+ @Override
+ public boolean withPositions() { return false; }
+ @Override
+ public boolean withOffsets() { return false; }
},
/** Store the term vectors of each document. A term vector is a list
* of the document's terms and their number of occurrences in that document. */
YES {
- public boolean isStored() { return true; }
- public boolean withPositions() { return false; }
- public boolean withOffsets() { return false; }
+ @Override
+ public boolean isStored() { return true; }
+ @Override
+ public boolean withPositions() { return false; }
+ @Override
+ public boolean withOffsets() { return false; }
},
/**
@@ -175,9 +197,12 @@
* @see #YES
*/
WITH_POSITIONS {
- public boolean isStored() { return true; }
- public boolean withPositions() { return true; }
- public boolean withOffsets() { return false; }
+ @Override
+ public boolean isStored() { return true; }
+ @Override
+ public boolean withPositions() { return true; }
+ @Override
+ public boolean withOffsets() { return false; }
},
/**
@@ -186,9 +211,12 @@
* @see #YES
*/
WITH_OFFSETS {
- public boolean isStored() { return true; }
- public boolean withPositions() { return false; }
- public boolean withOffsets() { return true; }
+ @Override
+ public boolean isStored() { return true; }
+ @Override
+ public boolean withPositions() { return false; }
+ @Override
+ public boolean withOffsets() { return true; }
},
/**
@@ -199,9 +227,12 @@
* @see #WITH_OFFSETS
*/
WITH_POSITIONS_OFFSETS {
- public boolean isStored() { return true; }
- public boolean withPositions() { return true; }
- public boolean withOffsets() { return true; }
+ @Override
+ public boolean isStored() { return true; }
+ @Override
+ public boolean withPositions() { return true; }
+ @Override
+ public boolean withOffsets() { return true; }
};
/** Get the best representation of a TermVector given the flags. */
@@ -209,7 +240,7 @@
// If it is not stored, nothing else matters.
if (!stored) {
- return TermVector.NO;
+ return TermVector.NO;
}
if (withOffsets) {
@@ -337,7 +368,7 @@
* </ul>
*/
public Field(String name, String value, Store store, Index index, TermVector termVector) {
- this(name, true, value, store, index, termVector);
+ this(name, true, value, store, index, termVector);
}
/**
Index: src/java/org/apache/lucene/index/ByteSliceReader.java
===================================================================
--- src/java/org/apache/lucene/index/ByteSliceReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/ByteSliceReader.java (working copy)
@@ -66,6 +66,7 @@
return upto + bufferOffset == endIndex;
}
+ @Override
public byte readByte() {
assert !eof();
assert upto <= limit;
@@ -117,6 +118,7 @@
}
}
+ @Override
public void readBytes(byte[] b, int offset, int len) {
while(len > 0) {
final int numLeft = limit-upto;
@@ -135,9 +137,13 @@
}
}
+ @Override
public long getFilePointer() {throw new RuntimeException("not implemented");}
+ @Override
public long length() {throw new RuntimeException("not implemented");}
+ @Override
public void seek(long pos) {throw new RuntimeException("not implemented");}
+ @Override
public void close() {throw new RuntimeException("not implemented");}
}
Index: src/java/org/apache/lucene/index/CheckIndex.java
===================================================================
--- src/java/org/apache/lucene/index/CheckIndex.java (revision 830378)
+++ src/java/org/apache/lucene/index/CheckIndex.java (working copy)
@@ -274,11 +274,13 @@
super(p);
}
+ @Override
public void seek(Term term) throws IOException {
super.seek(term);
delCount = 0;
}
+ @Override
protected void skippingDoc() throws IOException {
delCount++;
}
Index: src/java/org/apache/lucene/index/CompoundFileReader.java
===================================================================
--- src/java/org/apache/lucene/index/CompoundFileReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/CompoundFileReader.java (working copy)
@@ -107,6 +107,7 @@
return fileName;
}
+ @Override
public synchronized void close() throws IOException {
if (stream == null)
throw new IOException("Already closed");
@@ -116,6 +117,7 @@
stream = null;
}
+ @Override
public synchronized IndexInput openInput(String id)
throws IOException
{
@@ -123,6 +125,7 @@
return openInput(id, readBufferSize);
}
+ @Override
public synchronized IndexInput openInput(String id, int readBufferSize)
throws IOException
{
@@ -137,28 +140,33 @@
}
/** Returns an array of strings, one for each file in the directory. */
+ @Override
public String[] listAll() {
String res[] = new String[entries.size()];
return entries.keySet().toArray(res);
}
/** Returns true iff a file with the given name exists. */
+ @Override
public boolean fileExists(String name) {
return entries.containsKey(name);
}
/** Returns the time the compound file was last modified. */
+ @Override
public long fileModified(String name) throws IOException {
return directory.fileModified(fileName);
}
/** Set the modified time of the compound file to now. */
+ @Override
public void touchFile(String name) throws IOException {
directory.touchFile(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
+ @Override
public void deleteFile(String name)
{
throw new UnsupportedOperationException();
@@ -173,6 +181,7 @@
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
+ @Override
public long fileLength(String name)
throws IOException
{
@@ -184,6 +193,7 @@
/** Not implemented
* @throws UnsupportedOperationException */
+ @Override
public IndexOutput createOutput(String name)
{
throw new UnsupportedOperationException();
@@ -191,6 +201,7 @@
/** Not implemented
* @throws UnsupportedOperationException */
+ @Override
public Lock makeLock(String name)
{
throw new UnsupportedOperationException();
@@ -220,6 +231,7 @@
this.length = length;
}
+ @Override
public Object clone() {
CSIndexInput clone = (CSIndexInput)super.clone();
clone.base = (IndexInput)base.clone();
@@ -234,6 +246,7 @@
* @param offset the offset in the array to start storing bytes
* @param len the number of bytes to read
*/
+ @Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException
{
@@ -248,13 +261,16 @@
* the next {@link #readInternal(byte[],int,int)} will occur.
* @see #readInternal(byte[],int,int)
*/
+ @Override
protected void seekInternal(long pos) {}
/** Closes the stream to further operations. */
+ @Override
public void close() throws IOException {
base.close();
}
+ @Override
public long length() {
return length;
}
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (revision 830378)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (working copy)
@@ -112,6 +112,7 @@
}
}
+ @Override
public void close() {
closed = true;
}
@@ -146,6 +147,7 @@
return count;
}
+ @Override
public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
@@ -275,6 +277,7 @@
}
}
+ @Override
public void run() {
// First time through the while loop we do the merge
@@ -324,6 +327,7 @@
}
}
+ @Override
public String toString() {
MergePolicy.OneMerge merge = getRunningMerge();
if (merge == null)
Index: src/java/org/apache/lucene/index/DefaultSkipListReader.java
===================================================================
--- src/java/org/apache/lucene/index/DefaultSkipListReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/DefaultSkipListReader.java (working copy)
@@ -75,6 +75,7 @@
return lastPayloadLength;
}
+ @Override
protected void seekChild(int level) throws IOException {
super.seekChild(level);
freqPointer[level] = lastFreqPointer;
@@ -82,6 +83,7 @@
payloadLength[level] = lastPayloadLength;
}
+ @Override
protected void setLastSkipData(int level) {
super.setLastSkipData(level);
lastFreqPointer = freqPointer[level];
@@ -90,6 +92,7 @@
}
+ @Override
protected int readSkipData(int level, IndexInput skipStream) throws IOException {
int delta;
if (currentFieldStoresPayloads) {
Index: src/java/org/apache/lucene/index/DefaultSkipListWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DefaultSkipListWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/DefaultSkipListWriter.java (working copy)
@@ -74,6 +74,7 @@
this.curProxPointer = proxOutput.getFilePointer();
}
+ @Override
protected void resetSkip() {
super.resetSkip();
Arrays.fill(lastSkipDoc, 0);
@@ -83,6 +84,7 @@
Arrays.fill(lastSkipProxPointer, proxOutput.getFilePointer());
}
+ @Override
protected void writeSkipData(int level, IndexOutput skipBuffer) throws IOException {
// To efficiently store payloads in the posting lists we do not store the length of
// every payload. Instead we omit the length for a payload if the previous payload had
Index: src/java/org/apache/lucene/index/DirectoryReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/DirectoryReader.java (working copy)
@@ -67,6 +67,7 @@
static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly,
final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+ @Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(directory, segmentFileName);
@@ -311,6 +312,7 @@
starts[subReaders.length] = maxDoc;
}
+ @Override
public final synchronized Object clone() {
try {
return clone(readOnly); // Preserve current readOnly
@@ -319,6 +321,7 @@
}
}
+ @Override
public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
DirectoryReader newReader = doReopen((SegmentInfos) segmentInfos.clone(), true, openReadOnly);
@@ -341,15 +344,18 @@
return newReader;
}
+ @Override
public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
// Preserve current readOnly
return doReopen(readOnly, null);
}
+ @Override
public final synchronized IndexReader reopen(boolean openReadOnly) throws CorruptIndexException, IOException {
return doReopen(openReadOnly, null);
}
+ @Override
public final synchronized IndexReader reopen(final IndexCommit commit) throws CorruptIndexException, IOException {
return doReopen(true, commit);
}
@@ -420,6 +426,7 @@
}
return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+ @Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(directory, segmentFileName);
@@ -439,17 +446,20 @@
}
/** Version number when this IndexReader was opened. */
+ @Override
public long getVersion() {
ensureOpen();
return segmentInfos.getVersion();
}
+ @Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
+ @Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
@@ -458,12 +468,14 @@
}
+ @Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
+ @Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
@@ -474,11 +486,13 @@
* Checks is the index is optimized (if it has a single segment and no deletions)
* @return <code>true</code> if the index is optimized; <code>false</code> otherwise
*/
+ @Override
public boolean isOptimized() {
ensureOpen();
return segmentInfos.size() == 1 && !hasDeletions();
}
+ @Override
public synchronized int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
if (numDocs == -1) { // check cache
@@ -490,29 +504,34 @@
return numDocs;
}
+ @Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
+ @Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
+ @Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
final int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
+ @Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
+ @Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
@@ -520,6 +539,7 @@
hasDeletions = true;
}
+ @Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
@@ -553,6 +573,7 @@
return hi;
}
+ @Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
@@ -561,6 +582,7 @@
return false;
}
+ @Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
@@ -576,6 +598,7 @@
return bytes;
}
+ @Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
@@ -591,6 +614,7 @@
}
}
+ @Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
@@ -600,16 +624,19 @@
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
+ @Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
+ @Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
+ @Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
@@ -618,11 +645,13 @@
return total;
}
+ @Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
+ @Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
@@ -639,6 +668,7 @@
* obtained)
* @throws IOException if there is a low-level IO error
*/
+ @Override
protected void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
if (readOnly) {
@@ -679,6 +709,7 @@
*
* @throws IOException if there is a low-level IO error
*/
+ @Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
if (hasChanges) {
segmentInfos.setUserData(commitUserData);
@@ -763,11 +794,13 @@
}
}
+ @Override
public Map<String,String> getCommitUserData() {
ensureOpen();
return segmentInfos.getUserData();
}
+ @Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
if (writer == null || writer.isClosed()) {
@@ -778,6 +811,7 @@
}
}
+ @Override
protected synchronized void doClose() throws IOException {
IOException ioe = null;
normsCache = null;
@@ -793,6 +827,7 @@
if (ioe != null) throw ioe;
}
+ @Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return getFieldNames(fieldNames, this.subReaders);
@@ -808,11 +843,13 @@
return fieldSet;
}
+ @Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
/** Returns the directory this index resides in. */
+ @Override
public Directory directory() {
// Don't ensureOpen here -- in certain cases, when a
// cloned/reopened reader needs to commit, it may call
@@ -829,6 +866,7 @@
* <p/>
* <p><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
*/
+ @Override
public IndexCommit getIndexCommit() throws IOException {
return new ReaderCommit(segmentInfos, directory);
}
@@ -896,34 +934,42 @@
isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
}
+ @Override
public boolean isOptimized() {
return isOptimized;
}
+ @Override
public String getSegmentsFileName() {
return segmentsFileName;
}
+ @Override
public Collection<String> getFileNames() {
return files;
}
+ @Override
public Directory getDirectory() {
return dir;
}
+ @Override
public long getVersion() {
return version;
}
+ @Override
public long getGeneration() {
return generation;
}
+ @Override
public boolean isDeleted() {
return false;
}
+ @Override
public Map<String,String> getUserData() {
return userData;
}
@@ -964,6 +1010,7 @@
}
}
+ @Override
public boolean next() throws IOException {
for (int i=0; i<matchingSegments.length; i++) {
SegmentMergeInfo smi = matchingSegments[i];
@@ -998,14 +1045,17 @@
return true;
}
+ @Override
public Term term() {
return term;
}
+ @Override
public int docFreq() {
return docFreq;
}
+ @Override
public void close() throws IOException {
queue.close();
}
@@ -1167,6 +1217,7 @@
super(topReader,r,s);
}
+ @Override
protected TermDocs termDocs(IndexReader reader) throws IOException {
return reader.termPositions();
}
Index: src/java/org/apache/lucene/index/DocFieldConsumers.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumers.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocFieldConsumers.java (working copy)
@@ -40,12 +40,14 @@
this.two = two;
}
+ @Override
void setFieldInfos(FieldInfos fieldInfos) {
super.setFieldInfos(fieldInfos);
one.setFieldInfos(fieldInfos);
two.setFieldInfos(fieldInfos);
}
+ @Override
public void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
Map oneThreadsAndFields = new HashMap();
@@ -78,6 +80,7 @@
two.flush(twoThreadsAndFields, state);
}
+ @Override
public void closeDocStore(SegmentWriteState state) throws IOException {
try {
one.closeDocStore(state);
@@ -86,6 +89,7 @@
}
}
+ @Override
public void abort() {
try {
one.abort();
@@ -94,12 +98,14 @@
}
}
+ @Override
public boolean freeRAM() {
boolean any = one.freeRAM();
any |= two.freeRAM();
return any;
}
+ @Override
public DocFieldConsumerPerThread addThread(DocFieldProcessorPerThread docFieldProcessorPerThread) throws IOException {
return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.addThread(docFieldProcessorPerThread), two.addThread(docFieldProcessorPerThread));
}
@@ -133,10 +139,12 @@
DocumentsWriter.DocWriter one;
DocumentsWriter.DocWriter two;
+ @Override
public long sizeInBytes() {
return one.sizeInBytes() + two.sizeInBytes();
}
+ @Override
public void finish() throws IOException {
try {
try {
@@ -149,6 +157,7 @@
}
}
+ @Override
public void abort() {
try {
try {
Index: src/java/org/apache/lucene/index/DocFieldConsumersPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumersPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocFieldConsumersPerField.java (working copy)
@@ -32,11 +32,13 @@
this.two = two;
}
+ @Override
public void processFields(Fieldable[] fields, int count) throws IOException {
one.processFields(fields, count);
two.processFields(fields, count);
}
+ @Override
public void abort() {
try {
one.abort();
Index: src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java (working copy)
@@ -34,11 +34,13 @@
docState = docFieldProcessorPerThread.docState;
}
+ @Override
public void startDocument() throws IOException {
one.startDocument();
two.startDocument();
}
+ @Override
public void abort() {
try {
one.abort();
@@ -47,6 +49,7 @@
}
}
+ @Override
public DocumentsWriter.DocWriter finishDocument() throws IOException {
final DocumentsWriter.DocWriter oneDoc = one.finishDocument();
final DocumentsWriter.DocWriter twoDoc = two.finishDocument();
@@ -65,6 +68,7 @@
}
}
+ @Override
public DocFieldConsumerPerField addField(FieldInfo fi) {
return new DocFieldConsumersPerField(this, one.addField(fi), two.addField(fi));
}
Index: src/java/org/apache/lucene/index/DocFieldProcessor.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessor.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocFieldProcessor.java (working copy)
@@ -45,11 +45,13 @@
fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
}
+ @Override
public void closeDocStore(SegmentWriteState state) throws IOException {
consumer.closeDocStore(state);
fieldsWriter.closeDocStore(state);
}
+ @Override
public void flush(Collection<DocConsumerPerThread> threads, SegmentWriteState state) throws IOException {
Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>>();
@@ -70,15 +72,18 @@
state.flushedFiles.add(fileName);
}
+ @Override
public void abort() {
fieldsWriter.abort();
consumer.abort();
}
+ @Override
public boolean freeRAM() {
return consumer.freeRAM();
}
+ @Override
public DocConsumerPerThread addThread(DocumentsWriterThreadState threadState) throws IOException {
return new DocFieldProcessorPerThread(threadState, this);
}
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (working copy)
@@ -63,6 +63,7 @@
fieldsWriter = docFieldProcessor.fieldsWriter.addThread(docState);
}
+ @Override
public void abort() {
for(int i=0;i<fieldHash.length;i++) {
DocFieldProcessorPerField field = fieldHash[i];
@@ -150,6 +151,7 @@
hashMask = newHashMask;
}
+ @Override
public DocumentsWriter.DocWriter processDocument() throws IOException {
consumer.startDocument();
@@ -353,10 +355,12 @@
DocumentsWriter.DocWriter one;
DocumentsWriter.DocWriter two;
+ @Override
public long sizeInBytes() {
return one.sizeInBytes() + two.sizeInBytes();
}
+ @Override
public void finish() throws IOException {
try {
try {
@@ -369,6 +373,7 @@
}
}
+ @Override
public void abort() {
try {
try {
Index: src/java/org/apache/lucene/index/DocInverter.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverter.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocInverter.java (working copy)
@@ -39,12 +39,14 @@
this.endConsumer = endConsumer;
}
+ @Override
void setFieldInfos(FieldInfos fieldInfos) {
super.setFieldInfos(fieldInfos);
consumer.setFieldInfos(fieldInfos);
endConsumer.setFieldInfos(fieldInfos);
}
+ @Override
void flush(Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> childThreadsAndFields = new HashMap<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>>();
@@ -71,20 +73,24 @@
endConsumer.flush(endChildThreadsAndFields, state);
}
+ @Override
public void closeDocStore(SegmentWriteState state) throws IOException {
consumer.closeDocStore(state);
endConsumer.closeDocStore(state);
}
+ @Override
void abort() {
consumer.abort();
endConsumer.abort();
}
+ @Override
public boolean freeRAM() {
return consumer.freeRAM();
}
+ @Override
public DocFieldConsumerPerThread addThread(DocFieldProcessorPerThread docFieldProcessorPerThread) {
return new DocInverterPerThread(docFieldProcessorPerThread, this);
}
Index: src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocInverterPerField.java (working copy)
@@ -51,11 +51,13 @@
this.endConsumer = perThread.endConsumer.addField(this, fieldInfo);
}
+ @Override
void abort() {
consumer.abort();
endConsumer.abort();
}
+ @Override
public void processFields(final Fieldable[] fields,
final int count) throws IOException {
Index: src/java/org/apache/lucene/index/DocInverterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocInverterPerThread.java (working copy)
@@ -49,6 +49,7 @@
}
// this is a dummy, to not throw an UOE because this class does not implement any iteration method
+ @Override
public boolean incrementToken() {
throw new UnsupportedOperationException();
}
@@ -68,11 +69,13 @@
endConsumer = docInverter.endConsumer.addThread(this);
}
+ @Override
public void startDocument() throws IOException {
consumer.startDocument();
endConsumer.startDocument();
}
+ @Override
public DocumentsWriter.DocWriter finishDocument() throws IOException {
// TODO: allow endConsumer.finishDocument to also return
// a DocWriter
@@ -80,6 +83,7 @@
return consumer.finishDocument();
}
+ @Override
void abort() {
try {
consumer.abort();
@@ -88,6 +92,7 @@
}
}
+ @Override
public DocFieldConsumerPerField addField(FieldInfo fi) {
return new DocInverterPerField(this, fi);
}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -181,6 +181,7 @@
static final IndexingChain DefaultIndexingChain = new IndexingChain() {
+ @Override
DocConsumer getChain(DocumentsWriter documentsWriter) {
/*
This is the current indexing chain:
@@ -1115,10 +1116,13 @@
}
private static class SkipDocWriter extends DocWriter {
+ @Override
void finish() {
}
+ @Override
void abort() {
}
+ @Override
long sizeInBytes() {
return 0;
}
@@ -1194,6 +1198,7 @@
ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
/* Allocate another byte[] from the shared pool */
+ @Override
byte[] getByteBlock(boolean trackAllocations) {
synchronized(DocumentsWriter.this) {
final int size = freeByteBlocks.size();
@@ -1217,6 +1222,7 @@
}
/* Return byte[]'s to the pool */
+ @Override
void recycleByteBlocks(byte[][] blocks, int start, int end) {
synchronized(DocumentsWriter.this) {
for(int i=start;i<end;i++)
Index: src/java/org/apache/lucene/index/FieldInfo.java
===================================================================
--- src/java/org/apache/lucene/index/FieldInfo.java (revision 830378)
+++ src/java/org/apache/lucene/index/FieldInfo.java (working copy)
@@ -55,6 +55,7 @@
}
}
+ @Override
public Object clone() {
return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector,
storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
Index: src/java/org/apache/lucene/index/FieldInfos.java
===================================================================
--- src/java/org/apache/lucene/index/FieldInfos.java (revision 830378)
+++ src/java/org/apache/lucene/index/FieldInfos.java (working copy)
@@ -98,6 +98,7 @@
/**
* Returns a deep clone of this FieldInfos instance.
*/
+ @Override
synchronized public Object clone() {
FieldInfos fis = new FieldInfos();
final int numField = byNumber.size();
Index: src/java/org/apache/lucene/index/FieldSortedTermVectorMapper.java
===================================================================
--- src/java/org/apache/lucene/index/FieldSortedTermVectorMapper.java (revision 830378)
+++ src/java/org/apache/lucene/index/FieldSortedTermVectorMapper.java (working copy)
@@ -43,11 +43,13 @@
this.comparator = comparator;
}
+ @Override
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
TermVectorEntry entry = new TermVectorEntry(currentField, term, frequency, offsets, positions);
currentSet.add(entry);
}
+ @Override
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
currentSet = new TreeSet<TermVectorEntry>(comparator);
currentField = field;
Index: src/java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/FieldsReader.java (working copy)
@@ -70,6 +70,7 @@
* job not to close the original FieldsReader until all
* clones are called (eg, currently SegmentReader manages
* this logic). */
+ @Override
public Object clone() {
ensureOpen();
return new FieldsReader(fieldInfos, numTotalDocs, size, format, formatSize, docStoreOffset, cloneableFieldsStream, cloneableIndexStream);
@@ -516,6 +517,7 @@
this.toRead = toRead;
}
+ @Override
public byte[] getBinaryValue(byte[] result) {
ensureOpen();
Index: src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy)
@@ -85,9 +85,13 @@
public FilterTermEnum(TermEnum in) { this.in = in; }
+ @Override
public boolean next() throws IOException { return in.next(); }
+ @Override
public Term term() { return in.term(); }
+ @Override
public int docFreq() { return in.docFreq(); }
+ @Override
public void close() throws IOException { in.close(); }
}
@@ -105,16 +109,19 @@
this.in = in;
}
+ @Override
public Directory directory() {
return in.directory();
}
+ @Override
public TermFreqVector[] getTermFreqVectors(int docNumber)
throws IOException {
ensureOpen();
return in.getTermFreqVectors(docNumber);
}
+ @Override
public TermFreqVector getTermFreqVector(int docNumber, String field)
throws IOException {
ensureOpen();
@@ -122,120 +129,146 @@
}
+ @Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
in.getTermFreqVector(docNumber, field, mapper);
}
+ @Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
in.getTermFreqVector(docNumber, mapper);
}
+ @Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return in.numDocs();
}
+ @Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return in.maxDoc();
}
+ @Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
return in.document(n, fieldSelector);
}
+ @Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
return in.isDeleted(n);
}
+ @Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return in.hasDeletions();
}
+ @Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
+ @Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
return in.hasNorms(field);
}
+ @Override
public byte[] norms(String f) throws IOException {
ensureOpen();
return in.norms(f);
}
+ @Override
public void norms(String f, byte[] bytes, int offset) throws IOException {
ensureOpen();
in.norms(f, bytes, offset);
}
+ @Override
protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
in.setNorm(d, f, b);
}
+ @Override
public TermEnum terms() throws IOException {
ensureOpen();
return in.terms();
}
+ @Override
public TermEnum terms(Term t) throws IOException {
ensureOpen();
return in.terms(t);
}
+ @Override
public int docFreq(Term t) throws IOException {
ensureOpen();
return in.docFreq(t);
}
+ @Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return in.termDocs();
}
+ @Override
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
return in.termDocs(term);
}
+ @Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return in.termPositions();
}
+ @Override
protected void doDelete(int n) throws CorruptIndexException, IOException { in.deleteDocument(n); }
+ @Override
protected void doCommit(Map<String,String> commitUserData) throws IOException { in.commit(commitUserData); }
+ @Override
protected void doClose() throws IOException { in.close(); }
+ @Override
public Collection<String> getFieldNames(IndexReader.FieldOption fieldNames) {
ensureOpen();
return in.getFieldNames(fieldNames);
}
+ @Override
public long getVersion() {
ensureOpen();
return in.getVersion();
}
+ @Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
return in.isCurrent();
}
+ @Override
public boolean isOptimized() {
ensureOpen();
return in.isOptimized();
}
+ @Override
public IndexReader[] getSequentialSubReaders() {
return in.getSequentialSubReaders();
}
Index: src/java/org/apache/lucene/index/FormatPostingsDocsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FormatPostingsDocsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/FormatPostingsDocsWriter.java (working copy)
@@ -67,6 +67,7 @@
/** Adds a new doc in this term. If this returns null
* then we just skip consuming positions/payloads. */
+ @Override
FormatPostingsPositionsConsumer addDoc(int docID, int termDocFreq) throws IOException {
final int delta = docID - lastDocID;
@@ -99,6 +100,7 @@
final UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
/** Called when we are done adding docs to this term */
+ @Override
void finish() throws IOException {
long skipPointer = skipListWriter.writeSkip(out);
Index: src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java (working copy)
@@ -60,12 +60,14 @@
}
/** Add a new field */
+ @Override
FormatPostingsTermsConsumer addField(FieldInfo field) {
termsWriter.setField(field);
return termsWriter;
}
/** Called when we are done adding everything. */
+ @Override
void finish() throws IOException {
termsOut.close();
termsWriter.close();
Index: src/java/org/apache/lucene/index/FormatPostingsPositionsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FormatPostingsPositionsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/FormatPostingsPositionsWriter.java (working copy)
@@ -49,6 +49,7 @@
int lastPosition;
/** Add a new position & payload */
+ @Override
void addPosition(int position, byte[] payload, int payloadOffset, int payloadLength) throws IOException {
assert !omitTermFreqAndPositions: "omitTermFreqAndPositions is true";
assert out != null;
@@ -75,6 +76,7 @@
}
/** Called when we are done adding positions & payloads */
+ @Override
void finish() {
lastPosition = 0;
lastPayloadLength = -1;
Index: src/java/org/apache/lucene/index/FormatPostingsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FormatPostingsTermsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/FormatPostingsTermsWriter.java (working copy)
@@ -45,6 +45,7 @@
long proxStart;
/** Adds a new term in this field */
+ @Override
FormatPostingsDocsConsumer addTerm(char[] text, int start) {
currentTerm = text;
currentTermStart = start;
@@ -62,6 +63,7 @@
}
/** Called when we are done adding terms to this field */
+ @Override
void finish() {
}
Index: src/java/org/apache/lucene/index/FreqProxTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriter.java (working copy)
@@ -31,10 +31,12 @@
@SuppressWarnings("unchecked")
final class FreqProxTermsWriter extends TermsHashConsumer {
+ @Override
public TermsHashConsumerPerThread addThread(TermsHashPerThread perThread) {
return new FreqProxTermsWriterPerThread(perThread);
}
+ @Override
void createPostings(RawPostingList[] postings, int start, int count) {
final int end = start + count;
for(int i=start;i<end;i++)
@@ -57,7 +59,9 @@
}
}
+ @Override
void closeDocStore(SegmentWriteState state) {}
+ @Override
void abort() {}
@@ -66,6 +70,7 @@
// under the same FieldInfo together, up into TermsHash*.
// Other writers would presumably share alot of this...
+ @Override
public void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
// Gather all FieldData's that have postings, across all
@@ -288,6 +293,7 @@
int lastPosition; // Last position where this term occurred
}
+ @Override
int bytesPerPosting() {
return RawPostingList.BYTES_SIZE + 4 * DocumentsWriter.INT_NUM_BYTE;
}
Index: src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (working copy)
@@ -43,6 +43,7 @@
omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
}
+ @Override
int getStreamCount() {
if (fieldInfo.omitTermFreqAndPositions)
return 1;
@@ -50,10 +51,12 @@
return 2;
}
+ @Override
void finish() {}
boolean hasPayloads;
+ @Override
void skippingLongTerm() throws IOException {}
public int compareTo(FreqProxTermsWriterPerField other) {
@@ -67,6 +70,7 @@
payloadAttribute = null;
}
+ @Override
boolean start(Fieldable[] fields, int count) {
for(int i=0;i<count;i++)
if (fields[i].isIndexed())
@@ -74,6 +78,7 @@
return false;
}
+ @Override
void start(Fieldable f) {
if (fieldState.attributeSource.hasAttribute(PayloadAttribute.class)) {
payloadAttribute = fieldState.attributeSource.getAttribute(PayloadAttribute.class);
@@ -100,6 +105,7 @@
p.lastPosition = fieldState.position;
}
+ @Override
final void newTerm(RawPostingList p0) {
// First time we're seeing this term since the last
// flush
@@ -115,6 +121,7 @@
}
}
+ @Override
final void addTerm(RawPostingList p0) {
assert docState.testPoint("FreqProxTermsWriterPerField.addTerm start");
Index: src/java/org/apache/lucene/index/FreqProxTermsWriterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriterPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriterPerThread.java (working copy)
@@ -26,16 +26,20 @@
termsHashPerThread = perThread;
}
+ @Override
public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) {
return new FreqProxTermsWriterPerField(termsHashPerField, this, fieldInfo);
}
+ @Override
void startDocument() {
}
+ @Override
DocumentsWriter.DocWriter finishDocument() {
return null;
}
+ @Override
public void abort() {}
}
Index: src/java/org/apache/lucene/index/IndexCommit.java
===================================================================
--- src/java/org/apache/lucene/index/IndexCommit.java (revision 830378)
+++ src/java/org/apache/lucene/index/IndexCommit.java (working copy)
@@ -89,6 +89,7 @@
/**
* Two IndexCommits are equal if both their Directory and versions are equal.
*/
+ @Override
public boolean equals(Object other) {
if (other instanceof IndexCommit) {
IndexCommit otherCommit = (IndexCommit) other;
@@ -97,6 +98,7 @@
return false;
}
+ @Override
public int hashCode() {
return getDirectory().hashCode() + getSegmentsFileName().hashCode();
}
Index: src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 830378)
+++ src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy)
@@ -585,30 +585,37 @@
assert !segmentInfos.hasExternalSegments(directory);
}
+ @Override
public boolean isOptimized() {
return isOptimized;
}
+ @Override
public String getSegmentsFileName() {
return segmentsFileName;
}
+ @Override
public Collection<String> getFileNames() throws IOException {
return files;
}
+ @Override
public Directory getDirectory() {
return directory;
}
+ @Override
public long getVersion() {
return version;
}
+ @Override
public long getGeneration() {
return generation;
}
+ @Override
public Map<String,String> getUserData() {
return userData;
}
@@ -617,6 +624,7 @@
* Called only be the deletion policy, to remove this
* commit point from the index.
*/
+ @Override
public void delete() {
if (!deleted) {
deleted = true;
@@ -624,6 +632,7 @@
}
}
+ @Override
public boolean isDeleted() {
return deleted;
}
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -88,6 +88,7 @@
private FieldOption(String option) {
this.option = option;
}
+ @Override
public String toString() {
return this.option;
}
@@ -395,6 +396,7 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
+ @Override
public synchronized Object clone() {
throw new UnsupportedOperationException("This reader does not implement clone()");
}
@@ -430,6 +432,7 @@
*/
public static long lastModified(final Directory directory2) throws CorruptIndexException, IOException {
return ((Long) new SegmentInfos.FindSegmentsFile(directory2) {
+ @Override
public Object doBody(String segmentFileName) throws IOException {
return Long.valueOf(directory2.fileModified(segmentFileName));
}
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -4771,6 +4771,7 @@
return limit;
}
+ @Override
public String toString()
{
return name + ":" + limit;
Index: src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java (revision 830378)
+++ src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java (working copy)
@@ -35,6 +35,7 @@
minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
}
+ @Override
protected long size(SegmentInfo info) throws IOException {
return sizeBytes(info);
}
Index: src/java/org/apache/lucene/index/LogDocMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogDocMergePolicy.java (revision 830378)
+++ src/java/org/apache/lucene/index/LogDocMergePolicy.java (working copy)
@@ -36,6 +36,7 @@
// it to Long.MAX_VALUE to disable it
maxMergeSize = Long.MAX_VALUE;
}
+ @Override
protected long size(SegmentInfo info) throws IOException {
return sizeDocs(info);
}
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java (revision 830378)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java (working copy)
@@ -102,6 +102,7 @@
}
// Javadoc inherited
+ @Override
public boolean useCompoundFile(SegmentInfos infos, SegmentInfo info) {
return useCompoundFile;
}
@@ -120,6 +121,7 @@
}
// Javadoc inherited
+ @Override
public boolean useCompoundDocStore(SegmentInfos infos) {
return useCompoundDocStore;
}
@@ -151,6 +153,7 @@
return calibrateSizeByDeletes;
}
+ @Override
public void close() {}
abstract protected long size(SegmentInfo info) throws IOException;
@@ -211,6 +214,7 @@
* setting is true. This method returns multiple merges
* (mergeFactor at a time) so the {@link MergeScheduler}
* in use may make use of concurrency. */
+ @Override
public MergeSpecification findMergesForOptimize(SegmentInfos infos,
int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException {
MergeSpecification spec;
@@ -295,6 +299,7 @@
* index. We simply merge adjacent segments that have
* deletes, up to mergeFactor at a time.
*/
+ @Override
public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException {
final int numSegments = segmentInfos.size();
@@ -347,6 +352,7 @@
* multiple levels have too many segments, this method
* will return multiple merges, allowing the {@link
* MergeScheduler} to use concurrency. */
+ @Override
public MergeSpecification findMerges(SegmentInfos infos) throws IOException {
final int numSegments = infos.size();
Index: src/java/org/apache/lucene/index/MultiLevelSkipListReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiLevelSkipListReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/MultiLevelSkipListReader.java (working copy)
@@ -244,27 +244,33 @@
input.readBytes(data, 0, length);
}
+ @Override
public void close() throws IOException {
data = null;
}
+ @Override
public long getFilePointer() {
return pointer + pos;
}
+ @Override
public long length() {
return data.length;
}
+ @Override
public byte readByte() throws IOException {
return data[pos++];
}
+ @Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
System.arraycopy(data, pos, b, offset, len);
pos += len;
}
+ @Override
public void seek(long pos) throws IOException {
this.pos = (int) (pos - pointer);
}
Index: src/java/org/apache/lucene/index/MultipleTermPositions.java
===================================================================
--- src/java/org/apache/lucene/index/MultipleTermPositions.java (revision 830378)
+++ src/java/org/apache/lucene/index/MultipleTermPositions.java (working copy)
@@ -46,6 +46,7 @@
return top();
}
+ @Override
public final boolean lessThan(TermPositions a, TermPositions b) {
return a.doc() < b.doc();
}
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/MultiReader.java (working copy)
@@ -106,6 +106,7 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
+ @Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
@@ -119,6 +120,7 @@
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*/
+ @Override
public synchronized Object clone() {
try {
return doReopen(true);
@@ -185,12 +187,14 @@
}
}
+ @Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
+ @Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
@@ -199,22 +203,26 @@
}
+ @Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
+ @Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
+ @Override
public boolean isOptimized() {
return false;
}
+ @Override
public synchronized int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
if (numDocs == -1) { // check cache
@@ -226,29 +234,34 @@
return numDocs;
}
+ @Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
+ @Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
+ @Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
+ @Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
+ @Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
@@ -256,6 +269,7 @@
hasDeletions = true;
}
+ @Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
@@ -268,6 +282,7 @@
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
+ @Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
@@ -282,6 +297,7 @@
return ones;
}
+ @Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
@@ -297,6 +313,7 @@
return bytes;
}
+ @Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
@@ -315,6 +332,7 @@
}
}
+ @Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
@@ -324,16 +342,19 @@
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
+ @Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
+ @Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
+ @Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
@@ -342,21 +363,25 @@
return total;
}
+ @Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
+ @Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
+ @Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit(commitUserData);
}
+ @Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < subReaders.length; i++) {
if (decrefOnClose[i]) {
@@ -367,6 +392,7 @@
}
}
+ @Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
@@ -375,6 +401,7 @@
/**
* Checks recursively if all subreaders are up to date.
*/
+ @Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
@@ -389,10 +416,12 @@
/** Not implemented.
* @throws UnsupportedOperationException
*/
+ @Override
public long getVersion() {
throw new UnsupportedOperationException("MultiReader does not support this method.");
}
+ @Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
Index: src/java/org/apache/lucene/index/NormsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/NormsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/NormsWriter.java (working copy)
@@ -41,21 +41,25 @@
private static final byte defaultNorm = Similarity.encodeNorm(1.0f);
private FieldInfos fieldInfos;
+ @Override
public InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
return new NormsWriterPerThread(docInverterPerThread, this);
}
+ @Override
public void abort() {}
// We only write the _X.nrm file at flush
void files(Collection<String> files) {}
+ @Override
void setFieldInfos(FieldInfos fieldInfos) {
this.fieldInfos = fieldInfos;
}
/** Produce _X.nrm if any document had a field with norms
* not disabled */
+ @Override
public void flush(Map<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
final Map byField = new HashMap();
@@ -173,5 +177,6 @@
}
}
+ @Override
void closeDocStore(SegmentWriteState state) {}
}
Index: src/java/org/apache/lucene/index/NormsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/NormsWriterPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/NormsWriterPerField.java (working copy)
@@ -52,6 +52,7 @@
fieldState = docInverterPerField.fieldState;
}
+ @Override
void abort() {
upto = 0;
}
@@ -60,6 +61,7 @@
return fieldInfo.name.compareTo(other.fieldInfo.name);
}
+ @Override
void finish() {
assert docIDs.length == norms.length;
if (fieldInfo.isIndexed && !fieldInfo.omitNorms) {
Index: src/java/org/apache/lucene/index/NormsWriterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/NormsWriterPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/NormsWriterPerThread.java (working copy)
@@ -26,13 +26,17 @@
docState = docInverterPerThread.docState;
}
+ @Override
InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
}
+ @Override
void abort() {}
+ @Override
void startDocument() {}
+ @Override
void finishDocument() {}
boolean freeRAM() {
Index: src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- src/java/org/apache/lucene/index/ParallelReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/ParallelReader.java (working copy)
@@ -121,6 +121,7 @@
decrefOnClose.add(Boolean.valueOf(incRefReaders));
}
+ @Override
public synchronized Object clone() {
try {
return doReopen(true);
@@ -148,6 +149,7 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
+ @Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
@@ -217,22 +219,26 @@
}
+ @Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return numDocs;
}
+ @Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
+ @Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
// check first reader
+ @Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
if (readers.size() > 0)
@@ -241,6 +247,7 @@
}
// delete in all readers
+ @Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.deleteDocument(n);
@@ -249,6 +256,7 @@
}
// undeleteAll in all readers
+ @Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.undeleteAll();
@@ -257,6 +265,7 @@
}
// append fields from storedFieldReaders
+ @Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
Document result = new Document();
@@ -282,6 +291,7 @@
}
// get all vectors
+ @Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
ArrayList<TermFreqVector> results = new ArrayList<TermFreqVector>();
@@ -296,6 +306,7 @@
return results.toArray(new TermFreqVector[results.size()]);
}
+ @Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
@@ -304,6 +315,7 @@
}
+ @Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
@@ -312,6 +324,7 @@
}
}
+ @Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
@@ -324,18 +337,21 @@
}
+ @Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
return reader==null ? false : reader.hasNorms(field);
}
+ @Override
public byte[] norms(String field) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
return reader==null ? null : reader.norms(field);
}
+ @Override
public void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
@@ -344,6 +360,7 @@
reader.norms(field, result, offset);
}
+ @Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
IndexReader reader = fieldToReader.get(field);
@@ -351,37 +368,44 @@
reader.doSetNorm(n, field, value);
}
+ @Override
public TermEnum terms() throws IOException {
ensureOpen();
return new ParallelTermEnum();
}
+ @Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new ParallelTermEnum(term);
}
+ @Override
public int docFreq(Term term) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(term.field());
return reader==null ? 0 : reader.docFreq(term);
}
+ @Override
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
return new ParallelTermDocs(term);
}
+ @Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new ParallelTermDocs();
}
+ @Override
public TermPositions termPositions(Term term) throws IOException {
ensureOpen();
return new ParallelTermPositions(term);
}
+ @Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new ParallelTermPositions();
@@ -390,6 +414,7 @@
/**
* Checks recursively if all subreaders are up to date.
*/
+ @Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
if (!reader.isCurrent()) {
@@ -404,6 +429,7 @@
/**
* Checks recursively if all subindexes are optimized
*/
+ @Override
public boolean isOptimized() {
for (final IndexReader reader : readers) {
if (!reader.isOptimized()) {
@@ -419,6 +445,7 @@
/** Not implemented.
* @throws UnsupportedOperationException
*/
+ @Override
public long getVersion() {
throw new UnsupportedOperationException("ParallelReader does not support this method.");
}
@@ -428,11 +455,13 @@
return readers.toArray(new IndexReader[readers.size()]);
}
+ @Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (final IndexReader reader : readers)
reader.commit(commitUserData);
}
+ @Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < readers.size(); i++) {
if (decrefOnClose.get(i).booleanValue()) {
@@ -443,6 +472,7 @@
}
}
+ @Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
Set<String> fieldSet = new HashSet<String>();
@@ -476,6 +506,7 @@
termEnum = reader.terms(term);
}
+ @Override
public boolean next() throws IOException {
if (termEnum==null)
return false;
@@ -504,6 +535,7 @@
return false; // no more fields
}
+ @Override
public Term term() {
if (termEnum==null)
return null;
@@ -511,6 +543,7 @@
return termEnum.term();
}
+ @Override
public int docFreq() {
if (termEnum==null)
return 0;
@@ -518,6 +551,7 @@
return termEnum.docFreq();
}
+ @Override
public void close() throws IOException {
if (termEnum!=null)
termEnum.close();
@@ -583,6 +617,7 @@
public ParallelTermPositions() {}
public ParallelTermPositions(Term term) throws IOException { seek(term); }
+ @Override
public void seek(Term term) throws IOException {
IndexReader reader = fieldToReader.get(term.field());
termDocs = reader!=null ? reader.termPositions(term) : null;
Index: src/java/org/apache/lucene/index/Payload.java
===================================================================
--- src/java/org/apache/lucene/index/Payload.java (revision 830378)
+++ src/java/org/apache/lucene/index/Payload.java (working copy)
@@ -156,6 +156,7 @@
* Clones this payload by creating a copy of the underlying
* byte array.
*/
+ @Override
public Object clone() {
try {
// Start with a shallow copy of data
@@ -176,6 +177,7 @@
}
}
+ @Override
public boolean equals(Object obj) {
if (obj == this)
return true;
@@ -192,6 +194,7 @@
return false;
}
+ @Override
public int hashCode() {
return ArrayUtil.hashCode(data, offset, offset+length);
}
Index: src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java
===================================================================
--- src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java (revision 830378)
+++ src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java (working copy)
@@ -56,6 +56,7 @@
* Never ignores positions. This mapper doesn't make much sense unless there are positions
* @return false
*/
+ @Override
public boolean isIgnoringPositions() {
return false;
}
@@ -67,6 +68,7 @@
* @param offsets
* @param positions
*/
+ @Override
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
for (int i = 0; i < positions.length; i++) {
Integer posVal = Integer.valueOf(positions[i]);
@@ -86,6 +88,7 @@
* @param storeOffsets Whether offsets are available
* @param storePositions Whether positions are available
*/
+ @Override
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
if (storePositions == false)
{
Index: src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java (working copy)
@@ -36,6 +36,7 @@
super(writer, infos, termInfosIndexDivisor);
}
+ @Override
protected void acquireWriteLock() {
ReadOnlySegmentReader.noWrite();
}
Index: src/java/org/apache/lucene/index/ReadOnlySegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReadOnlySegmentReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/ReadOnlySegmentReader.java (working copy)
@@ -23,11 +23,13 @@
throw new UnsupportedOperationException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
}
+ @Override
protected void acquireWriteLock() {
noWrite();
}
// Not synchronized
+ @Override
public boolean isDeleted(int n) {
return deletedDocs != null && deletedDocs.get(n);
}
Index: src/java/org/apache/lucene/index/ReusableStringReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReusableStringReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/ReusableStringReader.java (working copy)
@@ -31,9 +31,11 @@
left = s.length();
this.upto = 0;
}
+ @Override
public int read(char[] c) {
return read(c, 0, c.length);
}
+ @Override
public int read(char[] c, int off, int len) {
if (left > len) {
s.getChars(upto, upto+len, c, off);
@@ -50,6 +52,7 @@
return r;
}
}
+ @Override
public void close() {};
}
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentInfo.java (working copy)
@@ -91,6 +91,7 @@
private Map<String,String> diagnostics;
+ @Override
public String toString() {
return "si: "+dir.toString()+" "+name+" docCount: "+docCount+" delCount: "+delCount+" delFileName: "+getDelFileName();
}
@@ -314,6 +315,7 @@
clearFiles();
}
+ @Override
public Object clone () {
SegmentInfo si = new SegmentInfo(name, docCount, dir);
si.isCompoundFile = isCompoundFile;
@@ -710,6 +712,7 @@
/** We consider another SegmentInfo instance equal if it
* has the same dir and same name. */
+ @Override
public boolean equals(Object obj) {
SegmentInfo other;
try {
@@ -720,6 +723,7 @@
return other.dir == dir && other.name.equals(name);
}
+ @Override
public int hashCode() {
return dir.hashCode() + name.hashCode();
}
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentInfos.java (working copy)
@@ -305,6 +305,7 @@
new FindSegmentsFile(directory) {
+ @Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
read(directory, segmentFileName);
return null;
@@ -369,6 +370,7 @@
* SegmentInfo.
*/
+ @Override
public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i<sis.size();i++) {
@@ -400,6 +402,7 @@
throws CorruptIndexException, IOException {
return ((Long) new FindSegmentsFile(directory) {
+ @Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
IndexInput input = directory.openInput(segmentFileName);
Index: src/java/org/apache/lucene/index/SegmentMergeQueue.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMergeQueue.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentMergeQueue.java (working copy)
@@ -25,6 +25,7 @@
initialize(size);
}
+ @Override
protected final boolean lessThan(SegmentMergeInfo stiA, SegmentMergeInfo stiB) {
int comparison = stiA.term.compareTo(stiB.term);
if (comparison == 0)
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy)
@@ -76,6 +76,7 @@
directory = dir;
segment = name;
checkAbort = new CheckAbort(null, null) {
+ @Override
public void work(double units) throws MergeAbortedException {
// do nothing
}
@@ -89,6 +90,7 @@
checkAbort = new CheckAbort(merge, directory);
} else {
checkAbort = new CheckAbort(null, null) {
+ @Override
public void work(double units) throws MergeAbortedException {
// do nothing
}
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -293,6 +293,7 @@
* Sets the initial value
*/
private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
+ @Override
protected FieldsReader initialValue() {
return (FieldsReader) core.getFieldsReaderOrig().clone();
}
@@ -301,6 +302,7 @@
static class Ref {
private int refCount = 1;
+ @Override
public String toString() {
return "refcount: "+refCount;
}
@@ -491,6 +493,7 @@
// Returns a copy of this Norm instance that shares
// IndexInput & bytes with the original one
+ @Override
public synchronized Object clone() {
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
@@ -633,6 +636,7 @@
return (BitVector)bv.clone();
}
+ @Override
public final synchronized Object clone() {
try {
return clone(readOnly); // Preserve current readOnly
@@ -641,6 +645,7 @@
}
}
+ @Override
public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
return reopenSegment(si, true, openReadOnly);
}
@@ -737,6 +742,7 @@
return clone;
}
+ @Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
if (hasChanges) {
if (deletedDocsDirty) { // re-write deleted
@@ -772,6 +778,7 @@
return fieldsReaderLocal.get();
}
+ @Override
protected void doClose() throws IOException {
termVectorsLocal.close();
fieldsReaderLocal.close();
@@ -795,6 +802,7 @@
return si.hasDeletions();
}
+ @Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return deletedDocs != null;
@@ -808,6 +816,7 @@
return si.hasSeparateNorms();
}
+ @Override
protected void doDelete(int docNum) {
if (deletedDocs == null) {
deletedDocs = new BitVector(maxDoc());
@@ -827,6 +836,7 @@
pendingDeleteCount++;
}
+ @Override
protected void doUndeleteAll() {
deletedDocsDirty = false;
if (deletedDocs != null) {
@@ -847,11 +857,13 @@
return new ArrayList<String>(si.files());
}
+ @Override
public TermEnum terms() {
ensureOpen();
return core.getTermsReader().terms();
}
+ @Override
public TermEnum terms(Term t) throws IOException {
ensureOpen();
return core.getTermsReader().terms(t);
@@ -861,15 +873,18 @@
return core.fieldInfos;
}
+ @Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
return getFieldsReader().doc(n, fieldSelector);
}
+ @Override
public synchronized boolean isDeleted(int n) {
return (deletedDocs != null && deletedDocs.get(n));
}
+ @Override
public TermDocs termDocs(Term term) throws IOException {
if (term == null) {
return new AllTermDocs(this);
@@ -878,16 +893,19 @@
}
}
+ @Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new SegmentTermDocs(this);
}
+ @Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new SegmentTermPositions(this);
}
+ @Override
public int docFreq(Term t) throws IOException {
ensureOpen();
TermInfo ti = core.getTermsReader().get(t);
@@ -897,6 +915,7 @@
return 0;
}
+ @Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
int n = maxDoc();
@@ -905,6 +924,7 @@
return n;
}
+ @Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return si.docCount;
@@ -913,6 +933,7 @@
/**
* @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption)
*/
+ @Override
public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
ensureOpen();
@@ -961,6 +982,7 @@
}
+ @Override
public synchronized boolean hasNorms(String field) {
ensureOpen();
return norms.containsKey(field);
@@ -982,12 +1004,14 @@
}
// returns fake norms if norms aren't available
+ @Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = getNorms(field);
return bytes;
}
+ @Override
protected void doSetNorm(int doc, String field, byte value)
throws IOException {
Norm norm = norms.get(field);
@@ -999,6 +1023,7 @@
}
/** Read norms into a pre-allocated array. */
+ @Override
public synchronized void norms(String field, byte[] bytes, int offset)
throws IOException {
@@ -1121,6 +1146,7 @@
* flag set. If the flag was not set, the method returns null.
* @throws IOException
*/
+ @Override
public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException {
// Check if this field is invalid or has no stored term vector
ensureOpen();
@@ -1136,6 +1162,7 @@
}
+ @Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
FieldInfo fi = core.fieldInfos.fieldInfo(field);
@@ -1152,6 +1179,7 @@
}
+ @Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
@@ -1169,6 +1197,7 @@
* If no such fields existed, the method returns null.
* @throws IOException
*/
+ @Override
public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {
ensureOpen();
@@ -1218,6 +1247,7 @@
}
/** Returns the directory this index resides in. */
+ @Override
public Directory directory() {
// Don't ensureOpen here -- in certain cases, when a
// cloned/reopened reader needs to commit, it may call
@@ -1228,10 +1258,12 @@
// This is necessary so that cloned SegmentReaders (which
// share the underlying postings data) will map to the
// same entry in the FieldCache. See LUCENE-1579.
+ @Override
public final Object getFieldCacheKey() {
return core.freqStream;
}
+ @Override
public long getUniqueTermCount() {
return core.getTermsReader().size();
}
Index: src/java/org/apache/lucene/index/SegmentTermEnum.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentTermEnum.java (working copy)
@@ -92,6 +92,7 @@
}
}
+ @Override
protected Object clone() {
SegmentTermEnum clone = null;
try {
@@ -118,6 +119,7 @@
}
/** Increments the enumeration to the next element. True if one exists.*/
+ @Override
public final boolean next() throws IOException {
if (position++ >= size - 1) {
prevBuffer.set(termBuffer);
@@ -165,6 +167,7 @@
/** Returns the current Term in the enumeration.
Initially invalid, valid after next() called for the first time.*/
+ @Override
public final Term term() {
return termBuffer.toTerm();
}
@@ -188,6 +191,7 @@
/** Returns the docFreq from the current TermInfo in the enumeration.
Initially invalid, valid after next() called for the first time.*/
+ @Override
public final int docFreq() {
return termInfo.docFreq;
}
@@ -205,6 +209,7 @@
}
/** Closes the enumeration to further activity, freeing resources. */
+ @Override
public final void close() throws IOException {
input.close();
}
Index: src/java/org/apache/lucene/index/SegmentTermPositions.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentTermPositions.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentTermPositions.java (working copy)
@@ -43,6 +43,7 @@
this.proxStream = null; // the proxStream will be cloned lazily when nextPosition() is called for the first time
}
+ @Override
final void seek(TermInfo ti, Term term) throws IOException {
super.seek(ti, term);
if (ti != null)
@@ -54,6 +55,7 @@
needToLoadPayload = false;
}
+ @Override
public final void close() throws IOException {
super.close();
if (proxStream != null) proxStream.close();
@@ -85,11 +87,13 @@
return delta;
}
+ @Override
protected final void skippingDoc() throws IOException {
// we remember to skip a document lazily
lazySkipProxCount += freq;
}
+ @Override
public final boolean next() throws IOException {
// we remember to skip the remaining positions of the current
// document lazily
@@ -103,12 +107,14 @@
return false;
}
+ @Override
public final int read(final int[] docs, final int[] freqs) {
throw new UnsupportedOperationException("TermPositions does not support processing multiple documents in one call. Use TermDocs instead.");
}
/** Called by super.skipTo(). */
+ @Override
protected void skipProx(long proxPointer, int payloadLength) throws IOException {
// we save the pointer, we might have to skip there lazily
lazySkipPointer = proxPointer;
Index: src/java/org/apache/lucene/index/SegmentTermVector.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentTermVector.java (revision 830378)
+++ src/java/org/apache/lucene/index/SegmentTermVector.java (working copy)
@@ -39,6 +39,7 @@
return field;
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('{');
Index: src/java/org/apache/lucene/index/SerialMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/SerialMergeScheduler.java (revision 830378)
+++ src/java/org/apache/lucene/index/SerialMergeScheduler.java (working copy)
@@ -26,6 +26,7 @@
/** Just do the merges in sequence. We do this
* "synchronized" so that even if the application is using
* multiple threads, only one merge may run at a time. */
+ @Override
synchronized public void merge(IndexWriter writer)
throws CorruptIndexException, IOException {
@@ -37,5 +38,6 @@
}
}
+ @Override
public void close() {}
}
Index: src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
===================================================================
--- src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java (revision 830378)
+++ src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java (working copy)
@@ -92,15 +92,19 @@
MyCommitPoint(IndexCommit cp) {
this.cp = cp;
}
+ @Override
public String getSegmentsFileName() {
return cp.getSegmentsFileName();
}
+ @Override
public Collection<String> getFileNames() throws IOException {
return cp.getFileNames();
}
+ @Override
public Directory getDirectory() {
return cp.getDirectory();
}
+ @Override
public void delete() {
synchronized(SnapshotDeletionPolicy.this) {
// Suppress the delete request if this commit point is
@@ -109,15 +113,19 @@
cp.delete();
}
}
+ @Override
public boolean isDeleted() {
return cp.isDeleted();
}
+ @Override
public long getVersion() {
return cp.getVersion();
}
+ @Override
public long getGeneration() {
return cp.getGeneration();
}
+ @Override
public Map<String,String> getUserData() throws IOException {
return cp.getUserData();
}
Index: src/java/org/apache/lucene/index/SortedTermVectorMapper.java
===================================================================
--- src/java/org/apache/lucene/index/SortedTermVectorMapper.java (revision 830378)
+++ src/java/org/apache/lucene/index/SortedTermVectorMapper.java (working copy)
@@ -60,6 +60,7 @@
* @param positions Position information, may be null
*/
//We need to combine any previous mentions of the term
+ @Override
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
TermVectorEntry entry = termToTVE.get(term);
if (entry == null) {
@@ -108,6 +109,7 @@
}
+ @Override
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
this.storeOffsets = storeOffsets;
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java (working copy)
@@ -177,15 +177,18 @@
numStoredFields = 0;
}
+ @Override
void abort() {
reset();
free(this);
}
+ @Override
public long sizeInBytes() {
return fdt.sizeInBytes();
}
+ @Override
public void finish() throws IOException {
finishDocument(this);
}
Index: src/java/org/apache/lucene/index/TermBuffer.java
===================================================================
--- src/java/org/apache/lucene/index/TermBuffer.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermBuffer.java (working copy)
@@ -124,6 +124,7 @@
return term;
}
+ @Override
protected Object clone() {
TermBuffer clone = null;
try {
Index: src/java/org/apache/lucene/index/TermsHash.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHash.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermsHash.java (working copy)
@@ -68,6 +68,7 @@
postingsFreeChunk = (int) (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
}
+ @Override
InvertedDocConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, null);
}
@@ -76,11 +77,13 @@
return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, primaryPerThread);
}
+ @Override
void setFieldInfos(FieldInfos fieldInfos) {
this.fieldInfos = fieldInfos;
consumer.setFieldInfos(fieldInfos);
}
+ @Override
synchronized public void abort() {
consumer.abort();
if (nextTermsHash != null)
@@ -99,12 +102,14 @@
}
}
+ @Override
synchronized void closeDocStore(SegmentWriteState state) throws IOException {
consumer.closeDocStore(state);
if (nextTermsHash != null)
nextTermsHash.closeDocStore(state);
}
+ @Override
synchronized void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
Map childThreadsAndFields = new HashMap();
Map nextThreadsAndFields;
@@ -152,6 +157,7 @@
nextTermsHash.flush(nextThreadsAndFields, state);
}
+ @Override
synchronized public boolean freeRAM() {
if (!trackAllocations)
Index: src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermsHashPerField.java (working copy)
@@ -100,6 +100,7 @@
nextPerField.reset();
}
+ @Override
synchronized public void abort() {
reset();
if (nextPerField != null)
@@ -248,6 +249,7 @@
private boolean doCall;
private boolean doNextCall;
+ @Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.addAttribute(TermAttribute.class);
consumer.start(f);
@@ -256,6 +258,7 @@
}
}
+ @Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
if (nextPerField != null)
@@ -339,6 +342,7 @@
}
// Primary entry point (for first TermsHash)
+ @Override
void add() throws IOException {
assert !postingsCompacted;
@@ -505,6 +509,7 @@
writeByte(stream, (byte) i);
}
+ @Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
Index: src/java/org/apache/lucene/index/TermsHashPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermsHashPerThread.java (working copy)
@@ -58,10 +58,12 @@
nextPerThread = null;
}
+ @Override
InvertedDocConsumerPerField addField(DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
return new TermsHashPerField(docInverterPerField, this, nextPerThread, fieldInfo);
}
+ @Override
synchronized public void abort() {
reset(true);
consumer.abort();
@@ -83,12 +85,14 @@
return true;
}
+ @Override
public void startDocument() throws IOException {
consumer.startDocument();
if (nextPerThread != null)
nextPerThread.consumer.startDocument();
}
+ @Override
public DocumentsWriter.DocWriter finishDocument() throws IOException {
final DocumentsWriter.DocWriter doc = consumer.finishDocument();
Index: src/java/org/apache/lucene/index/TermVectorEntry.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorEntry.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorEntry.java (working copy)
@@ -73,6 +73,7 @@
}
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
@@ -84,10 +85,12 @@
return true;
}
+ @Override
public int hashCode() {
return (term != null ? term.hashCode() : 0);
}
+ @Override
public String toString() {
return "TermVectorEntry{" +
"field='" + field + '\'' +
Index: src/java/org/apache/lucene/index/TermVectorOffsetInfo.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorOffsetInfo.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorOffsetInfo.java (working copy)
@@ -70,6 +70,7 @@
* @param o The comparison Object
* @return true if both {@link #getStartOffset()} and {@link #getEndOffset()} are the same for both objects.
*/
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof TermVectorOffsetInfo)) return false;
@@ -82,6 +83,7 @@
return true;
}
+ @Override
public int hashCode() {
int result;
result = startOffset;
Index: src/java/org/apache/lucene/index/TermVectorsReader.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsReader.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorsReader.java (working copy)
@@ -520,6 +520,7 @@
}
}
+ @Override
protected Object clone() throws CloneNotSupportedException {
final TermVectorsReader clone = (TermVectorsReader) super.clone();
@@ -552,6 +553,7 @@
private boolean storingPositions;
private String field;
+ @Override
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
this.field = field;
terms = new String[numTerms];
@@ -564,6 +566,7 @@
this.offsets = new TermVectorOffsetInfo[numTerms][];
}
+ @Override
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
terms[currentPosition] = term;
termFreqs[currentPosition] = frequency;
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (working copy)
@@ -41,16 +41,19 @@
this.docWriter = docWriter;
}
+ @Override
public TermsHashConsumerPerThread addThread(TermsHashPerThread termsHashPerThread) {
return new TermVectorsTermsWriterPerThread(termsHashPerThread, this);
}
+ @Override
void createPostings(RawPostingList[] postings, int start, int count) {
final int end = start + count;
for(int i=start;i<end;i++)
postings[i] = new PostingList();
}
+ @Override
synchronized void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
if (tvx != null) {
@@ -77,6 +80,7 @@
}
}
+ @Override
synchronized void closeDocStore(final SegmentWriteState state) throws IOException {
if (tvx != null) {
// At least one doc in this run had term vectors
@@ -207,6 +211,7 @@
return false;
}
+ @Override
public void abort() {
if (tvx != null) {
try {
@@ -252,6 +257,7 @@
numVectorFields = 0;
}
+ @Override
void abort() {
reset();
free(this);
@@ -267,10 +273,12 @@
numVectorFields++;
}
+ @Override
public long sizeInBytes() {
return tvf.sizeInBytes();
}
+ @Override
public void finish() throws IOException {
finishDocument(this);
}
@@ -282,6 +290,7 @@
int lastPosition; // Last position where this term occurred
}
+ @Override
int bytesPerPosting() {
return RawPostingList.BYTES_SIZE + 3 * DocumentsWriter.INT_NUM_BYTE;
}
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (working copy)
@@ -49,10 +49,12 @@
fieldState = termsHashPerField.fieldState;
}
+ @Override
int getStreamCount() {
return 2;
}
+ @Override
boolean start(Fieldable[] fields, int count) {
doVectors = false;
doVectorPositions = false;
@@ -97,6 +99,7 @@
* are enabled, to write the vectors to
* RAMOutputStream, which is then quickly flushed to
* * the real term vectors files in the Directory. */
+ @Override
void finish() throws IOException {
assert docState.testPoint("TermVectorsTermsWriterPerField.finish start");
@@ -194,6 +197,7 @@
maxNumPostings = 0;
}
+ @Override
void start(Fieldable f) {
if (doVectorOffsets) {
offsetAttribute = fieldState.attributeSource.addAttribute(OffsetAttribute.class);
@@ -202,6 +206,7 @@
}
}
+ @Override
void newTerm(RawPostingList p0) {
assert docState.testPoint("TermVectorsTermsWriterPerField.newTerm start");
@@ -225,6 +230,7 @@
}
}
+ @Override
void addTerm(RawPostingList p0) {
assert docState.testPoint("TermVectorsTermsWriterPerField.addTerm start");
@@ -247,5 +253,6 @@
}
}
+ @Override
void skippingLongTerm() {}
}
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerThread.java (revision 830378)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerThread.java (working copy)
@@ -39,6 +39,7 @@
final UnicodeUtil.UTF8Result utf8Results[] = {new UnicodeUtil.UTF8Result(),
new UnicodeUtil.UTF8Result()};
+ @Override
public void startDocument() {
assert clearLastVectorFieldName();
if (doc != null) {
@@ -47,6 +48,7 @@
}
}
+ @Override
public DocumentsWriter.DocWriter finishDocument() {
try {
return doc;
@@ -55,10 +57,12 @@
}
}
+ @Override
public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) {
return new TermVectorsTermsWriterPerField(termsHashPerField, this, fieldInfo);
}
+ @Override
public void abort() {
if (doc != null) {
doc.abort();
Index: src/java/org/apache/lucene/messages/MessageImpl.java
===================================================================
--- src/java/org/apache/lucene/messages/MessageImpl.java (revision 830378)
+++ src/java/org/apache/lucene/messages/MessageImpl.java (working copy)
@@ -57,6 +57,7 @@
return NLS.getLocalizedMessage(getKey(), locale, getArguments());
}
+ @Override
public String toString() {
Object[] args = getArguments();
String argsString = "";
Index: src/java/org/apache/lucene/search/BooleanClause.java
===================================================================
--- src/java/org/apache/lucene/search/BooleanClause.java (revision 830378)
+++ src/java/org/apache/lucene/search/BooleanClause.java (working copy)
@@ -24,7 +24,7 @@
public static enum Occur {
/** Use this operator for clauses that <i>must</i> appear in the matching documents. */
- MUST { public String toString() { return "+"; } },
+ MUST { @Override public String toString() { return "+"; } },
/** Use this operator for clauses that <i>should</i> appear in the
* matching documents. For a BooleanQuery with no <code>MUST</code>
@@ -32,12 +32,12 @@
* for the BooleanQuery to match.
* @see BooleanQuery#setMinimumNumberShouldMatch
*/
- SHOULD { public String toString() { return ""; } },
+ SHOULD { @Override public String toString() { return ""; } },
/** Use this operator for clauses that <i>must not</i> appear in the matching documents.
* Note that it is not possible to search for queries that only consist
* of a <code>MUST_NOT</code> clause. */
- MUST_NOT { public String toString() { return "-"; } };
+ MUST_NOT { @Override public String toString() { return "-"; } };
}
@@ -84,6 +84,7 @@
/** Returns true if <code>o</code> is equal to this. */
+ @Override
public boolean equals(Object o) {
if (o == null || !(o instanceof BooleanClause))
return false;
@@ -93,11 +94,13 @@
}
/** Returns a hash code value for this object.*/
+ @Override
public int hashCode() {
return query.hashCode() ^ (Occur.MUST == occur?1:0) ^ (Occur.MUST_NOT == occur?2:0);
}
+ @Override
public String toString() {
return occur.toString() + query.toString();
}
Index: src/java/org/apache/lucene/search/BooleanQuery.java
===================================================================
--- src/java/org/apache/lucene/search/BooleanQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/BooleanQuery.java (working copy)
@@ -40,6 +40,7 @@
*/
public static class TooManyClauses extends RuntimeException {
public TooManyClauses() {}
+ @Override
public String getMessage() {
return "maxClauseCount is set to " + maxClauseCount;
}
@@ -89,10 +90,12 @@
// Implement coord disabling.
// Inherit javadoc.
+ @Override
public Similarity getSimilarity(Searcher searcher) {
Similarity result = super.getSimilarity(searcher);
if (disableCoord) { // disable coord as requested
result = new SimilarityDelegator(result) {
+ @Override
public float coord(int overlap, int maxOverlap) {
return 1.0f;
}
Index: src/java/org/apache/lucene/search/CachingSpanFilter.java
===================================================================
--- src/java/org/apache/lucene/search/CachingSpanFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/CachingSpanFilter.java (working copy)
@@ -42,6 +42,7 @@
this.filter = filter;
}
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
SpanFilterResult result = getCachedResult(reader);
return result != null ? result.getDocIdSet() : null;
@@ -64,19 +65,23 @@
}
+ @Override
public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
return getCachedResult(reader);
}
+ @Override
public String toString() {
return "CachingSpanFilter("+filter+")";
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof CachingSpanFilter)) return false;
return this.filter.equals(((CachingSpanFilter)o).filter);
}
+ @Override
public int hashCode() {
return filter.hashCode() ^ 0x1117BF25;
}
Index: src/java/org/apache/lucene/search/CachingWrapperFilter.java
===================================================================
--- src/java/org/apache/lucene/search/CachingWrapperFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/CachingWrapperFilter.java (working copy)
@@ -61,6 +61,7 @@
}
}
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
if (cache == null) {
cache = new WeakHashMap<IndexReader, DocIdSet>();
@@ -86,15 +87,18 @@
return docIdSet;
}
+ @Override
public String toString() {
return "CachingWrapperFilter("+filter+")";
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof CachingWrapperFilter)) return false;
return this.filter.equals(((CachingWrapperFilter)o).filter);
}
+ @Override
public int hashCode() {
return filter.hashCode() ^ 0x1117BF25;
}
Index: src/java/org/apache/lucene/search/ComplexExplanation.java
===================================================================
--- src/java/org/apache/lucene/search/ComplexExplanation.java (revision 830378)
+++ src/java/org/apache/lucene/search/ComplexExplanation.java (working copy)
@@ -52,11 +52,13 @@
* </p>
* @see #getMatch
*/
+ @Override
public boolean isMatch() {
Boolean m = getMatch();
return (null != m ? m.booleanValue() : super.isMatch());
}
+ @Override
protected String getSummary() {
if (null == getMatch())
return super.getSummary();
Index: src/java/org/apache/lucene/search/DefaultSimilarity.java
===================================================================
--- src/java/org/apache/lucene/search/DefaultSimilarity.java (revision 830378)
+++ src/java/org/apache/lucene/search/DefaultSimilarity.java (working copy)
@@ -31,6 +31,7 @@
*
* <p><b>WARNING</b>: This API is new and experimental, and may suddenly
* change.</p> */
+ @Override
public float computeNorm(String field, FieldInvertState state) {
final int numTerms;
if (discountOverlaps)
@@ -41,31 +42,37 @@
}
/** Implemented as <code>1/sqrt(numTerms)</code>. */
+ @Override
public float lengthNorm(String fieldName, int numTerms) {
return (float)(1.0 / Math.sqrt(numTerms));
}
/** Implemented as <code>1/sqrt(sumOfSquaredWeights)</code>. */
+ @Override
public float queryNorm(float sumOfSquaredWeights) {
return (float)(1.0 / Math.sqrt(sumOfSquaredWeights));
}
/** Implemented as <code>sqrt(freq)</code>. */
+ @Override
public float tf(float freq) {
return (float)Math.sqrt(freq);
}
/** Implemented as <code>1 / (distance + 1)</code>. */
+ @Override
public float sloppyFreq(int distance) {
return 1.0f / (distance + 1);
}
/** Implemented as <code>log(numDocs/(docFreq+1)) + 1</code>. */
+ @Override
public float idf(int docFreq, int numDocs) {
return (float)(Math.log(numDocs/(double)(docFreq+1)) + 1.0);
}
/** Implemented as <code>overlap / maxOverlap</code>. */
+ @Override
public float coord(int overlap, int maxOverlap) {
return overlap / (float)maxOverlap;
}
Index: src/java/org/apache/lucene/search/DocIdSet.java
===================================================================
--- src/java/org/apache/lucene/search/DocIdSet.java (revision 830378)
+++ src/java/org/apache/lucene/search/DocIdSet.java (working copy)
@@ -29,8 +29,11 @@
public static final DocIdSet EMPTY_DOCIDSET = new DocIdSet() {
private final DocIdSetIterator iterator = new DocIdSetIterator() {
+ @Override
public int advance(int target) throws IOException { return NO_MORE_DOCS; }
+ @Override
public int docID() { return NO_MORE_DOCS; }
+ @Override
public int nextDoc() throws IOException { return NO_MORE_DOCS; }
};
Index: src/java/org/apache/lucene/search/ExactPhraseScorer.java
===================================================================
--- src/java/org/apache/lucene/search/ExactPhraseScorer.java (revision 830378)
+++ src/java/org/apache/lucene/search/ExactPhraseScorer.java (working copy)
@@ -27,6 +27,7 @@
super(weight, tps, offsets, similarity, norms);
}
+ @Override
protected final float phraseFreq() throws IOException {
// sort list with pq
pq.clear();
Index: src/java/org/apache/lucene/search/Explanation.java
===================================================================
--- src/java/org/apache/lucene/search/Explanation.java (revision 830378)
+++ src/java/org/apache/lucene/search/Explanation.java (working copy)
@@ -82,6 +82,7 @@
}
/** Render an explanation as text. */
+ @Override
public String toString() {
return toString(0);
}
Index: src/java/org/apache/lucene/search/FieldCache.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCache.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldCache.java (working copy)
@@ -154,6 +154,7 @@
protected Object readResolve() {
return DEFAULT_BYTE_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_BYTE_PARSER";
}
@@ -167,6 +168,7 @@
protected Object readResolve() {
return DEFAULT_SHORT_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_SHORT_PARSER";
}
@@ -180,6 +182,7 @@
protected Object readResolve() {
return DEFAULT_INT_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_INT_PARSER";
}
@@ -193,6 +196,7 @@
protected Object readResolve() {
return DEFAULT_FLOAT_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_FLOAT_PARSER";
}
@@ -206,6 +210,7 @@
protected Object readResolve() {
return DEFAULT_LONG_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_LONG_PARSER";
}
@@ -219,6 +224,7 @@
protected Object readResolve() {
return DEFAULT_DOUBLE_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".DEFAULT_DOUBLE_PARSER";
}
@@ -238,6 +244,7 @@
protected Object readResolve() {
return NUMERIC_UTILS_INT_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER";
}
@@ -257,6 +264,7 @@
protected Object readResolve() {
return NUMERIC_UTILS_FLOAT_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER";
}
@@ -276,6 +284,7 @@
protected Object readResolve() {
return NUMERIC_UTILS_LONG_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER";
}
@@ -295,6 +304,7 @@
protected Object readResolve() {
return NUMERIC_UTILS_DOUBLE_PARSER;
}
+ @Override
public String toString() {
return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER";
}
@@ -528,6 +538,7 @@
}
+ @Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("'").append(getReaderKey()).append("'=>");
Index: src/java/org/apache/lucene/search/FieldCacheImpl.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldCacheImpl.java (working copy)
@@ -107,10 +107,15 @@
// }
}
+ @Override
public Object getReaderKey() { return readerKey; }
+ @Override
public String getFieldName() { return fieldName; }
+ @Override
public Class<?> getCacheType() { return cacheType; }
+ @Override
public Object getCustom() { return custom; }
+ @Override
public Object getValue() { return value; }
}
@@ -212,6 +217,7 @@
}
/** Two of these are equal iff they reference the same field and type. */
+ @Override
public boolean equals (Object o) {
if (o instanceof Entry) {
Entry other = (Entry) o;
@@ -227,6 +233,7 @@
}
/** Composes a hashcode based on the field and type. */
+ @Override
public int hashCode() {
return field.hashCode() ^ (custom==null ? 0 : custom.hashCode());
}
@@ -247,6 +254,7 @@
ByteCache(FieldCache wrapper) {
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
Entry entry = entryKey;
@@ -293,6 +301,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
Entry entry = entryKey;
@@ -339,6 +348,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
Entry entry = entryKey;
@@ -396,6 +406,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
Entry entry = entryKey;
@@ -450,6 +461,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entry)
throws IOException {
String field = entry.field;
@@ -504,6 +516,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
Entry entry = entryKey;
@@ -553,6 +566,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
String field = StringHelper.intern(entryKey.field);
@@ -588,6 +602,7 @@
super(wrapper);
}
+ @Override
protected Object createValue(IndexReader reader, Entry entryKey)
throws IOException {
String field = StringHelper.intern(entryKey.field);
Index: src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (working copy)
@@ -70,6 +70,7 @@
}
/** This method is implemented for each data type */
+ @Override
public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException;
/**
@@ -79,6 +80,7 @@
*/
public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
final FieldCache.StringIndex fcsi = FieldCache.DEFAULT.getStringIndex(reader, field);
final int lowerPoint = fcsi.binarySearchLookup(lowerVal);
@@ -120,6 +122,7 @@
// for this DocIdSet, we never need to use TermDocs,
// because deleted docs have an order of 0 (null entry in StringIndex)
return new FieldCacheDocIdSet(reader, false) {
+ @Override
final boolean matchDoc(int doc) {
return fcsi.order[doc] >= inclusiveLowerPoint && fcsi.order[doc] <= inclusiveUpperPoint;
}
@@ -144,6 +147,7 @@
*/
public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
final byte inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) {
@@ -169,6 +173,7 @@
final byte[] values = FieldCache.DEFAULT.getBytes(reader, field, (FieldCache.ByteParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -193,6 +198,7 @@
*/
public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
final short inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) {
@@ -218,6 +224,7 @@
final short[] values = FieldCache.DEFAULT.getShorts(reader, field, (FieldCache.ShortParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -242,6 +249,7 @@
*/
public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
final int inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) {
@@ -267,6 +275,7 @@
final int[] values = FieldCache.DEFAULT.getInts(reader, field, (FieldCache.IntParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -291,6 +300,7 @@
*/
public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
final long inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) {
@@ -316,6 +326,7 @@
final long[] values = FieldCache.DEFAULT.getLongs(reader, field, (FieldCache.LongParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -340,6 +351,7 @@
*/
public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
// we transform the floating point numbers to sortable integers
// using NumericUtils to easier find the next bigger/lower value
@@ -369,6 +381,7 @@
final float[] values = FieldCache.DEFAULT.getFloats(reader, field, (FieldCache.FloatParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -393,6 +406,7 @@
*/
public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
// we transform the floating point numbers to sortable integers
// using NumericUtils to easier find the next bigger/lower value
@@ -422,6 +436,7 @@
final double[] values = FieldCache.DEFAULT.getDoubles(reader, field, (FieldCache.DoubleParser) parser);
// we only request the usage of termDocs, if the range contains 0
return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0)) {
+ @Override
boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
}
@@ -430,6 +445,7 @@
};
}
+ @Override
public final String toString() {
final StringBuilder sb = new StringBuilder(field).append(":");
return sb.append(includeLower ? '[' : '{')
@@ -440,6 +456,7 @@
.toString();
}
+ @Override
public final boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof FieldCacheRangeFilter)) return false;
@@ -455,6 +472,7 @@
return true;
}
+ @Override
public final int hashCode() {
int h = field.hashCode();
h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204;
@@ -516,10 +534,12 @@
return new DocIdSetIterator() {
private int doc = -1;
+ @Override
public int docID() {
return doc;
}
+ @Override
public int nextDoc() throws IOException {
do {
if (!termDocs.next())
@@ -528,6 +548,7 @@
return doc;
}
+ @Override
public int advance(int target) throws IOException {
if (!termDocs.skipTo(target))
return doc = NO_MORE_DOCS;
@@ -544,10 +565,12 @@
return new DocIdSetIterator() {
private int doc = -1;
+ @Override
public int docID() {
return doc;
}
+ @Override
public int nextDoc() {
try {
do {
@@ -559,6 +582,7 @@
}
}
+ @Override
public int advance(int target) {
try {
doc = target;
Index: src/java/org/apache/lucene/search/FieldCacheTermsFilter.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheTermsFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldCacheTermsFilter.java (working copy)
@@ -106,6 +106,7 @@
return FieldCache.DEFAULT;
}
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
return new FieldCacheTermsFilterDocIdSet(getFieldCache().getStringIndex(reader, field));
}
@@ -140,10 +141,12 @@
protected class FieldCacheTermsFilterDocIdSetIterator extends DocIdSetIterator {
private int doc = -1;
+ @Override
public int docID() {
return doc;
}
+ @Override
public int nextDoc() {
try {
while (!openBitSet.fastGet(fcsi.order[++doc])) {}
@@ -153,6 +156,7 @@
return doc;
}
+ @Override
public int advance(int target) {
try {
doc = target;
Index: src/java/org/apache/lucene/search/FieldComparator.java
===================================================================
--- src/java/org/apache/lucene/search/FieldComparator.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldComparator.java (working copy)
@@ -178,26 +178,32 @@
this.parser = (ByteParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
return values[slot1] - values[slot2];
}
+ @Override
public int compareBottom(int doc) {
return bottom - currentReaderValues[doc];
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getBytes(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Byte.valueOf(values[slot]);
}
@@ -213,20 +219,24 @@
docIDs = new int[numHits];
}
+ @Override
public int compare(int slot1, int slot2) {
// No overflow risk because docIDs are non-negative
return docIDs[slot1] - docIDs[slot2];
}
+ @Override
public int compareBottom(int doc) {
// No overflow risk because docIDs are non-negative
return bottom - (docBase + doc);
}
+ @Override
public void copy(int slot, int doc) {
docIDs[slot] = docBase + doc;
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) {
// TODO: can we "map" our docIDs to the current
// reader? saves having to then subtract on every
@@ -234,10 +244,12 @@
this.docBase = docBase;
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = docIDs[bottom];
}
+ @Override
public Comparable value(int slot) {
return Integer.valueOf(docIDs[slot]);
}
@@ -258,6 +270,7 @@
this.parser = (DoubleParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
final double v1 = values[slot1];
final double v2 = values[slot2];
@@ -270,6 +283,7 @@
}
}
+ @Override
public int compareBottom(int doc) {
final double v2 = currentReaderValues[doc];
if (bottom > v2) {
@@ -281,18 +295,22 @@
}
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getDoubles(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Double.valueOf(values[slot]);
}
@@ -313,6 +331,7 @@
this.parser = (FloatParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
// TODO: are there sneaky non-branch ways to compute
// sign of float?
@@ -327,6 +346,7 @@
}
}
+ @Override
public int compareBottom(int doc) {
// TODO: are there sneaky non-branch ways to compute
// sign of float?
@@ -340,18 +360,22 @@
}
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getFloats(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Float.valueOf(values[slot]);
}
@@ -372,6 +396,7 @@
this.parser = (IntParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
// TODO: there are sneaky non-branch ways to compute
// -1/+1/0 sign
@@ -388,6 +413,7 @@
}
}
+ @Override
public int compareBottom(int doc) {
// TODO: there are sneaky non-branch ways to compute
// -1/+1/0 sign
@@ -403,18 +429,22 @@
}
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getInts(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Integer.valueOf(values[slot]);
}
@@ -435,6 +465,7 @@
this.parser = (LongParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
// TODO: there are sneaky non-branch ways to compute
// -1/+1/0 sign
@@ -449,6 +480,7 @@
}
}
+ @Override
public int compareBottom(int doc) {
// TODO: there are sneaky non-branch ways to compute
// -1/+1/0 sign
@@ -462,18 +494,22 @@
}
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getLongs(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Long.valueOf(values[slot]);
}
@@ -494,34 +530,41 @@
scores = new float[numHits];
}
+ @Override
public int compare(int slot1, int slot2) {
final float score1 = scores[slot1];
final float score2 = scores[slot2];
return score1 > score2 ? -1 : (score1 < score2 ? 1 : 0);
}
+ @Override
public int compareBottom(int doc) throws IOException {
float score = scorer.score();
return bottom > score ? -1 : (bottom < score ? 1 : 0);
}
+ @Override
public void copy(int slot, int doc) throws IOException {
scores[slot] = scorer.score();
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) {
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = scores[bottom];
}
+ @Override
public void setScorer(Scorer scorer) {
// wrap with a ScoreCachingWrappingScorer so that successive calls to
// score() will not incur score computation over and over again.
this.scorer = new ScoreCachingWrappingScorer(scorer);
}
+ @Override
public Comparable value(int slot) {
return Float.valueOf(scores[slot]);
}
@@ -542,26 +585,32 @@
this.parser = (ShortParser) parser;
}
+ @Override
public int compare(int slot1, int slot2) {
return values[slot1] - values[slot2];
}
+ @Override
public int compareBottom(int doc) {
return bottom - currentReaderValues[doc];
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getShorts(reader, field, parser);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return Short.valueOf(values[slot]);
}
@@ -583,6 +632,7 @@
collator = Collator.getInstance(locale);
}
+ @Override
public int compare(int slot1, int slot2) {
final String val1 = values[slot1];
final String val2 = values[slot2];
@@ -597,6 +647,7 @@
return collator.compare(val1, val2);
}
+ @Override
public int compareBottom(int doc) {
final String val2 = currentReaderValues[doc];
if (bottom == null) {
@@ -610,18 +661,22 @@
return collator.compare(bottom, val2);
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getStrings(reader, field);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return values[slot];
}
@@ -662,6 +717,7 @@
this.field = field;
}
+ @Override
public int compare(int slot1, int slot2) {
if (readerGen[slot1] == readerGen[slot2]) {
int cmp = ords[slot1] - ords[slot2];
@@ -683,6 +739,7 @@
return val1.compareTo(val2);
}
+ @Override
public int compareBottom(int doc) {
assert bottomSlot != -1;
int order = this.order[doc];
@@ -734,6 +791,7 @@
ords[slot] = index;
}
+ @Override
public void copy(int slot, int doc) {
final int ord = order[doc];
ords[slot] = ord;
@@ -742,6 +800,7 @@
readerGen[slot] = currentReaderGen;
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
StringIndex currentReaderValues = FieldCache.DEFAULT.getStringIndex(reader, field);
currentReaderGen++;
@@ -754,6 +813,7 @@
}
}
+ @Override
public void setBottom(final int bottom) {
bottomSlot = bottom;
if (readerGen[bottom] != currentReaderGen) {
@@ -765,6 +825,7 @@
bottomValue = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return values[slot];
}
@@ -798,6 +859,7 @@
this.field = field;
}
+ @Override
public int compare(int slot1, int slot2) {
final String val1 = values[slot1];
final String val2 = values[slot2];
@@ -813,6 +875,7 @@
return val1.compareTo(val2);
}
+ @Override
public int compareBottom(int doc) {
final String val2 = currentReaderValues[doc];
if (bottom == null) {
@@ -826,18 +889,22 @@
return bottom.compareTo(val2);
}
+ @Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getStrings(reader, field);
}
+ @Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
+ @Override
public Comparable value(int slot) {
return values[slot];
}
Index: src/java/org/apache/lucene/search/FieldDoc.java
===================================================================
--- src/java/org/apache/lucene/search/FieldDoc.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldDoc.java (working copy)
@@ -38,37 +38,38 @@
*/
public class FieldDoc extends ScoreDoc {
- /** Expert: The values which are used to sort the referenced document.
- * The order of these will match the original sort criteria given by a
- * Sort object. Each Object will be either an Integer, Float or String,
- * depending on the type of values in the terms of the original field.
- * @see Sort
- * @see Searcher#search(Query,Filter,int,Sort)
- */
- public Comparable[] fields;
+ /** Expert: The values which are used to sort the referenced document.
+ * The order of these will match the original sort criteria given by a
+ * Sort object. Each Object will be either an Integer, Float or String,
+ * depending on the type of values in the terms of the original field.
+ * @see Sort
+ * @see Searcher#search(Query,Filter,int,Sort)
+ */
+ public Comparable[] fields;
- /** Expert: Creates one of these objects with empty sort information. */
- public FieldDoc (int doc, float score) {
- super (doc, score);
- }
+ /** Expert: Creates one of these objects with empty sort information. */
+ public FieldDoc (int doc, float score) {
+ super (doc, score);
+ }
- /** Expert: Creates one of these objects with the given sort information. */
- public FieldDoc (int doc, float score, Comparable[] fields) {
- super (doc, score);
- this.fields = fields;
- }
-
- // A convenience method for debugging.
- public String toString() {
- // super.toString returns the doc and score information, so just add the
+ /** Expert: Creates one of these objects with the given sort information. */
+ public FieldDoc (int doc, float score, Comparable[] fields) {
+ super (doc, score);
+ this.fields = fields;
+ }
+
+ // A convenience method for debugging.
+ @Override
+ public String toString() {
+ // super.toString returns the doc and score information, so just add the
// fields information
- StringBuilder sb = new StringBuilder(super.toString());
- sb.append("[");
- for (int i = 0; i < fields.length; i++) {
+ StringBuilder sb = new StringBuilder(super.toString());
+ sb.append("[");
+ for (int i = 0; i < fields.length; i++) {
sb.append(fields[i]).append(", ");
}
- sb.setLength(sb.length() - 2); // discard last ", "
- sb.append("]");
- return super.toString();
- }
+ if (fields.length > 0) sb.setLength(sb.length() - 2); // discard last ", "
+ sb.append("]");
+ return sb.toString();
+ }
}
Index: src/java/org/apache/lucene/search/FieldValueHitQueue.java
===================================================================
--- src/java/org/apache/lucene/search/FieldValueHitQueue.java (revision 830378)
+++ src/java/org/apache/lucene/search/FieldValueHitQueue.java (working copy)
@@ -47,6 +47,7 @@
this.score = score;
}
+ @Override
public String toString() {
return "slot:" + slot + " docID:" + docID + " score=" + score;
}
@@ -84,6 +85,7 @@
* @param b ScoreDoc
* @return <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
*/
+ @Override
protected boolean lessThan(final Entry hitA, final Entry hitB) {
assert hitA != hitB;
@@ -121,6 +123,7 @@
initialize(size);
}
+ @Override
protected boolean lessThan(final Entry hitA, final Entry hitB) {
assert hitA != hitB;
@@ -190,6 +193,7 @@
protected final FieldComparator[] comparators;
protected final int[] reverseMul;
+ @Override
protected abstract boolean lessThan (final Entry a, final Entry b);
/**
Index: src/java/org/apache/lucene/search/FilteredDocIdSet.java
===================================================================
--- src/java/org/apache/lucene/search/FilteredDocIdSet.java (revision 830378)
+++ src/java/org/apache/lucene/search/FilteredDocIdSet.java (working copy)
@@ -70,6 +70,7 @@
@Override
public DocIdSetIterator iterator() throws IOException {
return new FilteredDocIdSetIterator(_innerSet.iterator()) {
+ @Override
protected boolean match(int docid) throws IOException {
return FilteredDocIdSet.this.match(docid);
}
Index: src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
===================================================================
--- src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (revision 830378)
+++ src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (working copy)
@@ -49,10 +49,12 @@
*/
abstract protected boolean match(int doc) throws IOException;
+ @Override
public int docID() {
return doc;
}
+ @Override
public int nextDoc() throws IOException {
while ((doc = _innerIter.nextDoc()) != NO_MORE_DOCS) {
if (match(doc)) {
@@ -62,6 +64,7 @@
return doc;
}
+ @Override
public int advance(int target) throws IOException {
doc = _innerIter.advance(target);
if (doc != NO_MORE_DOCS) {
Index: src/java/org/apache/lucene/search/FilteredQuery.java
===================================================================
--- src/java/org/apache/lucene/search/FilteredQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/FilteredQuery.java (working copy)
@@ -58,6 +58,7 @@
* Returns a Weight that applies the filter to the enclosed query's Weight.
* This is accomplished by overriding the Scorer returned by the Weight.
*/
+ @Override
public Weight createWeight(final Searcher searcher) throws IOException {
final Weight weight = query.createWeight (searcher);
final Similarity similarity = query.getSimilarity(searcher);
Index: src/java/org/apache/lucene/search/FilteredTermEnum.java
===================================================================
--- src/java/org/apache/lucene/search/FilteredTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/search/FilteredTermEnum.java (working copy)
@@ -60,6 +60,7 @@
* Returns the docFreq of the current Term in the enumeration.
* Returns -1 if no Term matches or all terms have been enumerated.
*/
+ @Override
public int docFreq() {
if (currentTerm == null) return -1;
assert actualEnum != null;
@@ -67,6 +68,7 @@
}
/** Increments the enumeration to the next element. True if one exists. */
+ @Override
public boolean next() throws IOException {
if (actualEnum == null) return false; // the actual enumerator is not initialized!
currentTerm = null;
@@ -87,11 +89,13 @@
/** Returns the current Term in the enumeration.
* Returns null if no Term matches or all terms have been enumerated. */
+ @Override
public Term term() {
return currentTerm;
}
/** Closes the enumeration to further activity, freeing resources. */
+ @Override
public void close() throws IOException {
if (actualEnum != null) actualEnum.close();
currentTerm = null;
Index: src/java/org/apache/lucene/search/function/ByteFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ByteFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/ByteFieldSource.java (working copy)
@@ -66,27 +66,33 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "byte(" + super.description() + ')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException {
final byte[] arr = cache.getBytes(reader, field, parser);
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return (float) arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
+ @Override
public int intVal(int doc) {
return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -94,6 +100,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */
+ @Override
public boolean cachedFieldSourceEquals(FieldCacheSource o) {
if (o.getClass() != ByteFieldSource.class) {
return false;
@@ -105,6 +112,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */
+ @Override
public int cachedFieldSourceHashCode() {
return parser==null ?
Byte.class.hashCode() : parser.getClass().hashCode();
Index: src/java/org/apache/lucene/search/function/FieldCacheSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/FieldCacheSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/FieldCacheSource.java (working copy)
@@ -60,11 +60,13 @@
}
/* (non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */
+ @Override
public final DocValues getValues(IndexReader reader) throws IOException {
return getCachedFieldValues(FieldCache.DEFAULT, field, reader);
}
/* (non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return field;
}
@@ -78,6 +80,7 @@
public abstract DocValues getCachedFieldValues(FieldCache cache, String field, IndexReader reader) throws IOException;
/*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+ @Override
public final boolean equals(Object o) {
if (!(o instanceof FieldCacheSource)) {
return false;
@@ -89,6 +92,7 @@
}
/*(non-Javadoc) @see java.lang.Object#hashCode() */
+ @Override
public final int hashCode() {
return
field.hashCode() +
Index: src/java/org/apache/lucene/search/function/FieldScoreQuery.java
===================================================================
--- src/java/org/apache/lucene/search/function/FieldScoreQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/FieldScoreQuery.java (working copy)
@@ -90,6 +90,7 @@
this.typeName = name;
}
/*(non-Javadoc) @see java.lang.Object#toString() */
+ @Override
public String toString() {
return getClass().getName()+"::"+typeName;
}
Index: src/java/org/apache/lucene/search/function/FloatFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/FloatFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/FloatFieldSource.java (working copy)
@@ -66,23 +66,28 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "float(" + super.description() + ')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException {
final float[] arr = cache.getFloats(reader, field, parser);
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -90,6 +95,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */
+ @Override
public boolean cachedFieldSourceEquals(FieldCacheSource o) {
if (o.getClass() != FloatFieldSource.class) {
return false;
@@ -101,6 +107,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */
+ @Override
public int cachedFieldSourceHashCode() {
return parser==null ?
Float.class.hashCode() : parser.getClass().hashCode();
Index: src/java/org/apache/lucene/search/function/IntFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/IntFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/IntFieldSource.java (working copy)
@@ -66,27 +66,33 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "int(" + super.description() + ')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException {
final int[] arr = cache.getInts(reader, field, parser);
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return (float) arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
+ @Override
public int intVal(int doc) {
return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -94,6 +100,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */
+ @Override
public boolean cachedFieldSourceEquals(FieldCacheSource o) {
if (o.getClass() != IntFieldSource.class) {
return false;
@@ -105,6 +112,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */
+ @Override
public int cachedFieldSourceHashCode() {
return parser==null ?
Integer.class.hashCode() : parser.getClass().hashCode();
Index: src/java/org/apache/lucene/search/function/MultiValueSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/MultiValueSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/MultiValueSource.java (working copy)
@@ -42,6 +42,7 @@
this.other = other;
}
+ @Override
public DocValues getValues(IndexReader reader) throws IOException {
IndexReader[] subReaders = reader.getSequentialSubReaders();
@@ -54,10 +55,12 @@
}
}
+ @Override
public String description() {
return other.description();
}
+ @Override
public boolean equals(Object o) {
if (o instanceof MultiValueSource) {
return ((MultiValueSource) o).other.equals(other);
@@ -66,6 +69,7 @@
}
}
+ @Override
public int hashCode() {
return 31 * other.hashCode();
}
@@ -86,36 +90,43 @@
}
}
+ @Override
public float floatVal(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].floatVal(doc-docStarts[n]);
}
+ @Override
public int intVal(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].intVal(doc-docStarts[n]);
}
+ @Override
public long longVal(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].longVal(doc-docStarts[n]);
}
+ @Override
public double doubleVal(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].doubleVal(doc-docStarts[n]);
}
+ @Override
public String strVal(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].strVal(doc-docStarts[n]);
}
+ @Override
public String toString(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].toString(doc-docStarts[n]);
}
+ @Override
public Explanation explain(int doc) {
final int n = ReaderUtil.subIndex(doc, docStarts);
return docValues[n].explain(doc-docStarts[n]);
Index: src/java/org/apache/lucene/search/function/OrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/OrdFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/OrdFieldSource.java (working copy)
@@ -67,28 +67,34 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "ord(" + field + ')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getValues(IndexReader reader) throws IOException {
final int[] arr = FieldCache.DEFAULT.getStringIndex(reader, field).order;
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return (float)arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */
+ @Override
public String strVal(int doc) {
// the string value of the ordinal, not the string itself
return Integer.toString(arr[doc]);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -96,6 +102,7 @@
}
/*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+ @Override
public boolean equals(Object o) {
if (o.getClass() != OrdFieldSource.class) return false;
OrdFieldSource other = (OrdFieldSource)o;
@@ -105,6 +112,7 @@
private static final int hcode = OrdFieldSource.class.hashCode();
/*(non-Javadoc) @see java.lang.Object#hashCode() */
+ @Override
public int hashCode() {
return hcode + field.hashCode();
}
Index: src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java (working copy)
@@ -68,11 +68,13 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "rord("+field+')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getValues(IndexReader reader) throws IOException {
final FieldCache.StringIndex sindex = FieldCache.DEFAULT.getStringIndex(reader, field);
@@ -81,23 +83,28 @@
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return (float)(end - arr[doc]);
}
/* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
+ @Override
public int intVal(int doc) {
return end - arr[doc];
}
/* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */
+ @Override
public String strVal(int doc) {
// the string value of the ordinal, not the string itself
return Integer.toString(intVal(doc));
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + strVal(doc);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -105,6 +112,7 @@
}
/*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+ @Override
public boolean equals(Object o) {
if (o.getClass() != ReverseOrdFieldSource.class) return false;
ReverseOrdFieldSource other = (ReverseOrdFieldSource)o;
@@ -114,6 +122,7 @@
private static final int hcode = ReverseOrdFieldSource.class.hashCode();
/*(non-Javadoc) @see java.lang.Object#hashCode() */
+ @Override
public int hashCode() {
return hcode + field.hashCode();
}
Index: src/java/org/apache/lucene/search/function/ShortFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ShortFieldSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/ShortFieldSource.java (working copy)
@@ -66,27 +66,33 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
+ @Override
public String description() {
return "short(" + super.description() + ')';
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */
+ @Override
public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException {
final short[] arr = cache.getShorts(reader, field, parser);
return new DocValues() {
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
+ @Override
public float floatVal(int doc) {
return (float) arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
+ @Override
public int intVal(int doc) {
return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
+ @Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
+ @Override
Object getInnerArray() {
return arr;
}
@@ -94,6 +100,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */
+ @Override
public boolean cachedFieldSourceEquals(FieldCacheSource o) {
if (o.getClass() != ShortFieldSource.class) {
return false;
@@ -105,6 +112,7 @@
}
/*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */
+ @Override
public int cachedFieldSourceHashCode() {
return parser==null ?
Short.class.hashCode() : parser.getClass().hashCode();
Index: src/java/org/apache/lucene/search/function/ValueSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ValueSource.java (revision 830378)
+++ src/java/org/apache/lucene/search/function/ValueSource.java (working copy)
@@ -54,6 +54,7 @@
public abstract String description();
/* (non-Javadoc) @see java.lang.Object#toString() */
+ @Override
public String toString() {
return description();
}
@@ -62,12 +63,14 @@
* Needed for possible caching of query results - used by {@link ValueSourceQuery#equals(Object)}.
* @see Object#equals(Object)
*/
+ @Override
public abstract boolean equals(Object o);
/**
* Needed for possible caching of query results - used by {@link ValueSourceQuery#hashCode()}.
* @see Object#hashCode()
*/
+ @Override
public abstract int hashCode();
}
Index: src/java/org/apache/lucene/search/FuzzyQuery.java
===================================================================
--- src/java/org/apache/lucene/search/FuzzyQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/FuzzyQuery.java (working copy)
@@ -109,6 +109,7 @@
return prefixLength;
}
+ @Override
protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
return new FuzzyTermEnum(reader, getTerm(), minimumSimilarity, prefixLength);
}
@@ -120,6 +121,7 @@
return term;
}
+ @Override
public void setRewriteMethod(RewriteMethod method) {
throw new UnsupportedOperationException("FuzzyQuery cannot change rewrite method");
}
@@ -172,6 +174,7 @@
return query;
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
if (!term.field().equals(field)) {
Index: src/java/org/apache/lucene/search/FuzzyTermEnum.java
===================================================================
--- src/java/org/apache/lucene/search/FuzzyTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/search/FuzzyTermEnum.java (working copy)
@@ -125,6 +125,7 @@
* The termCompare method in FuzzyTermEnum uses Levenshtein distance to
* calculate the distance between the given term and the comparing term.
*/
+ @Override
protected final boolean termCompare(Term term) {
if (field == term.field() && term.text().startsWith(prefix)) {
final String target = term.text().substring(prefix.length());
@@ -136,11 +137,13 @@
}
/** {@inheritDoc} */
+ @Override
public final float difference() {
return (similarity - minimumSimilarity) * scale_factor;
}
/** {@inheritDoc} */
+ @Override
public final boolean endEnum() {
return endEnum;
}
@@ -271,6 +274,7 @@
}
/** {@inheritDoc} */
+ @Override
public void close() throws IOException {
p = d = null;
searchTerm = null;
Index: src/java/org/apache/lucene/search/HitQueue.java
===================================================================
--- src/java/org/apache/lucene/search/HitQueue.java (revision 830378)
+++ src/java/org/apache/lucene/search/HitQueue.java (working copy)
@@ -68,6 +68,7 @@
}
// Returns null if prePopulate is false.
+ @Override
protected ScoreDoc getSentinelObject() {
// Always set the doc Id to MAX_VALUE so that it won't be favored by
// lessThan. This generally should not happen since if score is not NEG_INF,
@@ -75,6 +76,7 @@
return !prePopulate ? null : new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
}
+ @Override
protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) {
if (hitA.score == hitB.score)
return hitA.doc > hitB.doc;
Index: src/java/org/apache/lucene/search/IndexSearcher.java
===================================================================
--- src/java/org/apache/lucene/search/IndexSearcher.java (revision 830378)
+++ src/java/org/apache/lucene/search/IndexSearcher.java (working copy)
@@ -116,17 +116,20 @@
* If the IndexReader was supplied implicitly by specifying a directory, then
* the IndexReader gets closed.
*/
+ @Override
public void close() throws IOException {
if(closeReader)
reader.close();
}
// inherit javadoc
+ @Override
public int docFreq(Term term) throws IOException {
return reader.docFreq(term);
}
// inherit javadoc
+ @Override
public Document doc(int i) throws CorruptIndexException, IOException {
return reader.document(i);
}
@@ -137,11 +140,13 @@
}
// inherit javadoc
+ @Override
public int maxDoc() throws IOException {
return reader.maxDoc();
}
// inherit javadoc
+ @Override
public TopDocs search(Weight weight, Filter filter, final int nDocs) throws IOException {
if (nDocs <= 0) {
@@ -153,6 +158,7 @@
return collector.topDocs();
}
+ @Override
public TopFieldDocs search(Weight weight, Filter filter,
final int nDocs, Sort sort) throws IOException {
return search(weight, filter, nDocs, sort, true);
@@ -186,6 +192,7 @@
return (TopFieldDocs) collector.topDocs();
}
+ @Override
public void search(Weight weight, Filter filter, Collector collector)
throws IOException {
@@ -251,6 +258,7 @@
}
}
+ @Override
public Query rewrite(Query original) throws IOException {
Query query = original;
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
@@ -260,6 +268,7 @@
return query;
}
+ @Override
public Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, docStarts);
int deBasedDoc = doc - docStarts[n];
Index: src/java/org/apache/lucene/search/MultiSearcher.java
===================================================================
--- src/java/org/apache/lucene/search/MultiSearcher.java (revision 830378)
+++ src/java/org/apache/lucene/search/MultiSearcher.java (working copy)
@@ -52,6 +52,7 @@
setSimilarity(similarity);
}
+ @Override
public int docFreq(Term term) {
int df;
try {
@@ -63,6 +64,7 @@
return df;
}
+ @Override
public int[] docFreqs(Term[] terms) {
int[] result = new int[terms.length];
for (int i = 0; i < terms.length; i++) {
@@ -71,10 +73,12 @@
return result;
}
+ @Override
public int maxDoc() {
return maxDoc;
}
+ @Override
public Query rewrite(Query query) {
// this is a bit of a hack. We know that a query which
// creates a Weight based on this Dummy-Searcher is
@@ -83,10 +87,12 @@
return query;
}
+ @Override
public void close() {
throw new UnsupportedOperationException();
}
+ @Override
public Document doc(int i) {
throw new UnsupportedOperationException();
}
@@ -95,18 +101,22 @@
throw new UnsupportedOperationException();
}
+ @Override
public Explanation explain(Weight weight,int doc) {
throw new UnsupportedOperationException();
}
+ @Override
public void search(Weight weight, Filter filter, Collector results) {
throw new UnsupportedOperationException();
}
+ @Override
public TopDocs search(Weight weight,Filter filter,int n) {
throw new UnsupportedOperationException();
}
+ @Override
public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) {
throw new UnsupportedOperationException();
}
@@ -138,11 +148,13 @@
}
// inherit javadoc
+ @Override
public void close() throws IOException {
for (int i = 0; i < searchables.length; i++)
searchables[i].close();
}
+ @Override
public int docFreq(Term term) throws IOException {
int docFreq = 0;
for (int i = 0; i < searchables.length; i++)
@@ -151,6 +163,7 @@
}
// inherit javadoc
+ @Override
public Document doc(int n) throws CorruptIndexException, IOException {
int i = subSearcher(n); // find searcher index
return searchables[i].doc(n - starts[i]); // dispatch to searcher
@@ -174,10 +187,12 @@
return n - starts[subSearcher(n)];
}
+ @Override
public int maxDoc() throws IOException {
return maxDoc;
}
+ @Override
public TopDocs search(Weight weight, Filter filter, int nDocs)
throws IOException {
@@ -205,6 +220,7 @@
return new TopDocs(totalHits, scoreDocs, maxScore);
}
+ @Override
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort)
throws IOException {
FieldDocSortedHitQueue hq = null;
@@ -247,6 +263,7 @@
}
// inherit javadoc
+ @Override
public void search(Weight weight, Filter filter, final Collector collector)
throws IOException {
for (int i = 0; i < searchables.length; i++) {
@@ -254,15 +271,19 @@
final int start = starts[i];
final Collector hc = new Collector() {
+ @Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
+ @Override
public void collect(int doc) throws IOException {
collector.collect(doc);
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
collector.setNextReader(reader, start + docBase);
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
@@ -272,6 +293,7 @@
}
}
+ @Override
public Query rewrite(Query original) throws IOException {
Query[] queries = new Query[searchables.length];
for (int i = 0; i < searchables.length; i++) {
@@ -280,6 +302,7 @@
return queries[0].combine(queries);
}
+ @Override
public Explanation explain(Weight weight, int doc) throws IOException {
int i = subSearcher(doc); // find searcher index
return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
@@ -300,6 +323,7 @@
*
* @return rewritten queries
*/
+ @Override
protected Weight createWeight(Query original) throws IOException {
// step 1
Query rewrittenQuery = rewrite(original);
Index: src/java/org/apache/lucene/search/MultiTermQuery.java
===================================================================
--- src/java/org/apache/lucene/search/MultiTermQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/MultiTermQuery.java (working copy)
@@ -67,6 +67,7 @@
}
private static final class ConstantScoreFilterRewrite extends RewriteMethod implements Serializable {
+ @Override
public Query rewrite(IndexReader reader, MultiTermQuery query) {
Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter<MultiTermQuery>(query));
result.setBoost(query.getBoost());
@@ -94,6 +95,7 @@
public final static RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new ConstantScoreFilterRewrite();
private static class ScoringBooleanQueryRewrite extends RewriteMethod implements Serializable {
+ @Override
public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
FilteredTermEnum enumerator = query.getEnum(reader);
@@ -138,6 +140,7 @@
public final static RewriteMethod SCORING_BOOLEAN_QUERY_REWRITE = new ScoringBooleanQueryRewrite();
private static class ConstantScoreBooleanQueryRewrite extends ScoringBooleanQueryRewrite implements Serializable {
+ @Override
public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
// strip the scores off
Query result = new ConstantScoreQuery(new QueryWrapperFilter(super.rewrite(reader, query)));
@@ -146,6 +149,7 @@
}
// Make sure we are still a singleton even after deserializing
+ @Override
protected Object readResolve() {
return CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
}
@@ -212,6 +216,7 @@
return docCountPercent;
}
+ @Override
public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
// Get the enum and start visiting terms. If we
// exhaust the enum before hitting either of the
@@ -361,6 +366,7 @@
numberOfTerms += inc;
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
return rewriteMethod.rewrite(reader, this);
}
Index: src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
===================================================================
--- src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (working copy)
@@ -142,6 +142,7 @@
// else fill into a OpenBitSet
final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
new TermGenerator() {
+ @Override
public void handleDoc(int doc) {
bitSet.set(doc);
}
Index: src/java/org/apache/lucene/search/ParallelMultiSearcher.java
===================================================================
--- src/java/org/apache/lucene/search/ParallelMultiSearcher.java (revision 830378)
+++ src/java/org/apache/lucene/search/ParallelMultiSearcher.java (working copy)
@@ -32,7 +32,7 @@
private Searchable[] searchables;
private int[] starts;
-
+
/** Creates a searchable which searches <i>searchables</i>. */
public ParallelMultiSearcher(Searchable... searchables) throws IOException {
super(searchables);
@@ -43,6 +43,7 @@
/**
* TODO: parallelize this one too
*/
+ @Override
public int docFreq(Term term) throws IOException {
return super.docFreq(term);
}
@@ -52,6 +53,7 @@
* Searchable, waits for each search to complete and merge
* the results back together.
*/
+ @Override
public TopDocs search(Weight weight, Filter filter, int nDocs)
throws IOException {
HitQueue hq = new HitQueue(nDocs, false);
@@ -97,6 +99,7 @@
* Searchable, waits for each search to complete and merges
* the results back together.
*/
+ @Override
public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort)
throws IOException {
// don't specify the fields - we'll wait to do this until we get results
@@ -153,6 +156,7 @@
*
* TODO: parallelize this one too
*/
+ @Override
public void search(Weight weight, Filter filter, final Collector collector)
throws IOException {
for (int i = 0; i < searchables.length; i++) {
@@ -160,15 +164,22 @@
final int start = starts[i];
final Collector hc = new Collector() {
+ @Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
+
+ @Override
public void collect(int doc) throws IOException {
collector.collect(doc);
}
+
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
collector.setNextReader(reader, start + docBase);
}
+
+ @Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
@@ -176,12 +187,13 @@
searchables[i].search(weight, filter, hc);
}
- }
+ }
/*
* TODO: this one could be parallelized too
* @see org.apache.lucene.search.Searchable#rewrite(org.apache.lucene.search.Query)
*/
+ @Override
public Query rewrite(Query original) throws IOException {
return super.rewrite(original);
}
@@ -230,6 +242,7 @@
this.sort = sort;
}
+ @Override
@SuppressWarnings ("unchecked")
public void run() {
try {
Index: src/java/org/apache/lucene/search/payloads/AveragePayloadFunction.java
===================================================================
--- src/java/org/apache/lucene/search/payloads/AveragePayloadFunction.java (revision 830378)
+++ src/java/org/apache/lucene/search/payloads/AveragePayloadFunction.java (working copy)
@@ -26,14 +26,17 @@
**/
public class AveragePayloadFunction extends PayloadFunction{
+ @Override
public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) {
return currentPayloadScore + currentScore;
}
+ @Override
public float docScore(int docId, String field, int numPayloadsSeen, float payloadScore) {
return numPayloadsSeen > 0 ? (payloadScore / numPayloadsSeen) : 1;
}
+ @Override
public int hashCode() {
final int prime = 31;
int result = 1;
@@ -41,6 +44,7 @@
return result;
}
+ @Override
public boolean equals(Object obj) {
if (this == obj)
return true;
Index: src/java/org/apache/lucene/search/payloads/MaxPayloadFunction.java
===================================================================
--- src/java/org/apache/lucene/search/payloads/MaxPayloadFunction.java (revision 830378)
+++ src/java/org/apache/lucene/search/payloads/MaxPayloadFunction.java (working copy)
@@ -25,14 +25,17 @@
*
**/
public class MaxPayloadFunction extends PayloadFunction {
+ @Override
public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) {
return Math.max(currentPayloadScore, currentScore);
}
+ @Override
public float docScore(int docId, String field, int numPayloadsSeen, float payloadScore) {
return numPayloadsSeen > 0 ? payloadScore : 1;
}
+ @Override
public int hashCode() {
final int prime = 31;
int result = 1;
@@ -40,6 +43,7 @@
return result;
}
+ @Override
public boolean equals(Object obj) {
if (this == obj)
return true;
Index: src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java
===================================================================
--- src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java (revision 830378)
+++ src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java (working copy)
@@ -23,14 +23,17 @@
**/
public class MinPayloadFunction extends PayloadFunction {
+ @Override
public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) {
return Math.min(currentPayloadScore, currentScore);
}
+ @Override
public float docScore(int docId, String field, int numPayloadsSeen, float payloadScore) {
return numPayloadsSeen > 0 ? payloadScore : 1;
}
+ @Override
public int hashCode() {
final int prime = 31;
int result = 1;
@@ -38,6 +41,7 @@
return result;
}
+ @Override
public boolean equals(Object obj) {
if (this == obj)
return true;
Index: src/java/org/apache/lucene/search/payloads/PayloadFunction.java
===================================================================
--- src/java/org/apache/lucene/search/payloads/PayloadFunction.java (revision 830378)
+++ src/java/org/apache/lucene/search/payloads/PayloadFunction.java (working copy)
@@ -56,8 +56,10 @@
*/
public abstract float docScore(int docId, String field, int numPayloadsSeen, float payloadScore);
+ @Override
public abstract int hashCode();
+ @Override
public abstract boolean equals(Object o);
}
Index: src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
===================================================================
--- src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (working copy)
@@ -203,6 +203,7 @@
}
//
+ @Override
protected boolean setFreqCurrentDoc() throws IOException {
if (!more) {
return false;
Index: src/java/org/apache/lucene/search/PhraseQueue.java
===================================================================
--- src/java/org/apache/lucene/search/PhraseQueue.java (revision 830378)
+++ src/java/org/apache/lucene/search/PhraseQueue.java (working copy)
@@ -24,6 +24,7 @@
initialize(size);
}
+ @Override
protected final boolean lessThan(PhrasePositions pp1, PhrasePositions pp2) {
if (pp1.doc == pp2.doc)
if (pp1.position == pp2.position)
Index: src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java
===================================================================
--- src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java (revision 830378)
+++ src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java (working copy)
@@ -35,16 +35,19 @@
this.c = c;
}
+ @Override
public void collect(int doc) throws IOException {
if (scorer.score() > 0) {
c.collect(doc);
}
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
c.setNextReader(reader, docBase);
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
// Set a ScoreCachingWrappingScorer in case the wrapped Collector will call
// score() also.
@@ -52,6 +55,7 @@
c.setScorer(this.scorer);
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return c.acceptsDocsOutOfOrder();
}
Index: src/java/org/apache/lucene/search/PrefixFilter.java
===================================================================
--- src/java/org/apache/lucene/search/PrefixFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/PrefixFilter.java (working copy)
@@ -32,6 +32,7 @@
public Term getPrefix() { return query.getPrefix(); }
/** Prints a user-readable version of this query. */
+ @Override
public String toString () {
StringBuilder buffer = new StringBuilder();
buffer.append("PrefixFilter(");
Index: src/java/org/apache/lucene/search/PrefixQuery.java
===================================================================
--- src/java/org/apache/lucene/search/PrefixQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/PrefixQuery.java (working copy)
@@ -40,6 +40,7 @@
/** Returns the prefix of this query. */
public Term getPrefix() { return prefix; }
+ @Override
protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
return new PrefixTermEnum(reader, prefix);
}
Index: src/java/org/apache/lucene/search/PrefixTermEnum.java
===================================================================
--- src/java/org/apache/lucene/search/PrefixTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/search/PrefixTermEnum.java (working copy)
@@ -41,10 +41,12 @@
setEnum(reader.terms(new Term(prefix.field(), prefix.text())));
}
+ @Override
public float difference() {
return 1.0f;
}
+ @Override
protected boolean endEnum() {
return endEnum;
}
@@ -53,6 +55,7 @@
return prefix;
}
+ @Override
protected boolean termCompare(Term term) {
if (term.field() == prefix.field() && term.text().startsWith(prefix.text())) {
return true;
Index: src/java/org/apache/lucene/search/Query.java
===================================================================
--- src/java/org/apache/lucene/search/Query.java (revision 830378)
+++ src/java/org/apache/lucene/search/Query.java (working copy)
@@ -78,6 +78,7 @@
public abstract String toString(String field);
/** Prints a query to a string. */
+ @Override
public String toString() {
return toString("");
}
@@ -204,6 +205,7 @@
}
/** Returns a clone of this query. */
+ @Override
public Object clone() {
try {
return super.clone();
@@ -212,6 +214,7 @@
}
}
+ @Override
public int hashCode() {
final int prime = 31;
int result = 1;
@@ -219,6 +222,7 @@
return result;
}
+ @Override
public boolean equals(Object obj) {
if (this == obj)
return true;
Index: src/java/org/apache/lucene/search/QueryTermVector.java
===================================================================
--- src/java/org/apache/lucene/search/QueryTermVector.java (revision 830378)
+++ src/java/org/apache/lucene/search/QueryTermVector.java (working copy)
@@ -106,6 +106,7 @@
}
}
+ @Override
public final String toString() {
StringBuilder sb = new StringBuilder();
sb.append('{');
Index: src/java/org/apache/lucene/search/ScoreDoc.java
===================================================================
--- src/java/org/apache/lucene/search/ScoreDoc.java (revision 830378)
+++ src/java/org/apache/lucene/search/ScoreDoc.java (working copy)
@@ -35,6 +35,7 @@
}
// A convenience method for debugging.
+ @Override
public String toString() {
return "doc=" + doc + " score=" + score;
}
Index: src/java/org/apache/lucene/search/SimilarityDelegator.java
===================================================================
--- src/java/org/apache/lucene/search/SimilarityDelegator.java (revision 830378)
+++ src/java/org/apache/lucene/search/SimilarityDelegator.java (working copy)
@@ -34,34 +34,42 @@
this.delegee = delegee;
}
+ @Override
public float computeNorm(String fieldName, FieldInvertState state) {
return delegee.computeNorm(fieldName, state);
}
+ @Override
public float lengthNorm(String fieldName, int numTerms) {
return delegee.lengthNorm(fieldName, numTerms);
}
+ @Override
public float queryNorm(float sumOfSquaredWeights) {
return delegee.queryNorm(sumOfSquaredWeights);
}
+ @Override
public float tf(float freq) {
return delegee.tf(freq);
}
+ @Override
public float sloppyFreq(int distance) {
return delegee.sloppyFreq(distance);
}
+ @Override
public float idf(int docFreq, int numDocs) {
return delegee.idf(docFreq, numDocs);
}
+ @Override
public float coord(int overlap, int maxOverlap) {
return delegee.coord(overlap, maxOverlap);
}
+ @Override
public float scorePayload(int docId, String fieldName, int start, int end, byte [] payload, int offset, int length) {
return delegee.scorePayload(docId, fieldName, start, end, payload, offset, length);
}
Index: src/java/org/apache/lucene/search/SloppyPhraseScorer.java
===================================================================
--- src/java/org/apache/lucene/search/SloppyPhraseScorer.java (revision 830378)
+++ src/java/org/apache/lucene/search/SloppyPhraseScorer.java (working copy)
@@ -52,6 +52,7 @@
* would get same score as "g f"~2, although "c b"~2 could be matched twice.
* We may want to fix this in the future (currently not, for performance reasons).
*/
+ @Override
protected final float phraseFreq() throws IOException {
int end = initPhrasePositions();
Index: src/java/org/apache/lucene/search/Sort.java
===================================================================
--- src/java/org/apache/lucene/search/Sort.java (revision 830378)
+++ src/java/org/apache/lucene/search/Sort.java (working copy)
@@ -151,6 +151,7 @@
return fields;
}
+ @Override
public String toString() {
StringBuilder buffer = new StringBuilder();
@@ -164,6 +165,7 @@
}
/** Returns true if <code>o</code> is equal to this. */
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Sort)) return false;
@@ -172,6 +174,7 @@
}
/** Returns a hash code value for this object. */
+ @Override
public int hashCode() {
// TODO in Java 1.5: switch to Arrays.hashCode(). The
// Java 1.4 workaround below calculates the same hashCode
Index: src/java/org/apache/lucene/search/SortField.java
===================================================================
--- src/java/org/apache/lucene/search/SortField.java (revision 830378)
+++ src/java/org/apache/lucene/search/SortField.java (working copy)
@@ -260,6 +260,7 @@
return reverse;
}
+ @Override
public String toString() {
StringBuilder buffer = new StringBuilder();
switch (type) {
@@ -323,6 +324,7 @@
* {@link FieldComparatorSource} or {@link
* FieldCache.Parser} was provided, it must properly
* implement equals (unless a singleton is always used). */
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SortField)) return false;
@@ -342,6 +344,7 @@
* FieldCache.Parser} was provided, it must properly
* implement hashCode (unless a singleton is always
* used). */
+ @Override
public int hashCode() {
int hash=type^0x346565dd + Boolean.valueOf(reverse).hashCode()^0xaf5998bb;
if (field != null) hash += field.hashCode()^0xff5685dd;
Index: src/java/org/apache/lucene/search/SpanQueryFilter.java
===================================================================
--- src/java/org/apache/lucene/search/SpanQueryFilter.java (revision 830378)
+++ src/java/org/apache/lucene/search/SpanQueryFilter.java (working copy)
@@ -54,11 +54,13 @@
this.query = query;
}
+ @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
SpanFilterResult result = bitSpans(reader);
return result.getDocIdSet();
}
+ @Override
public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
@@ -86,14 +88,17 @@
return query;
}
+ @Override
public String toString() {
return "SpanQueryFilter(" + query + ")";
}
+ @Override
public boolean equals(Object o) {
return o instanceof SpanQueryFilter && this.query.equals(((SpanQueryFilter) o).query);
}
+ @Override
public int hashCode() {
return query.hashCode() ^ 0x923F64B9;
}
Index: src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java (working copy)
@@ -79,6 +79,7 @@
this.field = maskedField;
}
+ @Override
public String getField() {
return field;
}
@@ -90,22 +91,27 @@
// :NOTE: getBoost and setBoost are not proxied to the maskedQuery
// ...this is done to be more consistent with things like SpanFirstQuery
+ @Override
public Spans getSpans(IndexReader reader) throws IOException {
return maskedQuery.getSpans(reader);
}
+ @Override
public void extractTerms(Set<Term> terms) {
maskedQuery.extractTerms(terms);
}
+ @Override
public Weight createWeight(Searcher searcher) throws IOException {
return maskedQuery.createWeight(searcher);
}
+ @Override
public Similarity getSimilarity(Searcher searcher) {
return maskedQuery.getSimilarity(searcher);
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
FieldMaskingSpanQuery clone = null;
@@ -122,6 +128,7 @@
}
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("mask(");
@@ -133,6 +140,7 @@
return buffer.toString();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof FieldMaskingSpanQuery))
return false;
@@ -143,6 +151,7 @@
}
+ @Override
public int hashCode() {
return getMaskedQuery().hashCode()
^ getField().hashCode()
Index: src/java/org/apache/lucene/search/spans/NearSpansOrdered.java
===================================================================
--- src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (working copy)
@@ -101,12 +101,15 @@
}
// inherit javadocs
+ @Override
public int doc() { return matchDoc; }
// inherit javadocs
+ @Override
public int start() { return matchStart; }
// inherit javadocs
+ @Override
public int end() { return matchEnd; }
public Spans[] getSubSpans() {
@@ -115,16 +118,19 @@
// TODO: Remove warning after API has been finalized
// TODO: Would be nice to be able to lazy load payloads
+ @Override
public Collection<byte[]> getPayload() throws IOException {
return matchPayload;
}
// TODO: Remove warning after API has been finalized
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
return matchPayload.isEmpty() == false;
}
// inherit javadocs
+ @Override
public boolean next() throws IOException {
if (firstTime) {
firstTime = false;
@@ -143,6 +149,7 @@
}
// inherit javadocs
+ @Override
public boolean skipTo(int target) throws IOException {
if (firstTime) {
firstTime = false;
@@ -327,6 +334,7 @@
return match; // ordered and allowed slop
}
+ @Override
public String toString() {
return getClass().getName() + "("+query.toString()+")@"+
(firstTime?"START":(more?(doc()+":"+start()+"-"+end()):"END"));
Index: src/java/org/apache/lucene/search/spans/NearSpansUnordered.java
===================================================================
--- src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (working copy)
@@ -56,6 +56,7 @@
initialize(size);
}
+ @Override
protected final boolean lessThan(SpansCell spans1, SpansCell spans2) {
if (spans1.doc() == spans2.doc()) {
return NearSpansOrdered.docSpansOrdered(spans1, spans2);
@@ -78,10 +79,12 @@
this.index = index;
}
+ @Override
public boolean next() throws IOException {
return adjust(spans.next());
}
+ @Override
public boolean skipTo(int target) throws IOException {
return adjust(spans.skipTo(target));
}
@@ -103,19 +106,27 @@
return condition;
}
+ @Override
public int doc() { return spans.doc(); }
+
+ @Override
public int start() { return spans.start(); }
+
+ @Override
public int end() { return spans.end(); }
// TODO: Remove warning after API has been finalized
+ @Override
public Collection<byte[]> getPayload() throws IOException {
return new ArrayList<byte[]>(spans.getPayload());
}
// TODO: Remove warning after API has been finalized
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
return spans.isPayloadAvailable();
}
+ @Override
public String toString() { return spans.toString() + "#" + index; }
}
@@ -138,6 +149,7 @@
public Spans[] getSubSpans() {
return subSpans;
}
+ @Override
public boolean next() throws IOException {
if (firstTime) {
initList(true);
@@ -189,6 +201,7 @@
return false; // no more matches
}
+ @Override
public boolean skipTo(int target) throws IOException {
if (firstTime) { // initialize
initList(false);
@@ -213,8 +226,11 @@
private SpansCell min() { return queue.top(); }
+ @Override
public int doc() { return min().doc(); }
+ @Override
public int start() { return min().start(); }
+ @Override
public int end() { return max.end(); }
// TODO: Remove warning after API has been finalized
@@ -223,6 +239,7 @@
* @return Collection of <code>byte[]</code> payloads
* @throws IOException
*/
+ @Override
public Collection<byte[]> getPayload() throws IOException {
Set<byte[]> matchPayload = new HashSet<byte[]>();
for (SpansCell cell = first; cell != null; cell = cell.next) {
@@ -234,6 +251,7 @@
}
// TODO: Remove warning after API has been finalized
+ @Override
public boolean isPayloadAvailable() {
SpansCell pointer = min();
while (pointer != null) {
@@ -246,6 +264,7 @@
return false;
}
+ @Override
public String toString() {
return getClass().getName() + "("+query.toString()+")@"+
(firstTime?"START":(more?(doc()+":"+start()+"-"+end()):"END"));
Index: src/java/org/apache/lucene/search/spans/SpanFirstQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanFirstQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanFirstQuery.java (working copy)
@@ -46,8 +46,10 @@
/** Return the maximum end position permitted in a match. */
public int getEnd() { return end; }
+ @Override
public String getField() { return match.getField(); }
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanFirst(");
@@ -59,20 +61,24 @@
return buffer.toString();
}
+ @Override
public Object clone() {
SpanFirstQuery spanFirstQuery = new SpanFirstQuery((SpanQuery) match.clone(), end);
spanFirstQuery.setBoost(getBoost());
return spanFirstQuery;
}
+ @Override
public void extractTerms(Set<Term> terms) {
match.extractTerms(terms);
}
+ @Override
public Spans getSpans(final IndexReader reader) throws IOException {
return new Spans() {
private Spans spans = match.getSpans(reader);
+ @Override
public boolean next() throws IOException {
while (spans.next()) { // scan to next match
if (end() <= end)
@@ -81,6 +87,7 @@
return false;
}
+ @Override
public boolean skipTo(int target) throws IOException {
if (!spans.skipTo(target))
return false;
@@ -89,11 +96,15 @@
}
+ @Override
public int doc() { return spans.doc(); }
+ @Override
public int start() { return spans.start(); }
+ @Override
public int end() { return spans.end(); }
// TODO: Remove warning after API has been finalized
+ @Override
public Collection<byte[]> getPayload() throws IOException {
ArrayList<byte[]> result = null;
if (spans.isPayloadAvailable()) {
@@ -103,10 +114,12 @@
}
// TODO: Remove warning after API has been finalized
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
return spans.isPayloadAvailable();
}
+ @Override
public String toString() {
return "spans(" + SpanFirstQuery.this.toString() + ")";
}
@@ -114,6 +127,7 @@
};
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
SpanFirstQuery clone = null;
@@ -130,6 +144,7 @@
}
}
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SpanFirstQuery)) return false;
@@ -140,6 +155,7 @@
&& this.getBoost() == other.getBoost();
}
+ @Override
public int hashCode() {
int h = match.hashCode();
h ^= (h << 8) | (h >>> 25); // reversible
Index: src/java/org/apache/lucene/search/spans/SpanNearQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanNearQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanNearQuery.java (working copy)
@@ -79,8 +79,10 @@
/** Return true if matches are required to be in-order.*/
public boolean isInOrder() { return inOrder; }
+ @Override
public String getField() { return field; }
+ @Override
public void extractTerms(Set<Term> terms) {
for (final SpanQuery clause : clauses) {
clause.extractTerms(terms);
@@ -88,6 +90,7 @@
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanNear([");
@@ -108,6 +111,7 @@
return buffer.toString();
}
+ @Override
public Spans getSpans(final IndexReader reader) throws IOException {
if (clauses.size() == 0) // optimize 0-clause case
return new SpanOrQuery(getClauses()).getSpans(reader);
@@ -120,6 +124,7 @@
: (Spans) new NearSpansUnordered(this, reader);
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
SpanNearQuery clone = null;
for (int i = 0 ; i < clauses.size(); i++) {
@@ -138,6 +143,7 @@
}
}
+ @Override
public Object clone() {
int sz = clauses.size();
SpanQuery[] newClauses = new SpanQuery[sz];
@@ -151,6 +157,7 @@
}
/** Returns true iff <code>o</code> is equal to this. */
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SpanNearQuery)) return false;
@@ -164,6 +171,7 @@
return getBoost() == spanNearQuery.getBoost();
}
+ @Override
public int hashCode() {
int result;
result = clauses.hashCode();
Index: src/java/org/apache/lucene/search/spans/SpanNotQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanNotQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanNotQuery.java (working copy)
@@ -48,10 +48,13 @@
/** Return the SpanQuery whose matches must not overlap those returned. */
public SpanQuery getExclude() { return exclude; }
+ @Override
public String getField() { return include.getField(); }
+ @Override
public void extractTerms(Set<Term> terms) { include.extractTerms(terms); }
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanNot(");
@@ -63,12 +66,14 @@
return buffer.toString();
}
+ @Override
public Object clone() {
SpanNotQuery spanNotQuery = new SpanNotQuery((SpanQuery)include.clone(),(SpanQuery) exclude.clone());
spanNotQuery.setBoost(getBoost());
return spanNotQuery;
}
+ @Override
public Spans getSpans(final IndexReader reader) throws IOException {
return new Spans() {
private Spans includeSpans = include.getSpans(reader);
@@ -77,6 +82,7 @@
private Spans excludeSpans = exclude.getSpans(reader);
private boolean moreExclude = excludeSpans.next();
+ @Override
public boolean next() throws IOException {
if (moreInclude) // move to next include
moreInclude = includeSpans.next();
@@ -102,6 +108,7 @@
return moreInclude;
}
+ @Override
public boolean skipTo(int target) throws IOException {
if (moreInclude) // skip include
moreInclude = includeSpans.skipTo(target);
@@ -127,11 +134,15 @@
return next(); // scan to next match
}
+ @Override
public int doc() { return includeSpans.doc(); }
+ @Override
public int start() { return includeSpans.start(); }
+ @Override
public int end() { return includeSpans.end(); }
// TODO: Remove warning after API has been finalized
+ @Override
public Collection<byte[]> getPayload() throws IOException {
ArrayList<byte[]> result = null;
if (includeSpans.isPayloadAvailable()) {
@@ -141,10 +152,12 @@
}
// TODO: Remove warning after API has been finalized
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
return includeSpans.isPayloadAvailable();
}
+ @Override
public String toString() {
return "spans(" + SpanNotQuery.this.toString() + ")";
}
@@ -152,6 +165,7 @@
};
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
SpanNotQuery clone = null;
@@ -174,6 +188,7 @@
}
/** Returns true iff <code>o</code> is equal to this. */
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SpanNotQuery)) return false;
@@ -184,6 +199,7 @@
&& this.getBoost() == other.getBoost();
}
+ @Override
public int hashCode() {
int h = include.hashCode();
h = (h<<1) | (h >>> 31); // rotate left
Index: src/java/org/apache/lucene/search/spans/SpanOrQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanOrQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanOrQuery.java (working copy)
@@ -57,14 +57,17 @@
return clauses.toArray(new SpanQuery[clauses.size()]);
}
+ @Override
public String getField() { return field; }
+ @Override
public void extractTerms(Set<Term> terms) {
for(final SpanQuery clause: clauses) {
clause.extractTerms(terms);
}
}
+ @Override
public Object clone() {
int sz = clauses.size();
SpanQuery[] newClauses = new SpanQuery[sz];
@@ -77,6 +80,7 @@
return soq;
}
+ @Override
public Query rewrite(IndexReader reader) throws IOException {
SpanOrQuery clone = null;
for (int i = 0 ; i < clauses.size(); i++) {
@@ -95,6 +99,7 @@
}
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanOr([");
@@ -111,6 +116,7 @@
return buffer.toString();
}
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
@@ -123,6 +129,7 @@
return getBoost() == that.getBoost();
}
+ @Override
public int hashCode() {
int h = clauses.hashCode();
h ^= (h << 10) | (h >>> 23);
@@ -136,6 +143,7 @@
initialize(size);
}
+ @Override
protected final boolean lessThan(Spans spans1, Spans spans2) {
if (spans1.doc() == spans2.doc()) {
if (spans1.start() == spans2.start()) {
@@ -149,6 +157,7 @@
}
}
+ @Override
public Spans getSpans(final IndexReader reader) throws IOException {
if (clauses.size() == 1) // optimize 1-clause case
return (clauses.get(0)).getSpans(reader);
@@ -169,6 +178,7 @@
return queue.size() != 0;
}
+ @Override
public boolean next() throws IOException {
if (queue == null) {
return initSpanQueue(-1);
@@ -189,6 +199,7 @@
private Spans top() { return queue.top(); }
+ @Override
public boolean skipTo(int target) throws IOException {
if (queue == null) {
return initSpanQueue(target);
@@ -210,10 +221,14 @@
return next();
}
+ @Override
public int doc() { return top().doc(); }
+ @Override
public int start() { return top().start(); }
+ @Override
public int end() { return top().end(); }
+ @Override
public Collection<byte[]> getPayload() throws IOException {
ArrayList<byte[]> result = null;
Spans theTop = top();
@@ -223,11 +238,13 @@
return result;
}
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
Spans top = top();
return top != null && top.isPayloadAvailable();
}
+ @Override
public String toString() {
return "spans("+SpanOrQuery.this+")@"+
((queue == null)?"START"
Index: src/java/org/apache/lucene/search/spans/SpanQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanQuery.java (working copy)
@@ -33,6 +33,7 @@
/** Returns the name of the field matched by this query.*/
public abstract String getField();
+ @Override
public Weight createWeight(Searcher searcher) throws IOException {
return new SpanWeight(this, searcher);
}
Index: src/java/org/apache/lucene/search/spans/SpanTermQuery.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanTermQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/SpanTermQuery.java (working copy)
@@ -34,12 +34,15 @@
/** Return the term whose spans are matched. */
public Term getTerm() { return term; }
+ @Override
public String getField() { return term.field(); }
+ @Override
public void extractTerms(Set<Term> terms) {
terms.add(term);
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
if (term.field().equals(field))
@@ -50,6 +53,7 @@
return buffer.toString();
}
+ @Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
@@ -57,6 +61,7 @@
return result;
}
+ @Override
public boolean equals(Object obj) {
if (this == obj)
return true;
@@ -73,6 +78,7 @@
return true;
}
+ @Override
public Spans getSpans(final IndexReader reader) throws IOException {
return new TermSpans(reader.termPositions(term), term);
}
Index: src/java/org/apache/lucene/search/spans/TermSpans.java
===================================================================
--- src/java/org/apache/lucene/search/spans/TermSpans.java (revision 830378)
+++ src/java/org/apache/lucene/search/spans/TermSpans.java (working copy)
@@ -43,6 +43,7 @@
doc = -1;
}
+ @Override
public boolean next() throws IOException {
if (count == freq) {
if (!positions.next()) {
@@ -58,6 +59,7 @@
return true;
}
+ @Override
public boolean skipTo(int target) throws IOException {
if (!positions.skipTo(target)) {
doc = Integer.MAX_VALUE;
@@ -74,19 +76,23 @@
return true;
}
+ @Override
public int doc() {
return doc;
}
+ @Override
public int start() {
return position;
}
+ @Override
public int end() {
return position + 1;
}
// TODO: Remove warning after API has been finalized
+ @Override
public Collection<byte[]> getPayload() throws IOException {
byte [] bytes = new byte[positions.getPayloadLength()];
bytes = positions.getPayload(bytes, 0);
@@ -94,10 +100,12 @@
}
// TODO: Remove warning after API has been finalized
- public boolean isPayloadAvailable() {
+ @Override
+ public boolean isPayloadAvailable() {
return positions.isPayloadAvailable();
}
+ @Override
public String toString() {
return "spans(" + term.toString() + ")@" +
(doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
Index: src/java/org/apache/lucene/search/TermRangeTermEnum.java
===================================================================
--- src/java/org/apache/lucene/search/TermRangeTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/search/TermRangeTermEnum.java (working copy)
@@ -93,14 +93,17 @@
setEnum(reader.terms(new Term(this.field, startTermText)));
}
+ @Override
public float difference() {
return 1.0f;
}
+ @Override
protected boolean endEnum() {
return endEnum;
}
+ @Override
protected boolean termCompare(Term term) {
if (collator == null) {
// Use Unicode code point ordering
Index: src/java/org/apache/lucene/search/TimeLimitingCollector.java
===================================================================
--- src/java/org/apache/lucene/search/TimeLimitingCollector.java (revision 830378)
+++ src/java/org/apache/lucene/search/TimeLimitingCollector.java (working copy)
@@ -70,6 +70,7 @@
this.setDaemon( true );
}
+ @Override
public void run() {
while (true) {
// TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
@@ -194,6 +195,7 @@
* @throws TimeExceededException
* if the time allowed has exceeded.
*/
+ @Override
public void collect(final int doc) throws IOException {
long time = TIMER_THREAD.getMilliseconds();
if (timeout < time) {
@@ -208,14 +210,17 @@
collector.collect(doc);
}
+ @Override
public void setNextReader(IndexReader reader, int base) throws IOException {
collector.setNextReader(reader, base);
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
Index: src/java/org/apache/lucene/search/TopFieldCollector.java
===================================================================
--- src/java/org/apache/lucene/search/TopFieldCollector.java (revision 830378)
+++ src/java/org/apache/lucene/search/TopFieldCollector.java (working copy)
@@ -63,6 +63,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -89,11 +90,13 @@
}
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
this.docBase = docBase;
comparator.setNextReader(reader, docBase);
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
comparator.setScorer(scorer);
}
@@ -113,6 +116,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -138,6 +142,7 @@
}
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -164,6 +169,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -196,6 +202,7 @@
}
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
comparator.setScorer(scorer);
@@ -217,6 +224,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -248,6 +256,7 @@
}
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -276,6 +285,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
final float score = scorer.score();
if (score > maxScore) {
@@ -307,6 +317,7 @@
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
super.setScorer(scorer);
@@ -326,6 +337,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
final float score = scorer.score();
if (score > maxScore) {
@@ -355,6 +367,7 @@
}
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -383,6 +396,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -429,6 +443,7 @@
}
}
+ @Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
this.docBase = docBase;
for (int i = 0; i < comparators.length; i++) {
@@ -436,6 +451,7 @@
}
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
// set the scorer on all comparators
for (int i = 0; i < comparators.length; i++) {
@@ -457,6 +473,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -505,6 +522,7 @@
}
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -532,6 +550,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
final float score = scorer.score();
if (score > maxScore) {
@@ -582,6 +601,7 @@
}
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
super.setScorer(scorer);
@@ -601,6 +621,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
final float score = scorer.score();
if (score > maxScore) {
@@ -653,6 +674,7 @@
}
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -678,6 +700,7 @@
bottom = pq.updateTop();
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -729,6 +752,7 @@
}
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
super.setScorer(scorer);
@@ -749,6 +773,7 @@
super(queue, numHits, fillFields);
}
+ @Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
@@ -802,11 +827,13 @@
}
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
super.setScorer(scorer);
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -934,6 +961,7 @@
* topDocs(int, int) calls them to return the results.
*/
+ @Override
protected void populateResults(ScoreDoc[] results, int howMany) {
if (fillFields) {
// avoid casting if unnecessary.
@@ -949,6 +977,7 @@
}
}
+ @Override
protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
if (results == null) {
results = EMPTY_SCOREDOCS;
@@ -960,6 +989,7 @@
return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue) pq).getFields(), maxScore);
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return false;
}
Index: src/java/org/apache/lucene/search/TopScoreDocCollector.java
===================================================================
--- src/java/org/apache/lucene/search/TopScoreDocCollector.java (revision 830378)
+++ src/java/org/apache/lucene/search/TopScoreDocCollector.java (working copy)
@@ -42,6 +42,7 @@
super(numHits);
}
+ @Override
public void collect(int doc) throws IOException {
float score = scorer.score();
totalHits++;
@@ -56,6 +57,7 @@
pqTop = pq.updateTop();
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return false;
}
@@ -67,6 +69,7 @@
super(numHits);
}
+ @Override
public void collect(int doc) throws IOException {
float score = scorer.score();
totalHits++;
@@ -79,6 +82,7 @@
pqTop = pq.updateTop();
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
@@ -116,6 +120,7 @@
pqTop = pq.top();
}
+ @Override
protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
if (results == null) {
return EMPTY_TOPDOCS;
@@ -136,10 +141,12 @@
return new TopDocs(totalHits, results, maxScore);
}
+ @Override
public void setNextReader(IndexReader reader, int base) {
docBase = base;
}
+ @Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
Index: src/java/org/apache/lucene/search/WildcardQuery.java
===================================================================
--- src/java/org/apache/lucene/search/WildcardQuery.java (revision 830378)
+++ src/java/org/apache/lucene/search/WildcardQuery.java (working copy)
@@ -50,6 +50,7 @@
&& (text.indexOf('*') == text.length() - 1);
}
+ @Override
protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
if (termContainsWildcard)
return new WildcardTermEnum(reader, getTerm());
Index: src/java/org/apache/lucene/search/WildcardTermEnum.java
===================================================================
--- src/java/org/apache/lucene/search/WildcardTermEnum.java (revision 830378)
+++ src/java/org/apache/lucene/search/WildcardTermEnum.java (working copy)
@@ -65,6 +65,7 @@
setEnum(reader.terms(new Term(searchTerm.field(), pre)));
}
+ @Override
protected final boolean termCompare(Term term) {
if (field == term.field()) {
String searchText = term.text();
@@ -76,10 +77,12 @@
return false;
}
+ @Override
public float difference() {
return 1.0f;
}
+ @Override
public final boolean endEnum() {
return endEnum;
}
Index: src/java/org/apache/lucene/store/BufferedIndexInput.java
===================================================================
--- src/java/org/apache/lucene/store/BufferedIndexInput.java (revision 830378)
+++ src/java/org/apache/lucene/store/BufferedIndexInput.java (working copy)
@@ -33,6 +33,7 @@
private int bufferLength = 0; // end of valid bytes
private int bufferPosition = 0; // next byte to read
+ @Override
public byte readByte() throws IOException {
if (bufferPosition >= bufferLength)
refill();
@@ -88,10 +89,12 @@
throw new IllegalArgumentException("bufferSize must be greater than 0 (got " + bufferSize + ")");
}
+ @Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
readBytes(b, offset, len, true);
}
+ @Override
public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException {
if(len <= (bufferLength-bufferPosition)){
@@ -169,8 +172,10 @@
protected abstract void readInternal(byte[] b, int offset, int length)
throws IOException;
+ @Override
public long getFilePointer() { return bufferStart + bufferPosition; }
+ @Override
public void seek(long pos) throws IOException {
if (pos >= bufferStart && pos < (bufferStart + bufferLength))
bufferPosition = (int)(pos - bufferStart); // seek within buffer
@@ -188,6 +193,7 @@
*/
protected abstract void seekInternal(long pos) throws IOException;
+ @Override
public Object clone() {
BufferedIndexInput clone = (BufferedIndexInput)super.clone();
Index: src/java/org/apache/lucene/store/BufferedIndexOutput.java
===================================================================
--- src/java/org/apache/lucene/store/BufferedIndexOutput.java (revision 830378)
+++ src/java/org/apache/lucene/store/BufferedIndexOutput.java (working copy)
@@ -30,6 +30,7 @@
/** Writes a single byte.
* @see IndexInput#readByte()
*/
+ @Override
public void writeByte(byte b) throws IOException {
if (bufferPosition >= BUFFER_SIZE)
flush();
@@ -41,6 +42,7 @@
* @param length the number of bytes to write
* @see IndexInput#readBytes(byte[],int,int)
*/
+ @Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
int bytesLeft = BUFFER_SIZE - bufferPosition;
// is there enough space in the buffer?
@@ -81,6 +83,7 @@
}
/** Forces any buffered output to be written. */
+ @Override
public void flush() throws IOException {
flushBuffer(buffer, bufferPosition);
bufferStart += bufferPosition;
@@ -105,6 +108,7 @@
protected abstract void flushBuffer(byte[] b, int offset, int len) throws IOException;
/** Closes this stream to further operations. */
+ @Override
public void close() throws IOException {
flush();
}
@@ -113,6 +117,7 @@
* occur.
* @see #seek(long)
*/
+ @Override
public long getFilePointer() {
return bufferStart + bufferPosition;
}
@@ -120,12 +125,14 @@
/** Sets current position in this file, where the next write will occur.
* @see #getFilePointer()
*/
+ @Override
public void seek(long pos) throws IOException {
flush();
bufferStart = pos;
}
/** The number of bytes in the file. */
+ @Override
public abstract long length() throws IOException;
Index: src/java/org/apache/lucene/store/ChecksumIndexInput.java
===================================================================
--- src/java/org/apache/lucene/store/ChecksumIndexInput.java (revision 830378)
+++ src/java/org/apache/lucene/store/ChecksumIndexInput.java (working copy)
@@ -32,12 +32,14 @@
digest = new CRC32();
}
+ @Override
public byte readByte() throws IOException {
final byte b = main.readByte();
digest.update(b);
return b;
}
+ @Override
public void readBytes(byte[] b, int offset, int len)
throws IOException {
main.readBytes(b, offset, len);
@@ -49,18 +51,22 @@
return digest.getValue();
}
+ @Override
public void close() throws IOException {
main.close();
}
+ @Override
public long getFilePointer() {
return main.getFilePointer();
}
+ @Override
public void seek(long pos) {
throw new RuntimeException("not allowed");
}
+ @Override
public long length() {
return main.length();
}
Index: src/java/org/apache/lucene/store/ChecksumIndexOutput.java
===================================================================
--- src/java/org/apache/lucene/store/ChecksumIndexOutput.java (revision 830378)
+++ src/java/org/apache/lucene/store/ChecksumIndexOutput.java (working copy)
@@ -32,11 +32,13 @@
digest = new CRC32();
}
+ @Override
public void writeByte(byte b) throws IOException {
digest.update(b);
main.writeByte(b);
}
+ @Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
digest.update(b, offset, length);
main.writeBytes(b, offset, length);
@@ -46,18 +48,22 @@
return digest.getValue();
}
+ @Override
public void flush() throws IOException {
main.flush();
}
+ @Override
public void close() throws IOException {
main.close();
}
+ @Override
public long getFilePointer() {
return main.getFilePointer();
}
+ @Override
public void seek(long pos) {
throw new RuntimeException("not allowed");
}
@@ -86,6 +92,7 @@
main.writeLong(getChecksum());
}
+ @Override
public long length() throws IOException {
return main.length();
}
Index: src/java/org/apache/lucene/store/FileSwitchDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/FileSwitchDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/FileSwitchDirectory.java (working copy)
@@ -59,6 +59,7 @@
return secondaryDir;
}
+ @Override
public void close() throws IOException {
if (doClose) {
try {
@@ -70,6 +71,7 @@
}
}
+ @Override
public String[] listAll() throws IOException {
String[] primaryFiles = primaryDir.listAll();
String[] secondaryFiles = secondaryDir.listAll();
@@ -97,34 +99,42 @@
}
}
+ @Override
public boolean fileExists(String name) throws IOException {
return getDirectory(name).fileExists(name);
}
+ @Override
public long fileModified(String name) throws IOException {
return getDirectory(name).fileModified(name);
}
+ @Override
public void touchFile(String name) throws IOException {
getDirectory(name).touchFile(name);
}
+ @Override
public void deleteFile(String name) throws IOException {
getDirectory(name).deleteFile(name);
}
+ @Override
public long fileLength(String name) throws IOException {
return getDirectory(name).fileLength(name);
}
+ @Override
public IndexOutput createOutput(String name) throws IOException {
return getDirectory(name).createOutput(name);
}
+ @Override
public void sync(String name) throws IOException {
getDirectory(name).sync(name);
}
+ @Override
public IndexInput openInput(String name) throws IOException {
return getDirectory(name).openInput(name);
}
Index: src/java/org/apache/lucene/store/FSDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/FSDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/FSDirectory.java (working copy)
@@ -238,12 +238,14 @@
/** Lists all files (not subdirectories) in the
* directory.
* @see #listAll(File) */
+ @Override
public String[] listAll() throws IOException {
ensureOpen();
return listAll(directory);
}
/** Returns true iff a file with the given name exists. */
+ @Override
public boolean fileExists(String name) {
ensureOpen();
File file = new File(directory, name);
@@ -251,6 +253,7 @@
}
/** Returns the time the named file was last modified. */
+ @Override
public long fileModified(String name) {
ensureOpen();
File file = new File(directory, name);
@@ -264,6 +267,7 @@
}
/** Set the modified time of an existing file to now. */
+ @Override
public void touchFile(String name) {
ensureOpen();
File file = new File(directory, name);
@@ -271,6 +275,7 @@
}
/** Returns the length in bytes of a file in the directory. */
+ @Override
public long fileLength(String name) {
ensureOpen();
File file = new File(directory, name);
@@ -278,6 +283,7 @@
}
/** Removes an existing file in the directory. */
+ @Override
public void deleteFile(String name) throws IOException {
ensureOpen();
File file = new File(directory, name);
@@ -285,6 +291,7 @@
throw new IOException("Cannot delete " + file);
}
+ @Override
public void sync(String name) throws IOException {
ensureOpen();
File fullFile = new File(directory, name);
@@ -323,6 +330,7 @@
}
// Inherit javadoc
+ @Override
public IndexInput openInput(String name) throws IOException {
ensureOpen();
return openInput(name, BufferedIndexInput.BUFFER_SIZE);
@@ -335,6 +343,7 @@
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
+ @Override
public String getLockID() {
ensureOpen();
String dirName; // name to be hashed
@@ -360,6 +369,7 @@
}
/** Closes the store to future operations. */
+ @Override
public synchronized void close() {
isOpen = false;
}
@@ -370,6 +380,7 @@
}
/** For debug output. */
+ @Override
public String toString() {
return this.getClass().getName() + "@" + directory;
}
Index: src/java/org/apache/lucene/store/IndexInput.java
===================================================================
--- src/java/org/apache/lucene/store/IndexInput.java (revision 830378)
+++ src/java/org/apache/lucene/store/IndexInput.java (working copy)
@@ -217,6 +217,7 @@
* different points in the input from each other and from the stream they
* were cloned from.
*/
+ @Override
public Object clone() {
IndexInput clone = null;
try {
Index: src/java/org/apache/lucene/store/MMapDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/MMapDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/MMapDirectory.java (working copy)
@@ -202,6 +202,7 @@
this.buffer = raf.getChannel().map(MapMode.READ_ONLY, 0, length);
}
+ @Override
public byte readByte() throws IOException {
try {
return buffer.get();
@@ -210,6 +211,7 @@
}
}
+ @Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
try {
buffer.get(b, offset, len);
@@ -218,18 +220,22 @@
}
}
+ @Override
public long getFilePointer() {
return buffer.position();
}
+ @Override
public void seek(long pos) throws IOException {
buffer.position((int)pos);
}
+ @Override
public long length() {
return length;
}
+ @Override
public Object clone() {
MMapIndexInput clone = (MMapIndexInput)super.clone();
clone.isClone = true;
@@ -237,6 +243,7 @@
return clone;
}
+ @Override
public void close() throws IOException {
if (isClone || buffer == null) return;
// unmap the buffer (if enabled) and at least unset it for GC
@@ -299,6 +306,7 @@
seek(0L);
}
+ @Override
public byte readByte() throws IOException {
// Performance might be improved by reading ahead into an array of
// e.g. 128 bytes and readByte() from there.
@@ -314,6 +322,7 @@
return curBuf.get();
}
+ @Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
while (len > curAvail) {
curBuf.get(b, offset, curAvail);
@@ -330,10 +339,12 @@
curAvail -= len;
}
+ @Override
public long getFilePointer() {
return ((long) curBufIndex * maxBufSize) + curBuf.position();
}
+ @Override
public void seek(long pos) throws IOException {
curBufIndex = (int) (pos / maxBufSize);
curBuf = buffers[curBufIndex];
@@ -342,10 +353,12 @@
curAvail = bufSizes[curBufIndex] - bufOffset;
}
+ @Override
public long length() {
return length;
}
+ @Override
public Object clone() {
MultiMMapIndexInput clone = (MultiMMapIndexInput)super.clone();
clone.isClone = true;
@@ -366,6 +379,7 @@
return clone;
}
+ @Override
public void close() throws IOException {
if (isClone || buffers == null) return;
try {
@@ -384,6 +398,7 @@
}
/** Creates an IndexInput for the file with the given name. */
+ @Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
File f = new File(getFile(), name);
@@ -398,6 +413,7 @@
}
/** Creates an IndexOutput for the file with the given name. */
+ @Override
public IndexOutput createOutput(String name) throws IOException {
initOutput(name);
return new SimpleFSDirectory.SimpleFSIndexOutput(new File(directory, name));
Index: src/java/org/apache/lucene/store/NativeFSLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/NativeFSLockFactory.java (revision 830378)
+++ src/java/org/apache/lucene/store/NativeFSLockFactory.java (working copy)
@@ -122,6 +122,7 @@
setLockDir(lockDir);
}
+ @Override
public synchronized Lock makeLock(String lockName) {
acquireTestLock();
if (lockPrefix != null)
@@ -129,6 +130,7 @@
return new NativeFSLock(lockDir, lockName);
}
+ @Override
public void clearLock(String lockName) throws IOException {
// Note that this isn't strictly required anymore
// because the existence of these files does not mean
@@ -175,6 +177,7 @@
return lock != null;
}
+ @Override
public synchronized boolean obtain() throws IOException {
if (lockExists()) {
@@ -274,6 +277,7 @@
return lockExists();
}
+ @Override
public synchronized void release() throws IOException {
if (lockExists()) {
try {
@@ -299,6 +303,7 @@
}
}
+ @Override
public synchronized boolean isLocked() {
// The test for is isLocked is not directly possible with native file locks:
@@ -318,6 +323,7 @@
}
}
+ @Override
public String toString() {
return "NativeFSLock@" + path;
}
Index: src/java/org/apache/lucene/store/NIOFSDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/NIOFSDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/NIOFSDirectory.java (working copy)
@@ -61,12 +61,14 @@
}
/** Creates an IndexInput for the file with the given name. */
+ @Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new NIOFSIndexInput(new File(getFile(), name), bufferSize, getReadChunkSize());
}
/** Creates an IndexOutput for the file with the given name. */
+ @Override
public IndexOutput createOutput(String name) throws IOException {
initOutput(name);
return new SimpleFSDirectory.SimpleFSIndexOutput(new File(directory, name));
@@ -86,11 +88,13 @@
channel = file.getChannel();
}
+ @Override
protected void newBuffer(byte[] newBuffer) {
super.newBuffer(newBuffer);
byteBuf = ByteBuffer.wrap(newBuffer);
}
+ @Override
public void close() throws IOException {
if (!isClone && file.isOpen) {
// Close the channel & file
@@ -102,6 +106,7 @@
}
}
+ @Override
protected void readInternal(byte[] b, int offset, int len) throws IOException {
final ByteBuffer bb;
Index: src/java/org/apache/lucene/store/NoLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/NoLockFactory.java (revision 830378)
+++ src/java/org/apache/lucene/store/NoLockFactory.java (working copy)
@@ -38,25 +38,31 @@
return singleton;
}
+ @Override
public Lock makeLock(String lockName) {
return singletonLock;
}
+ @Override
public void clearLock(String lockName) {};
};
class NoLock extends Lock {
+ @Override
public boolean obtain() throws IOException {
return true;
}
+ @Override
public void release() {
}
+ @Override
public boolean isLocked() {
return false;
}
+ @Override
public String toString() {
return "NoLock";
}
Index: src/java/org/apache/lucene/store/RAMDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/RAMDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/RAMDirectory.java (working copy)
@@ -69,6 +69,7 @@
Directory.copy(dir, this, closeDir);
}
+ @Override
public synchronized final String[] listAll() {
ensureOpen();
Set<String> fileNames = fileMap.keySet();
@@ -80,6 +81,7 @@
}
/** Returns true iff the named file exists in this directory. */
+ @Override
public final boolean fileExists(String name) {
ensureOpen();
RAMFile file;
@@ -92,6 +94,7 @@
/** Returns the time the named file was last modified.
* @throws IOException if the file does not exist
*/
+ @Override
public final long fileModified(String name) throws IOException {
ensureOpen();
RAMFile file;
@@ -106,6 +109,7 @@
/** Set the modified time of an existing file to now.
* @throws IOException if the file does not exist
*/
+ @Override
public void touchFile(String name) throws IOException {
ensureOpen();
RAMFile file;
@@ -134,6 +138,7 @@
/** Returns the length in bytes of a file in the directory.
* @throws IOException if the file does not exist
*/
+ @Override
public final long fileLength(String name) throws IOException {
ensureOpen();
RAMFile file;
@@ -156,6 +161,7 @@
/** Removes an existing file in the directory.
* @throws IOException if the file does not exist
*/
+ @Override
public synchronized void deleteFile(String name) throws IOException {
ensureOpen();
RAMFile file = fileMap.get(name);
@@ -168,6 +174,7 @@
}
/** Creates a new, empty file in the directory with the given name. Returns a stream writing this file. */
+ @Override
public IndexOutput createOutput(String name) throws IOException {
ensureOpen();
RAMFile file = new RAMFile(this);
@@ -183,6 +190,7 @@
}
/** Returns a stream reading an existing file. */
+ @Override
public IndexInput openInput(String name) throws IOException {
ensureOpen();
RAMFile file;
@@ -195,6 +203,7 @@
}
/** Closes the store to future operations, releasing associated memory. */
+ @Override
public void close() {
isOpen = false;
fileMap = null;
Index: src/java/org/apache/lucene/store/RAMInputStream.java
===================================================================
--- src/java/org/apache/lucene/store/RAMInputStream.java (revision 830378)
+++ src/java/org/apache/lucene/store/RAMInputStream.java (working copy)
@@ -49,14 +49,17 @@
currentBuffer = null;
}
+ @Override
public void close() {
// nothing to do here
}
+ @Override
public long length() {
return length;
}
+ @Override
public byte readByte() throws IOException {
if (bufferPosition >= bufferLength) {
currentBufferIndex++;
@@ -65,6 +68,7 @@
return currentBuffer[bufferPosition++];
}
+ @Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
while (len > 0) {
if (bufferPosition >= bufferLength) {
@@ -100,10 +104,12 @@
}
}
+ @Override
public long getFilePointer() {
return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
}
+ @Override
public void seek(long pos) throws IOException {
if (currentBuffer==null || pos < bufferStart || pos >= bufferStart + BUFFER_SIZE) {
currentBufferIndex = (int) (pos / BUFFER_SIZE);
Index: src/java/org/apache/lucene/store/RAMOutputStream.java
===================================================================
--- src/java/org/apache/lucene/store/RAMOutputStream.java (revision 830378)
+++ src/java/org/apache/lucene/store/RAMOutputStream.java (working copy)
@@ -77,10 +77,12 @@
file.setLength(0);
}
+ @Override
public void close() throws IOException {
flush();
}
+ @Override
public void seek(long pos) throws IOException {
// set the file length in case we seek back
// and flush() has not been called yet
@@ -93,10 +95,12 @@
bufferPosition = (int) (pos % BUFFER_SIZE);
}
+ @Override
public long length() {
return file.length;
}
+ @Override
public void writeByte(byte b) throws IOException {
if (bufferPosition == bufferLength) {
currentBufferIndex++;
@@ -105,6 +109,7 @@
currentBuffer[bufferPosition++] = b;
}
+ @Override
public void writeBytes(byte[] b, int offset, int len) throws IOException {
assert b != null;
while (len > 0) {
@@ -140,11 +145,13 @@
}
}
+ @Override
public void flush() throws IOException {
file.setLastModified(System.currentTimeMillis());
setFileLength();
}
+ @Override
public long getFilePointer() {
return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
}
Index: src/java/org/apache/lucene/store/SimpleFSDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/SimpleFSDirectory.java (revision 830378)
+++ src/java/org/apache/lucene/store/SimpleFSDirectory.java (working copy)
@@ -50,12 +50,14 @@
}
/** Creates an IndexOutput for the file with the given name. */
+ @Override
public IndexOutput createOutput(String name) throws IOException {
initOutput(name);
return new SimpleFSIndexOutput(new File(directory, name));
}
/** Creates an IndexInput for the file with the given name. */
+ @Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
@@ -76,6 +78,7 @@
length=length();
}
+ @Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
@@ -96,6 +99,7 @@
}
/** IndexInput methods */
+ @Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
@@ -135,18 +139,22 @@
}
}
+ @Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
+ @Override
protected void seekInternal(long position) {
}
+ @Override
public long length() {
return file.length;
}
+ @Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
@@ -174,9 +182,11 @@
}
/** output methods: */
+ @Override
public void flushBuffer(byte[] b, int offset, int size) throws IOException {
file.write(b, offset, size);
}
+ @Override
public void close() throws IOException {
// only close the file if it has not been closed yet
if (isOpen) {
@@ -199,13 +209,16 @@
}
/** Random-access methods */
+ @Override
public void seek(long pos) throws IOException {
super.seek(pos);
file.seek(pos);
}
+ @Override
public long length() throws IOException {
return file.length();
}
+ @Override
public void setLength(long length) throws IOException {
file.setLength(length);
}
Index: src/java/org/apache/lucene/store/SimpleFSLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/SimpleFSLockFactory.java (revision 830378)
+++ src/java/org/apache/lucene/store/SimpleFSLockFactory.java (working copy)
@@ -80,6 +80,7 @@
setLockDir(new File(lockDirName));
}
+ @Override
public Lock makeLock(String lockName) {
if (lockPrefix != null) {
lockName = lockPrefix + "-" + lockName;
@@ -87,6 +88,7 @@
return new SimpleFSLock(lockDir, lockName);
}
+ @Override
public void clearLock(String lockName) throws IOException {
if (lockDir.exists()) {
if (lockPrefix != null) {
@@ -110,6 +112,7 @@
lockFile = new File(lockDir, lockFileName);
}
+ @Override
public boolean obtain() throws IOException {
// Ensure that lockDir exists and is a directory:
@@ -124,15 +127,18 @@
return lockFile.createNewFile();
}
+ @Override
public void release() throws LockReleaseFailedException {
if (lockFile.exists() && !lockFile.delete())
throw new LockReleaseFailedException("failed to delete " + lockFile);
}
+ @Override
public boolean isLocked() {
return lockFile.exists();
}
+ @Override
public String toString() {
return "SimpleFSLock@" + lockFile;
}
Index: src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/SingleInstanceLockFactory.java (revision 830378)
+++ src/java/org/apache/lucene/store/SingleInstanceLockFactory.java (working copy)
@@ -35,6 +35,7 @@
private HashSet<String> locks = new HashSet<String>();
+ @Override
public Lock makeLock(String lockName) {
// We do not use the LockPrefix at all, because the private
// HashSet instance effectively scopes the locking to this
@@ -42,6 +43,7 @@
return new SingleInstanceLock(locks, lockName);
}
+ @Override
public void clearLock(String lockName) throws IOException {
synchronized(locks) {
if (locks.contains(lockName)) {
@@ -61,24 +63,28 @@
this.lockName = lockName;
}
+ @Override
public boolean obtain() throws IOException {
synchronized(locks) {
return locks.add(lockName);
}
}
+ @Override
public void release() {
synchronized(locks) {
locks.remove(lockName);
}
}
+ @Override
public boolean isLocked() {
synchronized(locks) {
return locks.contains(lockName);
}
}
+ @Override
public String toString() {
return super.toString() + ": " + lockName;
}
Index: src/java/org/apache/lucene/store/VerifyingLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/VerifyingLockFactory.java (revision 830378)
+++ src/java/org/apache/lucene/store/VerifyingLockFactory.java (working copy)
@@ -68,6 +68,7 @@
}
}
+ @Override
public synchronized boolean obtain(long lockWaitTimeout)
throws LockObtainFailedException, IOException {
boolean obtained = lock.obtain(lockWaitTimeout);
@@ -76,15 +77,18 @@
return obtained;
}
+ @Override
public synchronized boolean obtain()
throws LockObtainFailedException, IOException {
return lock.obtain();
}
+ @Override
public synchronized boolean isLocked() {
return lock.isLocked();
}
+ @Override
public synchronized void release() throws IOException {
if (isLocked()) {
verify((byte) 0);
@@ -108,10 +112,12 @@
this.port = port;
}
+ @Override
public synchronized Lock makeLock(String lockName) {
return new CheckedLock(lf.makeLock(lockName));
}
+ @Override
public synchronized void clearLock(String lockName)
throws IOException {
lf.clearLock(lockName);
Index: src/java/org/apache/lucene/util/AttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/util/AttributeImpl.java (revision 830378)
+++ src/java/org/apache/lucene/util/AttributeImpl.java (working copy)
@@ -48,6 +48,7 @@
*
* This method may be overridden by subclasses.
*/
+ @Override
public String toString() {
StringBuilder buffer = new StringBuilder();
Class clazz = this.getClass();
@@ -89,6 +90,7 @@
*
* see also {@link #equals(Object)}
*/
+ @Override
public abstract int hashCode();
/**
@@ -97,6 +99,7 @@
*
* see also {@link Object#equals(Object)}
*/
+ @Override
public abstract boolean equals(Object other);
/**
@@ -110,6 +113,7 @@
* Shallow clone. Subclasses must override this if they
* need to clone any members deeply,
*/
+ @Override
public Object clone() {
Object clone = null;
try {
Index: src/java/org/apache/lucene/util/AttributeSource.java
===================================================================
--- src/java/org/apache/lucene/util/AttributeSource.java (revision 830378)
+++ src/java/org/apache/lucene/util/AttributeSource.java (working copy)
@@ -59,6 +59,7 @@
private DefaultAttributeFactory() {}
+ @Override
public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
try {
return getClassForInterface(attClass).newInstance();
@@ -269,6 +270,7 @@
private AttributeImpl attribute;
private State next;
+ @Override
public Object clone() {
State clone = new State();
clone.attribute = (AttributeImpl) attribute.clone();
@@ -352,6 +354,7 @@
} while (state != null);
}
+ @Override
public int hashCode() {
int code = 0;
if (hasAttributes()) {
@@ -366,6 +369,7 @@
return code;
}
+ @Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
@@ -407,6 +411,7 @@
return false;
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder().append('(');
if (hasAttributes()) {
Index: src/java/org/apache/lucene/util/AverageGuessMemoryModel.java
===================================================================
--- src/java/org/apache/lucene/util/AverageGuessMemoryModel.java (revision 830378)
+++ src/java/org/apache/lucene/util/AverageGuessMemoryModel.java (working copy)
@@ -44,6 +44,7 @@
*
* @see org.apache.lucene.util.MemoryModel#getArraySize()
*/
+ @Override
public int getArraySize() {
return 16;
}
@@ -53,6 +54,7 @@
*
* @see org.apache.lucene.util.MemoryModel#getClassSize()
*/
+ @Override
public int getClassSize() {
return 8;
}
@@ -60,6 +62,7 @@
/* (non-Javadoc)
* @see org.apache.lucene.util.MemoryModel#getPrimitiveSize(java.lang.Class)
*/
+ @Override
public int getPrimitiveSize(Class clazz) {
return sizes.get(clazz).intValue();
}
@@ -67,6 +70,7 @@
/* (non-Javadoc)
* @see org.apache.lucene.util.MemoryModel#getReferenceSize()
*/
+ @Override
public int getReferenceSize() {
return 4;
}
Index: src/java/org/apache/lucene/util/BitVector.java
===================================================================
--- src/java/org/apache/lucene/util/BitVector.java (revision 830378)
+++ src/java/org/apache/lucene/util/BitVector.java (working copy)
@@ -49,6 +49,7 @@
this.size = size;
}
+ @Override
public Object clone() {
byte[] copyBits = new byte[bits.length];
System.arraycopy(bits, 0, copyBits, 0, bits.length);
Index: src/java/org/apache/lucene/util/DocIdBitSet.java
===================================================================
--- src/java/org/apache/lucene/util/DocIdBitSet.java (revision 830378)
+++ src/java/org/apache/lucene/util/DocIdBitSet.java (working copy)
@@ -31,11 +31,13 @@
this.bitSet = bitSet;
}
+ @Override
public DocIdSetIterator iterator() {
return new DocIdBitSetIterator(bitSet);
}
/** This DocIdSet implementation is cacheable. */
+ @Override
public boolean isCacheable() {
return true;
}
@@ -56,10 +58,12 @@
this.docId = -1;
}
+ @Override
public int docID() {
return docId;
}
+ @Override
public int nextDoc() {
// (docId + 1) on next line requires -1 initial value for docNr:
int d = bitSet.nextSetBit(docId + 1);
@@ -68,6 +72,7 @@
return docId;
}
+ @Override
public int advance(int target) {
int d = bitSet.nextSetBit(target);
// -1 returned by BitSet.nextSetBit() when exhausted
Index: src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
===================================================================
--- src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (revision 830378)
+++ src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (working copy)
@@ -297,9 +297,11 @@
this.readerKey = readerKey;
this.fieldName = fieldName;
}
+ @Override
public int hashCode() {
return System.identityHashCode(readerKey) * fieldName.hashCode();
}
+ @Override
public boolean equals(Object that) {
if (! (that instanceof ReaderField)) return false;
@@ -307,6 +309,7 @@
return (this.readerKey == other.readerKey &&
this.fieldName.equals(other.fieldName));
}
+ @Override
public String toString() {
return readerKey.toString() + "+" + fieldName;
}
@@ -352,6 +355,7 @@
* the Type and Msg, followed by each CacheEntry.toString() on it's
* own line prefaced by a tab character
*/
+ @Override
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append(getType()).append(": ");
@@ -383,6 +387,7 @@
private InsanityType(final String label) {
this.label = label;
}
+ @Override
public String toString() { return label; }
/**
Index: src/java/org/apache/lucene/util/OpenBitSet.java
===================================================================
--- src/java/org/apache/lucene/util/OpenBitSet.java (revision 830378)
+++ src/java/org/apache/lucene/util/OpenBitSet.java (working copy)
@@ -640,6 +640,7 @@
+ @Override
public Object clone() {
try {
OpenBitSet obs = (OpenBitSet)super.clone();
@@ -775,6 +776,7 @@
/** returns true if both sets have the same bits set */
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof OpenBitSet)) return false;
@@ -800,6 +802,7 @@
}
+ @Override
public int hashCode() {
long h = 0x98761234; // something non-zero for length==0
for (int i = bits.length; --i>=0;) {
Index: src/java/org/apache/lucene/util/OpenBitSetIterator.java
===================================================================
--- src/java/org/apache/lucene/util/OpenBitSetIterator.java (revision 830378)
+++ src/java/org/apache/lucene/util/OpenBitSetIterator.java (working copy)
@@ -128,6 +128,7 @@
}
******/
+ @Override
public int nextDoc() {
if (indexArray == 0) {
if (word != 0) {
@@ -155,6 +156,7 @@
return curDocId = (i<<6) + bitIndex;
}
+ @Override
public int advance(int target) {
indexArray = 0;
i = target >> 6;
@@ -185,6 +187,7 @@
return curDocId = (i<<6) + bitIndex;
}
+ @Override
public int docID() {
return curDocId;
}
Index: src/java/org/apache/lucene/util/Parameter.java
===================================================================
--- src/java/org/apache/lucene/util/Parameter.java (revision 830378)
+++ src/java/org/apache/lucene/util/Parameter.java (working copy)
@@ -53,6 +53,7 @@
return getClass() + " " + name;
}
+ @Override
public String toString() {
return name;
}
Index: src/java/org/apache/lucene/util/SortedVIntList.java
===================================================================
--- src/java/org/apache/lucene/util/SortedVIntList.java (revision 830378)
+++ src/java/org/apache/lucene/util/SortedVIntList.java (working copy)
@@ -206,10 +206,12 @@
}
}
+ @Override
public int docID() {
return doc;
}
+ @Override
public int nextDoc() {
if (bytePos >= lastBytePos) {
doc = NO_MORE_DOCS;
@@ -220,6 +222,7 @@
return doc;
}
+ @Override
public int advance(int target) {
while (bytePos < lastBytePos) {
advance();