Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (revision 724107)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (working copy)
@@ -18,6 +18,8 @@
*/
import org.apache.lucene.store.Directory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
@@ -34,6 +36,8 @@
public class ConcurrentMergeScheduler extends MergeScheduler {
+ private static final Logger logger = LoggerFactory.getLogger(ConcurrentMergeScheduler.class);
+
private int mergeThreadPriority = -1;
protected List mergeThreads = new ArrayList();
@@ -94,15 +98,6 @@
}
}
- private boolean verbose() {
- return writer != null && writer.verbose();
- }
-
- private void message(String message) {
- if (verbose())
- writer.message("CMS: " + message);
- }
-
private synchronized void initMergeThreadPriority() {
if (mergeThreadPriority == -1) {
// Default to slightly higher priority than our
@@ -119,12 +114,13 @@
public synchronized void sync() {
while(mergeThreadCount() > 0) {
- if (verbose())
- message("now wait for threads; currently " + mergeThreads.size() + " still running");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now wait for threads; currently " + mergeThreads.size() + " still running");
+ }
final int count = mergeThreads.size();
- if (verbose()) {
+ if (logger.isDebugEnabled()) {
for(int i=0;i<count;i++)
- message(" " + i + ": " + ((MergeThread) mergeThreads.get(i)));
+ logger.debug(" " + i + ": " + ((MergeThread) mergeThreads.get(i)));
}
try {
@@ -161,9 +157,9 @@
// these newly proposed merges will likely already be
// registered.
- if (verbose()) {
- message("now merge");
- message(" index: " + writer.segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("now merge");
+ logger.debug(" index: " + writer.segString());
}
// Iterate, pulling from the IndexWriter's queue of
@@ -176,8 +172,9 @@
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null) {
- if (verbose())
- message(" no more merges pending; now return");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" no more merges pending; now return");
+ }
return;
}
@@ -187,8 +184,9 @@
synchronized(this) {
while (mergeThreadCount() >= maxThreadCount) {
- if (verbose())
- message(" too many merge threads running; stalling...");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" too many merge threads running; stalling...");
+ }
try {
wait();
} catch (InterruptedException ie) {
@@ -196,8 +194,9 @@
}
}
- if (verbose())
- message(" consider merge " + merge.segString(dir));
+ if (logger.isDebugEnabled()) {
+ logger.debug(" consider merge " + merge.segString(dir));
+ }
assert mergeThreadCount() < maxThreadCount;
@@ -205,8 +204,9 @@
// merge:
final MergeThread merger = getMergeThread(writer, merge);
mergeThreads.add(merger);
- if (verbose())
- message(" launch new thread [" + merger.getName() + "]");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" launch new thread [" + merger.getName() + "]");
+ }
merger.start();
}
}
@@ -266,8 +266,9 @@
try {
- if (verbose())
- message(" merge thread: start");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" merge thread: start");
+ }
while(true) {
setRunningMerge(merge);
@@ -278,14 +279,16 @@
merge = writer.getNextMerge();
if (merge != null) {
writer.mergeInit(merge);
- if (verbose())
- message(" merge thread: do another merge " + merge.segString(dir));
+ if (logger.isDebugEnabled()) {
+ logger.debug(" merge thread: do another merge " + merge.segString(dir));
+ }
} else
break;
}
- if (verbose())
- message(" merge thread: done");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" merge thread: done");
+ }
} catch (Throwable exc) {
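A note on the conversion pattern used throughout this patch: SLF4J also supports parameterized messages, which defer building the message string until the level check passes. The following is a minimal illustrative sketch (not part of the patch; the class and method names are invented) of how the guarded concatenations above could alternatively be written:

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ParameterizedLoggingSketch {
  private static final Logger logger = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  // With the {} placeholder the message is only assembled when DEBUG is
  // enabled, so the explicit isDebugEnabled() guard becomes optional as long
  // as the arguments themselves are cheap to evaluate.
  void waitForThreads(List mergeThreads) {
    logger.debug("now wait for threads; currently {} still running", mergeThreads.size());
  }
}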
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (revision 724107)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (working copy)
@@ -23,6 +23,8 @@
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Gathers all Fieldables for a document under the same
@@ -35,6 +37,8 @@
final class DocFieldProcessorPerThread extends DocConsumerPerThread {
+ private static final Logger logger = LoggerFactory.getLogger(DocFieldProcessorPerThread.class);
+
float docBoost;
int fieldGen;
final DocFieldProcessor docFieldProcessor;
@@ -106,8 +110,9 @@
else
lastPerField.next = perField.next;
- if (state.docWriter.infoStream != null)
- state.docWriter.infoStream.println(" purge field=" + perField.fieldInfo.name);
+ if (logger.isDebugEnabled()) {
+ logger.debug(" purge field=" + perField.fieldInfo.name);
+ }
totalFieldCount--;
@@ -233,8 +238,9 @@
for(int i=0;i<fieldCount;i++)
fields[i].consumer.processFields(fields[i].fields, fields[i].fieldCount);
- if (docState.maxTermPrefix != null && docState.infoStream != null)
- docState.infoStream.println("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped. Please correct the analyzer to not produce such terms. The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
+ if (docState.maxTermPrefix != null && logger.isDebugEnabled()) {
+ logger.debug("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped. Please correct the analyzer to not produce such terms. The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
+ }
return consumer.finishDocument();
}
Index: src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerField.java (revision 724107)
+++ src/java/org/apache/lucene/index/DocInverterPerField.java (working copy)
@@ -19,11 +19,14 @@
import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.document.Fieldable;
+
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.Fieldable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Holds state for inverting all occurrences of a single
@@ -36,6 +39,8 @@
final class DocInverterPerField extends DocFieldConsumerPerField {
+ private static final Logger logger = LoggerFactory.getLogger(DocInverterPerField.class);
+
final private DocInverterPerThread perThread;
final private FieldInfo fieldInfo;
final InvertedDocConsumerPerField consumer;
@@ -183,8 +188,9 @@
fieldState.position++;
offsetEnd = fieldState.offset + offsetAttribute.endOffset();
if (++fieldState.length >= maxFieldLength) {
- if (docState.infoStream != null)
- docState.infoStream.println("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens");
+ if (logger.isDebugEnabled()) {
+ logger.debug("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens");
+ }
break;
}
}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 724107)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -17,28 +17,29 @@
* limitations under the License.
*/
+import java.io.IOException;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Query;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
-import org.apache.lucene.store.Directory;
import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ArrayUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.ArrayList;
-import java.util.Map.Entry;
-import java.text.NumberFormat;
-
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
@@ -109,6 +110,8 @@
final class DocumentsWriter {
+ private static final Logger logger = LoggerFactory.getLogger(DocumentsWriter.class);
+
IndexWriter writer;
Directory directory;
@@ -134,7 +137,6 @@
private DocFieldProcessor docFieldProcessor;
- PrintStream infoStream;
int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
Similarity similarity;
@@ -144,7 +146,6 @@
DocumentsWriter docWriter;
Analyzer analyzer;
int maxFieldLength;
- PrintStream infoStream;
Similarity similarity;
int docID;
Document doc;
@@ -169,7 +170,7 @@
void setNext(DocWriter next) {
this.next = next;
}
- };
+ }
/**
* The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
@@ -278,14 +279,6 @@
: true;
}
- /** If non-null, various details of indexing are printed
- * here. */
- synchronized void setInfoStream(PrintStream infoStream) {
- this.infoStream = infoStream;
- for(int i=0;i<threadStates.length;i++)
- threadStates[i].docState.infoStream = infoStream;
- }
-
synchronized void setMaxFieldLength(int maxFieldLength) {
this.maxFieldLength = maxFieldLength;
for(int i=0;i<threadStates.length;i++)
@@ -361,8 +354,9 @@
assert allThreadsIdle();
- if (infoStream != null)
- message("closeDocStore: " + openFiles.size() + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
+ if (logger.isDebugEnabled()) {
+ logger.debug("closeDocStore: " + openFiles.size() + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
+ }
boolean success = false;
@@ -394,11 +388,6 @@
return abortedFiles;
}
- void message(String message) {
- if (infoStream != null)
- writer.message("DW: " + message);
- }
-
final List openFiles = new ArrayList();
final List closedFiles = new ArrayList();
@@ -434,9 +423,10 @@
synchronized void abort() throws IOException {
try {
- if (infoStream != null)
- message("docWriter: now abort");
-
+ if (logger.isDebugEnabled()) {
+ logger.debug("docWriter: now abort");
+ }
+
// Forcefully remove waiting ThreadStates from line
waitQueue.abort();
@@ -551,8 +541,9 @@
docStoreOffset = numDocsInStore;
- if (infoStream != null)
- message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
+ if (logger.isDebugEnabled()) {
+ logger.debug("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
+ }
boolean success = false;
@@ -570,13 +561,12 @@
threads.add(threadStates[i].consumer);
consumer.flush(threads, flushState);
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
final long newSegmentSize = segmentSize(flushState.segmentName);
- String message = " oldRAMSize=" + numBytesUsed +
+ logger.debug(" oldRAMSize=" + numBytesUsed +
" newFlushedSize=" + newSegmentSize +
" docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
- " new/old=" + nf.format(100.0*newSegmentSize/numBytesUsed) + "%";
- message(message);
+ " new/old=" + nf.format(100.0*newSegmentSize/numBytesUsed) + "%");
}
flushedDocCount += flushState.numDocs;
@@ -898,12 +888,13 @@
if (!hasDeletes())
return false;
- if (infoStream != null)
- message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
+ if (logger.isDebugEnabled()) {
+ logger.debug("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
deletesFlushed.docIDs.size() + " deleted docIDs and " +
deletesFlushed.queries.size() + " deleted queries on " +
+ infos.size() + " segments.");
-
+ }
+
final int infosEnd = infos.size();
int docStart = 0;
@@ -1108,8 +1099,8 @@
// TODO FI: this is not flexible -- we can't hardwire
// extensions in here:
private long segmentSize(String segmentName) throws IOException {
- // Used only when infoStream != null
- assert infoStream != null;
+ // Used only when debug is enabled
+ assert logger.isDebugEnabled();
long size = directory.fileLength(segmentName + ".tii") +
directory.fileLength(segmentName + ".tis") +
@@ -1275,14 +1266,15 @@
if (numBytesAlloc > freeTrigger) {
- if (infoStream != null)
- message(" RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) +
+ if (logger.isDebugEnabled()) {
+ logger.debug(" RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) +
" vs trigger=" + toMB(flushTrigger) +
" allocMB=" + toMB(numBytesAlloc) +
" vs trigger=" + toMB(freeTrigger) +
" byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) +
" charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*CHAR_NUM_BYTE));
-
+ }
+
final long startBytesAlloc = numBytesAlloc;
int iter = 0;
@@ -1299,11 +1291,11 @@
if (0 == byteBlockAllocator.freeByteBlocks.size() && 0 == freeCharBlocks.size() && 0 == freeIntBlocks.size() && !any) {
// Nothing else to free -- must flush now.
bufferIsFull = numBytesUsed > flushTrigger;
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
if (numBytesUsed > flushTrigger)
- message(" nothing to free; now set bufferIsFull");
+ logger.debug(" nothing to free; now set bufferIsFull");
else
- message(" nothing to free");
+ logger.debug(" nothing to free");
}
assert numBytesUsed <= numBytesAlloc;
break;
@@ -1332,9 +1324,9 @@
iter++;
}
- if (infoStream != null)
- message(" after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc)/1024./1024.) + " usedMB=" + nf.format(numBytesUsed/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.));
-
+ if (logger.isDebugEnabled()) {
+ logger.debug(" after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc)/1024./1024.) + " usedMB=" + nf.format(numBytesUsed/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.));
+ }
} else {
// If we have not crossed the 100% mark, but have
// crossed the 95% mark of RAM we are actually
@@ -1344,11 +1336,11 @@
synchronized(this) {
if (numBytesUsed > flushTrigger) {
- if (infoStream != null)
- message(" RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
+ if (logger.isDebugEnabled()) {
+ logger.debug(" RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
" allocMB=" + nf.format(numBytesAlloc/1024./1024.) +
" triggerMB=" + nf.format(flushTrigger/1024./1024.));
-
+ }
bufferIsFull = true;
}
}
Index: src/java/org/apache/lucene/index/DocumentsWriterThreadState.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriterThreadState.java (revision 724107)
+++ src/java/org/apache/lucene/index/DocumentsWriterThreadState.java (working copy)
@@ -37,7 +37,6 @@
this.docWriter = docWriter;
docState = new DocumentsWriter.DocState();
docState.maxFieldLength = docWriter.maxFieldLength;
- docState.infoStream = docWriter.infoStream;
docState.similarity = docWriter.similarity;
docState.docWriter = docWriter;
consumer = docWriter.consumer.addThread(this);
Index: src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 724107)
+++ src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy)
@@ -18,6 +18,8 @@
*/
import org.apache.lucene.store.Directory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.FileNotFoundException;
@@ -76,6 +78,8 @@
final class IndexFileDeleter {
+ private static final Logger logger = LoggerFactory.getLogger(IndexFileDeleter.class);
+
/* Files that we tried to delete but failed (likely
* because they are open and we are running on Windows),
* so we will retry them again later: */
@@ -100,7 +104,6 @@
/* Commits that the IndexDeletionPolicy have decided to delete: */
private List commitsToDelete = new ArrayList();
- private PrintStream infoStream;
private Directory directory;
private IndexDeletionPolicy policy;
private DocumentsWriter docWriter;
@@ -111,17 +114,29 @@
* infoStream != null */
public static boolean VERBOSE_REF_COUNTS = false;
- void setInfoStream(PrintStream infoStream) {
- this.infoStream = infoStream;
- if (infoStream != null)
- message("setInfoStream deletionPolicy=" + policy);
- }
-
private void message(String message) {
- infoStream.println("IFD [" + Thread.currentThread().getName() + "]: " + message);
+ logger.debug("IFD [" + Thread.currentThread().getName() + "]: " + message);
}
/**
+ * Initialize the deleter: find all previous commits in the Directory, incref
+ * the files they reference, call the policy to let it delete commits. This
+ * will remove any files not referenced by any of the commits.
+ *
+ * @throws CorruptIndexException
+ * if the index is corrupt
+ * @throws IOException
+ * if there is a low-level IO error
+ * @deprecated use
+ * {@link #IndexFileDeleter(Directory, IndexDeletionPolicy, SegmentInfos, DocumentsWriter)}
+ * instead since infoStream is no longer in use.
+ */
+ public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
+ throws CorruptIndexException, IOException {
+ this(directory, policy, segmentInfos, docWriter);
+ }
+
+ /**
* Initialize the deleter: find all previous commits in
* the Directory, incref the files they reference, call
* the policy to let it delete commits. This will remove
@@ -129,15 +144,15 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
+ public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, DocumentsWriter docWriter)
throws CorruptIndexException, IOException {
this.docWriter = docWriter;
- this.infoStream = infoStream;
- if (infoStream != null)
+ if (logger.isDebugEnabled()) {
message("init: current segments file is \"" + segmentInfos.getCurrentSegmentFileName() + "\"; deletionPolicy=" + policy);
-
+ }
+
this.policy = policy;
this.directory = directory;
@@ -165,7 +180,7 @@
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen) {
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("init: load commit \"" + fileName + "\"");
}
SegmentInfos sis = new SegmentInfos();
@@ -179,7 +194,7 @@
// file segments_X exists when in fact it
// doesn't. So, we catch this and handle it
// as if the file does not exist
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
}
sis = null;
@@ -211,8 +226,9 @@
} catch (IOException e) {
throw new CorruptIndexException("failed to locate current segments_N file");
}
- if (infoStream != null)
+ if (logger.isDebugEnabled()) {
message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
+ }
currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
commits.add(currentCommitPoint);
incRef(sis, true);
@@ -229,7 +245,7 @@
String fileName = (String) it.next();
RefCount rc = (RefCount) refCounts.get(fileName);
if (0 == rc.count) {
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("init: removing unreferenced file \"" + fileName + "\"");
}
deleteFile(fileName);
@@ -263,7 +279,7 @@
// the now-deleted commits:
for(int i=0;i<size;i++) {
CommitPoint commit = (CommitPoint) commitsToDelete.get(i);
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("deleteCommits: now decRef commit \"" + commit.getSegmentsFileName() + "\"");
}
int size2 = commit.files.size();
@@ -323,7 +339,7 @@
!refCounts.containsKey(fileName) &&
!fileName.equals(IndexFileNames.SEGMENTS_GEN)) {
// Unreferenced file, so remove it
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("refresh [prefix=" + segmentName + "]: removing newly created unreferenced file \"" + fileName + "\"");
}
deleteFile(fileName);
@@ -353,8 +369,9 @@
deletable = null;
int size = oldDeletable.size();
for(int i=0;i<size;i++) {
- if (infoStream != null)
+ if (logger.isDebugEnabled()) {
message("delete pending file " + oldDeletable.get(i));
+ }
deleteFile((String) oldDeletable.get(i));
}
}
@@ -382,7 +399,7 @@
*/
public void checkpoint(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [" + segmentInfos.size() + " segments " + "; isCommit = " + isCommit + "]");
}
@@ -457,7 +474,7 @@
for(int i=0;i<size;i++) {
String fileName = (String) files.get(i);
RefCount rc = getRefCount(fileName);
- if (infoStream != null && VERBOSE_REF_COUNTS) {
+ if (VERBOSE_REF_COUNTS && logger.isDebugEnabled()) {
message(" IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
}
rc.IncRef();
@@ -473,7 +490,7 @@
void decRef(String fileName) throws IOException {
RefCount rc = getRefCount(fileName);
- if (infoStream != null && VERBOSE_REF_COUNTS) {
+ if (VERBOSE_REF_COUNTS && logger.isDebugEnabled()) {
message(" DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
}
if (0 == rc.DecRef()) {
@@ -525,7 +542,7 @@
void deleteFile(String fileName)
throws IOException {
try {
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("delete \"" + fileName + "\"");
}
directory.deleteFile(fileName);
@@ -539,7 +556,7 @@
// the file is open in another process, and queue
// the file for subsequent deletion.
- if (infoStream != null) {
+ if (logger.isDebugEnabled()) {
message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.toString() + "; Will re-try later.");
}
if (deletable == null) {
Index: src/java/org/apache/lucene/index/IndexModifier.java
===================================================================
--- src/java/org/apache/lucene/index/IndexModifier.java (revision 724107)
+++ src/java/org/apache/lucene/index/IndexModifier.java (working copy)
@@ -17,16 +17,15 @@
* limitations under the License.
*/
+import java.io.File;
+import java.io.IOException;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-
/**
* <p>[Note that as of <b>2.1</b>, all but one of the
* methods in this class are available via {@link
@@ -100,7 +99,6 @@
protected boolean open = false;
// Lucene defaults:
- protected PrintStream infoStream = null;
protected boolean useCompoundFile = true;
protected int maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;
protected int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
@@ -205,7 +203,6 @@
// because it synchronizes on the directory which can
// cause deadlock
indexWriter.setMergeScheduler(new SerialMergeScheduler());
- indexWriter.setInfoStream(infoStream);
indexWriter.setUseCompoundFile(useCompoundFile);
if (maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH)
indexWriter.setMaxBufferedDocs(maxBufferedDocs);
@@ -382,15 +379,10 @@
* @see IndexWriter#setInfoStream(PrintStream)
* @throws IllegalStateException if the index is closed
*/
- public void setInfoStream(PrintStream infoStream) {
- synchronized(directory) {
- assureOpen();
- if (indexWriter != null) {
- indexWriter.setInfoStream(infoStream);
- }
- this.infoStream = infoStream;
- }
- }
+ // TODO: reinstate and deprecate
+// public void setInfoStream(PrintStream infoStream) {
+// // Do nothing. The method is deprecated; callers should configure a logging framework instead.
+// }
/**
* @see IndexModifier#setInfoStream(PrintStream)
@@ -400,13 +392,11 @@
* be obtained)
* @throws IOException if there is a low-level IO error
*/
- public PrintStream getInfoStream() throws CorruptIndexException, LockObtainFailedException, IOException {
- synchronized(directory) {
- assureOpen();
- createIndexWriter();
- return indexWriter.getInfoStream();
- }
- }
+ // TODO: reinstate and deprecate
+// public PrintStream getInfoStream() throws CorruptIndexException, LockObtainFailedException, IOException {
+// // The method is deprecated; callers should configure a logging framework instead.
+// return null;
+// }
/**
* Setting to turn on usage of a compound file. When on, multiple files
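Regarding the "reinstate and deprecate" TODOs above: the reinstated methods would presumably become deprecated no-ops. A hypothetical sketch of what that could look like (illustrative only, not part of the patch):

import java.io.PrintStream;

class IndexModifierInfoStreamStubs {
  /** @deprecated Does nothing; index diagnostics are now emitted through SLF4J. */
  public void setInfoStream(PrintStream infoStream) {
    // Intentionally a no-op: configure an SLF4J binding to see index activity.
  }

  /** @deprecated Always returns null; index diagnostics are now emitted through SLF4J. */
  public PrintStream getInfoStream() {
    return null;
  }
}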
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 724107)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -29,6 +29,8 @@
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.Constants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@@ -220,6 +222,8 @@
*/
public class IndexWriter {
+ private static final Logger logger = LoggerFactory.getLogger(IndexWriter.class);
+
/**
* Default value for the write lock timeout (1,000).
* @see #setDefaultWriteLockTimeout
@@ -308,9 +312,6 @@
private final static int MERGE_READ_BUFFER_SIZE = 4096;
// Used for printing messages
- private static Object MESSAGE_ID_LOCK = new Object();
- private static int MESSAGE_ID = 0;
- private int messageID = -1;
volatile private boolean hitOOM;
private Directory directory; // where this index resides
@@ -416,24 +417,16 @@
}
/**
- * Prints a message to the infoStream (if non-null),
- * prefixed with the identifying information for this
- * writer and the thread that's calling it.
+ * Prints a message to the infoStream (if non-null), prefixed with the
+ * identifying information for this writer and the thread that's calling it.
+ *
+ * @deprecated we use SLF4J to output logging messages. You can use Java's
+ * built-in logging by enabling logging for this class, or bind
+ * another logging framework. Read more at http://www.slf4j.org/docs.html.
*/
public void message(String message) {
- if (infoStream != null)
- infoStream.println("IW " + messageID + " [" + Thread.currentThread().getName() + "]: " + message);
}
- private synchronized void setMessageID(PrintStream infoStream) {
- if (infoStream != null && messageID == -1) {
- synchronized(MESSAGE_ID_LOCK) {
- messageID = MESSAGE_ID++;
- }
- }
- this.infoStream = infoStream;
- }
-
/**
* Casts current mergePolicy to LogMergePolicy, and throws
* an exception if the mergePolicy is not a LogMergePolicy.
@@ -1137,7 +1130,6 @@
this.closeDir = closeDir;
directory = d;
analyzer = a;
- setMessageID(defaultInfoStream);
this.maxFieldLength = maxFieldLength;
if (indexingChain == null)
@@ -1181,8 +1173,9 @@
oldInfos.read(directory, commit.getSegmentsFileName());
segmentInfos.replace(oldInfos);
changeCount++;
- if (infoStream != null)
- message("init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
+ if (logger.isDebugEnabled()) {
+ logger.debug("init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
+ }
}
// We assume that this segments_N was previously
@@ -1199,14 +1192,13 @@
setRollbackSegmentInfos(segmentInfos);
docWriter = new DocumentsWriter(directory, this, indexingChain);
- docWriter.setInfoStream(infoStream);
docWriter.setMaxFieldLength(maxFieldLength);
// Default deleter (for backwards compatibility) is
// KeepOnlyLastCommitDeleter:
deleter = new IndexFileDeleter(directory,
deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
- segmentInfos, infoStream, docWriter);
+ segmentInfos, docWriter);
if (deleter.startingCommitDeleted)
// Deletion policy deleted the "head" commit point.
@@ -1217,9 +1209,9 @@
pushMaxBufferedDocs();
- if (infoStream != null) {
- message("init: create=" + create);
- messageState();
+ if (logger.isDebugEnabled()) {
+ logger.debug("init: create=" + create);
+ logger.debug(messageState());
}
} catch (IOException e) {
@@ -1250,8 +1242,9 @@
mergePolicy.close();
mergePolicy = mp;
pushMaxBufferedDocs();
- if (infoStream != null)
- message("setMergePolicy " + mp);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setMergePolicy " + mp);
+ }
}
/**
@@ -1276,8 +1269,9 @@
this.mergeScheduler.close();
}
this.mergeScheduler = mergeScheduler;
- if (infoStream != null)
- message("setMergeScheduler " + mergeScheduler);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setMergeScheduler " + mergeScheduler);
+ }
}
/**
@@ -1347,8 +1341,9 @@
ensureOpen();
this.maxFieldLength = maxFieldLength;
docWriter.setMaxFieldLength(maxFieldLength);
- if (infoStream != null)
- message("setMaxFieldLength " + maxFieldLength);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setMaxFieldLength " + maxFieldLength);
+ }
}
/**
@@ -1391,8 +1386,9 @@
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
docWriter.setMaxBufferedDocs(maxBufferedDocs);
pushMaxBufferedDocs();
- if (infoStream != null)
- message("setMaxBufferedDocs " + maxBufferedDocs);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setMaxBufferedDocs " + maxBufferedDocs);
+ }
}
/**
@@ -1407,8 +1403,9 @@
LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
final int maxBufferedDocs = docWriter.getMaxBufferedDocs();
if (lmp.getMinMergeDocs() != maxBufferedDocs) {
- if (infoStream != null)
- message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
+ }
lmp.setMinMergeDocs(maxBufferedDocs);
}
}
@@ -1452,8 +1449,9 @@
throw new IllegalArgumentException(
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
docWriter.setRAMBufferSizeMB(mb);
- if (infoStream != null)
- message("setRAMBufferSizeMB " + mb);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setRAMBufferSizeMB " + mb);
+ }
}
/**
@@ -1482,8 +1480,9 @@
throw new IllegalArgumentException(
"maxBufferedDeleteTerms must at least be 1 when enabled");
docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
- if (infoStream != null)
- message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
+ if (logger.isDebugEnabled()) {
+ logger.debug("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
+ }
}
/**
@@ -1556,38 +1555,36 @@
maxSyncPauseSeconds = seconds;
}
- /** If non-null, this will be the default infoStream used
- * by a newly instantiated IndexWriter.
- * @see #setInfoStream
+ /**
+ * If non-null, this will be the default infoStream used by a newly
+ * instantiated IndexWriter.
+ *
+ * @deprecated this method does nothing. We use SLF4J for logging messages.
*/
public static void setDefaultInfoStream(PrintStream infoStream) {
- IndexWriter.defaultInfoStream = infoStream;
+ // Do nothing.
}
/**
* Returns the current default infoStream for newly
* instantiated IndexWriters.
- * @see #setDefaultInfoStream
+ * @deprecated always returns null. We now use SLF4J for logging messages.
*/
public static PrintStream getDefaultInfoStream() {
- return IndexWriter.defaultInfoStream;
+ return null;
}
/** If non-null, information about merges, deletes and a
* message when maxFieldLength is reached will be printed
* to this.
+ * @deprecated this method does nothing. We use SLF4J for logging messages.
*/
public void setInfoStream(PrintStream infoStream) {
- ensureOpen();
- setMessageID(infoStream);
- docWriter.setInfoStream(infoStream);
- deleter.setInfoStream(infoStream);
- if (infoStream != null)
- messageState();
+ // Do nothing
}
- private void messageState() {
- message("setInfoStream: dir=" + directory +
+ private String messageState() {
+ return "setInfoStream: dir=" + directory +
" autoCommit=" + autoCommit +
" mergePolicy=" + mergePolicy +
" mergeScheduler=" + mergeScheduler +
@@ -1595,23 +1592,17 @@
" maxBufferedDocs=" + docWriter.getMaxBufferedDocs() +
" maxBuffereDeleteTerms=" + docWriter.getMaxBufferedDeleteTerms() +
" maxFieldLength=" + maxFieldLength +
- " index=" + segString());
+ " index=" + segString();
}
/**
* Returns the current infoStream in use by this writer.
- * @see #setInfoStream
+ * @deprecated always returns null. We now use SLF4J for logging messages.
*/
public PrintStream getInfoStream() {
- ensureOpen();
- return infoStream;
+ return null;
}
- /** Returns true if verbosing is enabled (i.e., infoStream != null). */
- public boolean verbose() {
- return infoStream != null;
- }
-
/**
* Sets the maximum time to wait for a write lock (in milliseconds) for this instance of IndexWriter. @see
* @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter.
@@ -1748,9 +1739,13 @@
docWriter.pauseAllThreads();
try {
- if (infoStream != null)
- message("now flush at close");
-
+ if (logger.isDebugEnabled()) {
+ logger.debug("now flush at close");
+ }
+
+ // Required for TestIndexWriter.testOutOfMemoryErrorCausesCloseToFail().
+ testPoint("now flush at close");
+
docWriter.close();
// Only allow a new merge to be triggered if we are
@@ -1768,13 +1763,15 @@
mergeScheduler.close();
- if (infoStream != null)
- message("now call final commit()");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now call final commit()");
+ }
commit(0);
- if (infoStream != null)
- message("at close: " + segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("at close: " + segString());
+ }
synchronized(this) {
docWriter = null;
@@ -1801,8 +1798,9 @@
if (!closed) {
if (docWriter != null)
docWriter.resumeAllThreads();
- if (infoStream != null)
- message("hit exception while closing");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception while closing");
+ }
}
}
}
@@ -1823,8 +1821,8 @@
docStoreSegment = docWriter.closeDocStore();
success = true;
} finally {
- if (!success && infoStream != null) {
- message("hit exception closing doc store segment");
+ if (!success && logger.isDebugEnabled()) {
+ logger.debug("hit exception closing doc store segment");
}
}
@@ -1850,8 +1848,9 @@
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception building compound file doc store for segment " + docStoreSegment);
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception building compound file doc store for segment " + docStoreSegment);
+ }
deleter.deleteFile(compoundFileName);
}
}
@@ -2046,8 +2045,9 @@
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception adding document");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception adding document");
+ }
synchronized (this) {
// If docWriter has some aborted files that were
@@ -2206,8 +2206,9 @@
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception updating document");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception updating document");
+ }
synchronized (this) {
// If docWriter has some aborted files that were
@@ -2269,11 +2270,6 @@
}
}
- /** If non-null, information about merges will be printed to this.
- */
- private PrintStream infoStream = null;
- private static PrintStream defaultInfoStream = null;
-
/**
* Requests an "optimize" operation on an index, priming the index
* for the fastest available search. Traditionally this has meant
@@ -2404,8 +2400,9 @@
if (maxNumSegments < 1)
throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
- if (infoStream != null)
- message("optimize: index now " + segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("optimize: index now " + segString());
+ }
flush(true, false, true);
@@ -2503,8 +2500,9 @@
throws CorruptIndexException, IOException {
ensureOpen();
- if (infoStream != null)
- message("expungeDeletes: index now " + segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("expungeDeletes: index now " + segString());
+ }
MergePolicy.MergeSpecification spec;
@@ -2689,8 +2687,9 @@
boolean success = false;
try {
- if (infoStream != null)
- message("now start transaction");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now start transaction");
+ }
assert docWriter.getNumBufferedDeleteTerms() == 0 :
"calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.getNumBufferedDeleteTerms();
@@ -2728,8 +2727,9 @@
if (localAutoCommit) {
- if (infoStream != null)
- message("flush at startTransaction");
+ if (logger.isDebugEnabled()) {
+ logger.debug("flush at startTransaction");
+ }
flush(true, false, false);
@@ -2753,8 +2753,9 @@
*/
private synchronized void rollbackTransaction() throws IOException {
- if (infoStream != null)
- message("now rollback transaction");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now rollback transaction");
+ }
// First restore autoCommit in case we hit an exception below:
autoCommit = localAutoCommit;
@@ -2806,8 +2807,9 @@
*/
private synchronized void commitTransaction() throws IOException {
- if (infoStream != null)
- message("now commit transaction");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now commit transaction");
+ }
// First restore autoCommit in case we hit an exception below:
autoCommit = localAutoCommit;
@@ -2822,8 +2824,9 @@
success = true;
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception committing transaction");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception committing transaction");
+ }
rollbackTransaction();
}
}
@@ -2924,8 +2927,9 @@
docWriter.resumeAllThreads();
closing = false;
notifyAll();
- if (infoStream != null)
- message("hit exception during rollback");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception during rollback");
+ }
}
}
}
@@ -2942,8 +2946,9 @@
Iterator it = pendingMerges.iterator();
while(it.hasNext()) {
final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
- if (infoStream != null)
- message("now abort pending merge " + merge.segString(directory));
+ if (logger.isDebugEnabled()) {
+ logger.debug("now abort pending merge " + merge.segString(directory));
+ }
merge.abort();
mergeFinish(merge);
}
@@ -2952,8 +2957,9 @@
it = runningMerges.iterator();
while(it.hasNext()) {
final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
- if (infoStream != null)
- message("now abort running merge " + merge.segString(directory));
+ if (logger.isDebugEnabled()) {
+ logger.debug("now abort running merge " + merge.segString(directory));
+ }
merge.abort();
}
@@ -2969,8 +2975,9 @@
// because the merge threads periodically check if
// they are aborted.
while(runningMerges.size() > 0) {
- if (infoStream != null)
- message("now wait for " + runningMerges.size() + " running merge to abort");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now wait for " + runningMerges.size() + " running merge to abort");
+ }
doWait();
}
@@ -2979,9 +2986,9 @@
assert 0 == mergingSegments.size();
- if (infoStream != null)
- message("all running merges have aborted");
-
+ if (logger.isDebugEnabled()) {
+ logger.debug("all running merges have aborted");
+ }
} else {
// Ensure any running addIndexes finishes. It's fine
// if a new one attempts to start because from our
@@ -3056,8 +3063,9 @@
try {
- if (infoStream != null)
- message("flush at addIndexes");
+ if (logger.isDebugEnabled()) {
+ logger.debug("flush at addIndexes");
+ }
flush(true, false, true);
boolean success = false;
@@ -3179,8 +3187,9 @@
docWriter.pauseAllThreads();
try {
- if (infoStream != null)
- message("flush at addIndexesNoOptimize");
+ if (logger.isDebugEnabled()) {
+ logger.debug("flush at addIndexesNoOptimize");
+ }
flush(true, false, true);
boolean success = false;
@@ -3438,8 +3447,9 @@
}
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception in addIndexes during merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception in addIndexes during merge");
+ }
rollbackTransaction();
} else {
commitTransaction();
@@ -3480,8 +3490,9 @@
deleter.decRef(files);
if (!success) {
- if (infoStream != null)
- message("hit exception building compound file in addIndexes during merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception building compound file in addIndexes during merge");
+ }
rollbackTransaction();
} else {
@@ -3587,8 +3598,9 @@
if (!autoCommit && pendingCommit != null)
throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");
- if (infoStream != null)
- message("prepareCommit: flush");
+ if (logger.isDebugEnabled()) {
+ logger.debug("prepareCommit: flush");
+ }
flush(true, true, true);
@@ -3664,15 +3676,18 @@
waitForCommit();
try {
- if (infoStream != null)
- message("commit: start");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit: start");
+ }
if (autoCommit || pendingCommit == null) {
- if (infoStream != null)
- message("commit: now prepare");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit: now prepare");
+ }
prepareCommit(commitUserData, true);
- } else if (infoStream != null)
- message("commit: already prepared");
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("commit: already prepared");
+ }
finishCommit();
} finally {
@@ -3684,11 +3699,13 @@
if (pendingCommit != null) {
try {
- if (infoStream != null)
- message("commit: pendingCommit != null");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit: pendingCommit != null");
+ }
pendingCommit.finishCommit(directory);
- if (infoStream != null)
- message("commit: wrote segments file \"" + pendingCommit.getCurrentSegmentFileName() + "\"");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit: wrote segments file \"" + pendingCommit.getCurrentSegmentFileName() + "\"");
+ }
lastCommitChangeCount = pendingCommitChangeCount;
segmentInfos.updateGeneration(pendingCommit);
segmentInfos.setUserData(pendingCommit.getUserData());
@@ -3700,11 +3717,13 @@
notifyAll();
}
- } else if (infoStream != null)
- message("commit: pendingCommit == null; skip");
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("commit: pendingCommit == null; skip");
+ }
- if (infoStream != null)
- message("commit: done");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit: done");
+ }
}
/**
@@ -3776,8 +3795,8 @@
boolean docStoreIsCompoundFile = false;
- if (infoStream != null) {
- message(" flush: segment=" + docWriter.getSegment() +
+ if (logger.isDebugEnabled()) {
+ logger.debug(" flush: segment=" + docWriter.getSegment() +
" docStoreSegment=" + docWriter.getDocStoreSegment() +
" docStoreOffset=" + docStoreOffset +
" flushDocs=" + flushDocs +
@@ -3785,7 +3804,7 @@
" flushDocStores=" + flushDocStores +
" numDocs=" + numDocs +
" numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
- message(" index before flush " + segString());
+ logger.debug(" index before flush " + segString());
}
// Check if the doc stores must be separately flushed
@@ -3793,8 +3812,9 @@
// to flush, reference it
if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
// We must separately flush the doc store
- if (infoStream != null)
- message(" flush shared docStore segment " + docStoreSegment);
+ if (logger.isDebugEnabled()) {
+ logger.debug(" flush shared docStore segment " + docStoreSegment);
+ }
docStoreIsCompoundFile = flushDocStores();
flushDocStores = false;
@@ -3815,8 +3835,9 @@
success = true;
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception flushing segment " + segment);
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception flushing segment " + segment);
+ }
deleter.refresh(segment);
}
}
@@ -3866,8 +3887,9 @@
success = true;
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception creating compound file for newly flushed segment " + segment);
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception creating compound file for newly flushed segment " + segment);
+ }
deleter.deleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
}
}
@@ -3942,8 +3964,9 @@
final SegmentInfos sourceSegmentsClone = merge.segmentsClone;
final SegmentInfos sourceSegments = merge.segments;
- if (infoStream != null)
- message("commitMergeDeletes " + merge.segString(directory));
+ if (logger.isDebugEnabled()) {
+ logger.debug("commitMergeDeletes " + merge.segString(directory));
+ }
// Carefully merge deletes that occurred after we
// started merging:
@@ -4019,8 +4042,9 @@
if (deletes != null) {
merge.info.advanceDelGen();
- if (infoStream != null)
- message("commit merge deletes to " + merge.info.getDelFileName());
+ if (logger.isDebugEnabled()) {
+ logger.debug("commit merge deletes to " + merge.info.getDelFileName());
+ }
deletes.write(directory, merge.info.getDelFileName());
merge.info.setDelCount(delCount);
assert delCount == deletes.count();
@@ -4035,8 +4059,9 @@
if (hitOOM)
return false;
- if (infoStream != null)
- message("commitMerge: " + merge.segString(directory) + " index=" + segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("commitMerge: " + merge.segString(directory) + " index=" + segString());
+ }
assert merge.registerDone;
@@ -4047,8 +4072,9 @@
// file that current segments does not reference), we
// abort this merge
if (merge.isAborted()) {
- if (infoStream != null)
- message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+ if (logger.isDebugEnabled()) {
+ logger.debug("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+ }
deleter.refresh(merge.info.name);
return false;
@@ -4153,8 +4179,9 @@
try {
mergeInit(merge);
- if (infoStream != null)
- message("now merge\n merge=" + merge.segString(directory) + "\n merge=" + merge + "\n index=" + segString());
+ if (logger.isDebugEnabled()) {
+ logger.debug("now merge\n merge=" + merge.segString(directory) + "\n merge=" + merge + "\n index=" + segString());
+ }
mergeMiddle(merge);
success = true;
@@ -4168,8 +4195,9 @@
mergeFinish(merge);
if (!success) {
- if (infoStream != null)
- message("hit exception during merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception during merge");
+ }
if (merge.info != null && !segmentInfos.contains(merge.info))
deleter.refresh(merge.info.name);
}
@@ -4222,8 +4250,9 @@
pendingMerges.add(merge);
- if (infoStream != null)
- message("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]");
+ if (logger.isDebugEnabled()) {
+ logger.debug("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]");
+ }
merge.mergeGen = mergeGen;
merge.isExternal = isExternal;
@@ -4363,8 +4392,9 @@
// TODO: if we know we are about to merge away these
// newly flushed doc store files then we should not
// make compound file out of them...
- if (infoStream != null)
- message("now flush at merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug("now flush at merge");
+ }
doFlush(true, false);
//flush(false, true, false);
}
@@ -4480,8 +4510,9 @@
SegmentInfos sourceSegmentsClone = merge.segmentsClone;
final int numSegments = sourceSegments.size();
- if (infoStream != null)
- message("merging " + merge.segString(directory));
+ if (logger.isDebugEnabled()) {
+ logger.debug("merging " + merge.segString(directory));
+ }
merger = new SegmentMerger(this, mergedName, merge);
@@ -4498,8 +4529,8 @@
merger.add(reader);
totDocCount += reader.numDocs();
}
- if (infoStream != null) {
- message("merge: total "+totDocCount+" docs");
+ if (logger.isDebugEnabled()) {
+ logger.debug("merge: total "+totDocCount+" docs");
}
merge.checkAborted(directory);
@@ -4555,8 +4586,9 @@
handleMergeException(t, merge);
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception creating compound file during merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception creating compound file during merge");
+ }
synchronized(this) {
deleter.deleteFile(compoundFileName);
}
@@ -4564,8 +4596,9 @@
}
if (merge.isAborted()) {
- if (infoStream != null)
- message("abort merge after building CFS");
+ if (logger.isDebugEnabled()) {
+ logger.debug("abort merge after building CFS");
+ }
deleter.deleteFile(compoundFileName);
return 0;
}
@@ -4619,8 +4652,9 @@
success = true;
} finally {
if (!success) {
- if (infoStream != null)
- message("hit exception flushing deletes");
+ if (logger.isDebugEnabled()) {
+ logger.debug("hit exception flushing deletes");
+ }
// Carefully remove any partially written .del
// files
@@ -4790,8 +4824,9 @@
try {
- if (infoStream != null)
- message("startCommit(): start sizeInBytes=" + sizeInBytes);
+ if (logger.isDebugEnabled()) {
+ logger.debug("startCommit(): start sizeInBytes=" + sizeInBytes);
+ }
if (sizeInBytes > 0)
syncPause(sizeInBytes);
@@ -4821,8 +4856,9 @@
assert lastCommitChangeCount <= changeCount;
if (changeCount == lastCommitChangeCount) {
- if (infoStream != null)
- message(" skip startCommit(): no changes pending");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" skip startCommit(): no changes pending");
+ }
return;
}
@@ -4832,8 +4868,9 @@
// threads can be doing this at once, if say a large
// merge and a small merge finish at the same time:
- if (infoStream != null)
- message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount);
+ if (logger.isDebugEnabled()) {
+ logger.debug("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount);
+ }
toSync = (SegmentInfos) segmentInfos.clone();
@@ -4869,8 +4906,9 @@
// Because we incRef'd this commit point, above,
// the file had better exist:
assert directory.fileExists(fileName): "file '" + fileName + "' does not exist dir=" + directory;
- if (infoStream != null)
- message("now sync " + fileName);
+ if (logger.isDebugEnabled()) {
+ logger.debug("now sync " + fileName);
+ }
directory.sync(fileName);
success = true;
} finally {
@@ -4901,8 +4939,9 @@
// Wait now for any current pending commit to complete:
while(pendingCommit != null) {
- if (infoStream != null)
- message("wait for existing pendingCommit to finish...");
+ if (logger.isDebugEnabled()) {
+ logger.debug("wait for existing pendingCommit to finish...");
+ }
doWait();
}
@@ -4931,15 +4970,18 @@
pendingCommitChangeCount = myChangeCount;
success = true;
} finally {
- if (!success && infoStream != null)
- message("hit exception committing segments file");
+ if (!success && logger.isDebugEnabled()) {
+ logger.debug("hit exception committing segments file");
+ }
}
- } else if (infoStream != null)
- message("sync superseded by newer infos");
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("sync superseded by newer infos");
+ }
}
- if (infoStream != null)
- message("done all syncs");
+ if (logger.isDebugEnabled()) {
+ logger.debug("done all syncs");
+ }
assert testPoint("midStartCommitSuccess");
@@ -5042,6 +5084,7 @@
public static final MaxFieldLength LIMITED
= new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
}
+
// Used only by assert for testing. Current points:
// startDoFlush
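Because IndexWriter.setInfoStream() is now a no-op, applications that previously called writer.setInfoStream(System.out) need to enable DEBUG for the Lucene loggers through whatever SLF4J binding they deploy. A minimal sketch assuming the slf4j-jdk14 binding is on the classpath (SLF4J DEBUG maps to java.util.logging FINE; the binding choice and logger name are the application's decision):

import java.util.logging.ConsoleHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class EnableIndexWriterDebugLogging {
  public static void main(String[] args) {
    // Enable FINE (= SLF4J DEBUG) for IndexWriter and attach a console
    // handler that also lets FINE records through.
    Logger jul = Logger.getLogger("org.apache.lucene.index.IndexWriter");
    jul.setLevel(Level.FINE);
    Handler console = new ConsoleHandler();
    console.setLevel(Level.FINE);
    jul.addHandler(console);
    jul.fine("IndexWriter debug logging enabled");
  }
}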
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java (revision 724107)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java (working copy)
@@ -20,7 +20,8 @@
import java.io.IOException;
import java.util.Set;
-import org.apache.lucene.store.Directory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** <p>This class implements a {@link MergePolicy} that tries
* to merge segments into levels of exponentially
@@ -39,6 +40,8 @@
public abstract class LogMergePolicy extends MergePolicy {
+ private static final Logger logger = LoggerFactory.getLogger(LogMergePolicy.class);
+
/** Defines the allowed range of log(size) for each
* level. A level is computed by taking the max segment
* log size, minus LEVEL_LOG_SPAN, and finding all
@@ -61,17 +64,7 @@
private boolean useCompoundFile = true;
private boolean useCompoundDocStore = true;
- private IndexWriter writer;
- protected boolean verbose() {
- return writer != null && writer.verbose();
- }
-
- private void message(String message) {
- if (verbose())
- writer.message("LMP: " + message);
- }
-
/** <p>Returns the number of segments that are merged at
* once and also controls the total number of segments
* allowed to accumulate in the index.</p> */
@@ -256,29 +249,29 @@
*/
public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos,
IndexWriter writer)
- throws CorruptIndexException, IOException
- {
- this.writer = writer;
-
+ throws CorruptIndexException, IOException {
final int numSegments = segmentInfos.size();
- if (verbose())
- message("findMergesToExpungeDeletes: " + numSegments + " segments");
+ if (logger.isDebugEnabled()) {
+ logger.debug("findMergesToExpungeDeletes: " + numSegments + " segments");
+ }
MergeSpecification spec = new MergeSpecification();
int firstSegmentWithDeletions = -1;
for(int i=0;i<numSegments;i++) {
final SegmentInfo info = segmentInfos.info(i);
if (info.hasDeletions()) {
- if (verbose())
- message(" segment " + info.name + " has deletions");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" segment " + info.name + " has deletions");
+ }
if (firstSegmentWithDeletions == -1)
firstSegmentWithDeletions = i;
else if (i - firstSegmentWithDeletions == mergeFactor) {
// We've seen mergeFactor segments in a row with
// deletions, so force a merge now:
- if (verbose())
- message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
+ }
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
firstSegmentWithDeletions = i;
}
@@ -286,16 +279,18 @@
// End of a sequence of segments with deletions, so,
// merge those past segments even if it's fewer than
// mergeFactor segments
- if (verbose())
- message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
+ }
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
firstSegmentWithDeletions = -1;
}
}
if (firstSegmentWithDeletions != -1) {
- if (verbose())
- message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
+ }
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
}
@@ -312,17 +307,15 @@
public MergeSpecification findMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
final int numSegments = infos.size();
- this.writer = writer;
- if (verbose())
- message("findMerges: " + numSegments + " segments");
+ if (logger.isDebugEnabled()) {
+ logger.debug("findMerges: " + numSegments + " segments");
+ }
// Compute levels, which is just log (base mergeFactor)
// of the size of each segment
float[] levels = new float[numSegments];
final float norm = (float) Math.log(mergeFactor);
- final Directory directory = writer.getDirectory();
-
for(int i=0;i<numSegments;i++) {
final SegmentInfo info = infos.info(i);
long size = size(info);
@@ -381,8 +374,9 @@
}
upto--;
}
- if (verbose())
- message(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments");
+ }
// Finally, record all merges that are viable at this level:
int end = start + mergeFactor;
@@ -396,12 +390,14 @@
if (!anyTooLarge) {
if (spec == null)
spec = new MergeSpecification();
- if (verbose())
- message(" " + start + " to " + end + ": add this merge");
+ if (logger.isDebugEnabled()) {
+ logger.debug(" " + start + " to " + end + ": add this merge");
+ }
spec.add(new OneMerge(infos.range(start, end), useCompoundFile));
- } else if (verbose())
- message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
-
+ } else if (logger.isDebugEnabled()) {
+ logger.debug(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
+ }
+
start = end;
end = start + mergeFactor;
}
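The hunks above replace LogMergePolicy's verbose()/message() plumbing with an SLF4J logger guarded by isDebugEnabled(). A minimal standalone sketch of that idiom follows; it is illustrative only and not part of the patch (the class and method names are made up), and it also shows SLF4J's parameterized form, which defers message construction without an explicit guard:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardedDebugLoggingSketch {

    private static final Logger logger = LoggerFactory.getLogger(GuardedDebugLoggingSketch.class);

    void addMerge(int start, int end) {
        // Guard keeps the string concatenation off the hot path when DEBUG is disabled,
        // mirroring the old "if (verbose()) message(...)" pattern.
        if (logger.isDebugEnabled()) {
            logger.debug("  " + start + " to " + end + ": add this merge");
        }

        // Equivalent parameterized form: SLF4J only formats the message if DEBUG is enabled.
        logger.debug("  {} to {}: add this merge", start, end);
    }
}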
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java (revision 724107)
+++ src/java/org/apache/lucene/index/SegmentInfos.java (working copy)
@@ -24,6 +24,8 @@
import org.apache.lucene.store.ChecksumIndexOutput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.NoSuchDirectoryException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
@@ -33,6 +35,8 @@
final class SegmentInfos extends Vector {
+ private static final Logger logger = LoggerFactory.getLogger(SegmentInfos.class);
+
/** The file format version, a negative number. */
/* Works since counter, the old 1st entry, is always >= 0 */
public static final int FORMAT = -1;
@@ -91,12 +95,6 @@
private String userData; // Opaque String that user can specify during IndexWriter.commit
- /**
- * If non-null, information about loading segments_N files
- * will be printed here. @see #setInfoStream.
- */
- private static PrintStream infoStream;
-
public final SegmentInfo info(int i) {
return (SegmentInfo) get(i);
}
@@ -429,9 +427,10 @@
/** If non-null, information about retries when loading
* the segments file will be printed to this.
+ * @deprecated this method is now a no-op; logging is handled via SLF4J.
*/
public static void setInfoStream(PrintStream infoStream) {
- SegmentInfos.infoStream = infoStream;
+ // Do nothing
}
/* Advanced configuration of retry logic in loading
@@ -491,15 +490,14 @@
/**
* @see #setInfoStream
+ * @deprecated this method always returns null; logging is handled via SLF4J.
*/
public static PrintStream getInfoStream() {
- return infoStream;
+ return null;
}
private static void message(String message) {
- if (infoStream != null) {
- infoStream.println("SIS [" + Thread.currentThread().getName() + "]: " + message);
- }
+ logger.debug("SIS [" + Thread.currentThread().getName() + "]: " + message);
}
/**
@@ -570,7 +568,9 @@
if (files != null)
genA = getCurrentSegmentGeneration(files);
- message("directory listing genA=" + genA);
+ if (logger.isDebugEnabled()) {
+ message("directory listing genA=" + genA);
+ }
// Method 2: open segments.gen and read its
// contents. Then we take the larger of the two
@@ -584,10 +584,14 @@
try {
genInput = directory.openInput(IndexFileNames.SEGMENTS_GEN);
} catch (FileNotFoundException e) {
- message("segments.gen open: FileNotFoundException " + e);
+ if (logger.isDebugEnabled()) {
+ message("segments.gen open: FileNotFoundException " + e);
+ }
break;
} catch (IOException e) {
- message("segments.gen open: IOException " + e);
+ if (logger.isDebugEnabled()) {
+ message("segments.gen open: IOException " + e);
+ }
}
if (genInput != null) {
@@ -596,7 +600,9 @@
if (version == FORMAT_LOCKLESS) {
long gen0 = genInput.readLong();
long gen1 = genInput.readLong();
- message("fallback check: " + gen0 + "; " + gen1);
+ if (logger.isDebugEnabled()) {
+ message("fallback check: " + gen0 + "; " + gen1);
+ }
if (gen0 == gen1) {
// The file is consistent.
genB = gen0;
@@ -617,7 +623,9 @@
}
}
- message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);
+ if (logger.isDebugEnabled()) {
+ message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);
+ }
// Pick the larger of the two gen's:
if (genA > genB)
@@ -649,7 +657,9 @@
if (genLookaheadCount < defaultGenLookaheadCount) {
gen++;
genLookaheadCount++;
- message("look ahead increment gen to " + gen);
+ if (logger.isDebugEnabled()) {
+ message("look ahead increment gen to " + gen);
+ }
}
}
@@ -684,7 +694,7 @@
try {
Object v = doBody(segmentFileName);
- if (exc != null) {
+ if (exc != null && logger.isDebugEnabled()) {
message("success on " + segmentFileName);
}
return v;
@@ -695,7 +705,9 @@
exc = err;
}
- message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);
+ if (logger.isDebugEnabled()) {
+ message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);
+ }
if (!retry && gen > 1) {
@@ -715,15 +727,19 @@
prevExists = new File(fileDirectory, prevSegmentFileName).exists();
if (prevExists) {
- message("fallback to prior segment file '" + prevSegmentFileName + "'");
+ if (logger.isDebugEnabled()) {
+ message("fallback to prior segment file '" + prevSegmentFileName + "'");
+ }
try {
Object v = doBody(prevSegmentFileName);
- if (exc != null) {
+ if (exc != null && logger.isDebugEnabled()) {
message("success on fallback " + prevSegmentFileName);
}
return v;
} catch (IOException err2) {
- message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
+ if (logger.isDebugEnabled()) {
+ message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
+ }
}
}
}
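For SegmentInfos the patch keeps the static message() helper but routes it through the class's SLF4J logger, with the callers adding the isDebugEnabled() guard and setInfoStream()/getInfoStream() reduced to deprecated no-ops. A self-contained sketch of that arrangement (illustrative only; the class name is an assumption) looks like:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class SegmentsLoggingSketch {

    private static final Logger logger = LoggerFactory.getLogger(SegmentsLoggingSketch.class);

    // The helper logs unconditionally at DEBUG, keeping the old "SIS [thread]" message prefix.
    private static void message(String message) {
        logger.debug("SIS [" + Thread.currentThread().getName() + "]: " + message);
    }

    // Callers guard the call so the argument string is only built when DEBUG is enabled.
    static void reportGeneration(long genA) {
        if (logger.isDebugEnabled()) {
            message("directory listing genA=" + genA);
        }
    }
}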
Index: src/test/org/apache/lucene/index/TestIndexModifier.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexModifier.java (revision 724107)
+++ src/test/org/apache/lucene/index/TestIndexModifier.java (working copy)
@@ -17,24 +17,22 @@
* limitations under the License.
*/
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.File;
+import java.io.IOException;
+import java.util.EmptyStackException;
+import java.util.Random;
+import java.util.Stack;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
-import java.io.File;
-import java.io.IOException;
-import java.util.EmptyStackException;
-import java.util.Random;
-import java.util.Stack;
-
/**
* Tests for the "IndexModifier" class, including accesses from two threads at the
* same time.
@@ -72,7 +70,6 @@
assertEquals(0, i.docCount());
// Lucene defaults:
- assertNull(i.getInfoStream());
assertTrue(i.getUseCompoundFile());
assertEquals(IndexWriter.DISABLE_AUTO_FLUSH, i.getMaxBufferedDocs());
assertEquals(10000, i.getMaxFieldLength());
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 724107)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -17,10 +17,8 @@
* limitations under the License.
*/
-import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
-import java.io.PrintStream;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
@@ -39,6 +37,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
@@ -4212,17 +4211,17 @@
final List thrown = new ArrayList();
- final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) {
- public void message(final String message) {
- if (message.startsWith("now flush at close") && 0 == thrown.size()) {
+ final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), MaxFieldLength.UNLIMITED) {
+ boolean testPoint(String name) {
+ if (name.startsWith("now flush at close") && 0 == thrown.size()) {
thrown.add(null);
- throw new OutOfMemoryError("fake OOME at " + message);
+ throw new OutOfMemoryError("fake OOME at " + name);
}
+
+ return super.testPoint(name);
}
};
- // need to set an info stream so message is called
- writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
try {
writer.close();
fail("OutOfMemoryError expected");
Index: src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 724107)
+++ src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy)
@@ -137,9 +137,6 @@
//writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(0.1);
- if (DEBUG)
- writer.setInfoStream(System.out);
-
IndexerThread thread = new IndexerThread(0, writer);
thread.run();
if (thread.failure != null) {
@@ -175,9 +172,6 @@
//writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(0.2);
- if (DEBUG)
- writer.setInfoStream(System.out);
-
final int NUM_THREADS = 4;
final IndexerThread[] threads = new IndexerThread[NUM_THREADS];