Index: src/test/org/apache/lucene/store/MockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMDirectory.java (revision 600561)
+++ src/test/org/apache/lucene/store/MockRAMDirectory.java (working copy)
@@ -146,11 +146,17 @@
     RAMFile file = new RAMFile(this);
     synchronized (this) {
       RAMFile existing = (RAMFile)fileMap.get(name);
-      if (existing!=null) {
-        sizeInBytes -= existing.sizeInBytes;
-        existing.directory = null;
+      // Enforce write once:
+      if (existing!=null && !name.equals("segments.gen"))
+        throw new IOException("file " + name + " already exists");
+      else {
+        if (existing!=null) {
+          sizeInBytes -= existing.sizeInBytes;
+          existing.directory = null;
+        }
+
+        fileMap.put(name, file);
       }
-      fileMap.put(name, file);
     }
     return new MockRAMOutputStream(this, file);
   }
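
The change above makes MockRAMDirectory enforce Lucene's write-once contract: any file except segments.gen may be created only once, and a second createOutput for the same name now fails. A minimal sketch of what the check enforces, in the JUnit-3 style of Lucene's tests (the test method and the file name "_0.fnm" are illustrative, not part of the patch):

  public void testWriteOnceEnforced() throws IOException {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.createOutput("_0.fnm").close();      // first create succeeds
    try {
      dir.createOutput("_0.fnm");            // second create of the same name
      fail("expected IOException: file already exists");
    } catch (IOException e) {
      // expected: write-once is now enforced
    }
    // segments.gen is exempt because Lucene legitimately rewrites it:
    dir.createOutput("segments.gen").close();
    dir.createOutput("segments.gen").close();
  }
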
Index: src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
===================================================================
--- src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 600561)
+++ src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy)
@@ -193,6 +193,7 @@
         ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
         writer.setMergeScheduler(cms);
         writer.setMaxBufferedDocs(2);
+        writer.setMergeFactor(100);
 
         for(int j=0;j<201;j++) {
           idField.setValue(Integer.toString(iter*201+j));
@@ -205,10 +206,16 @@
           delID += 5;
         }
 
+        // Force a bunch of merge threads to kick off so we
+        // stress out aborting them on close:
+        writer.setMergeFactor(3);
+        writer.addDocument(doc);
+        writer.flush();
+
         writer.close(false);
 
         IndexReader reader = IndexReader.open(directory);
-        assertEquals((1+iter)*181, reader.numDocs());
+        assertEquals((1+iter)*182, reader.numDocs());
         reader.close();
 
         // Reopen
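
The test change works by wedging the merge scheduler: with mergeFactor at 100, the 2-doc flushes pile up dozens of small segments that never merge; dropping mergeFactor to 3 and flushing one extra document (hence 182 live docs per iteration instead of 181) suddenly makes many cascaded merges eligible at once, so close(false) has live merge threads to abort. Condensed, the pattern looks roughly like this (the analyzer and the document setup are illustrative, not taken from the test):

  IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
  writer.setMergeScheduler(new ConcurrentMergeScheduler());
  writer.setMaxBufferedDocs(2);      // flush a tiny segment every two docs
  writer.setMergeFactor(100);        // ...but let ~100 of them accumulate
  for(int j=0;j<201;j++)
    writer.addDocument(doc);
  writer.setMergeFactor(3);          // now many merges are overdue at once
  writer.addDocument(doc);
  writer.flush();                    // concurrent merge threads kick off here
  writer.close(false);               // don't wait for them: they must abort
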
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 600561)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -2602,12 +2602,9 @@
     // file that current segments does not reference), we
     // abort this merge
     if (merge.isAborted()) {
+      if (infoStream != null)
+        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
 
-      if (infoStream != null) {
-        if (merge.isAborted())
-          message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
-      }
-
       assert merge.increfDone;
       decrefMergeSegments(merge);
       deleter.refresh(merge.info.name);
@@ -2866,9 +2863,8 @@
   /** Does initial setup for a merge, which is fast but holds
    *  the synchronized lock on IndexWriter instance. */
   final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
-    // Bind a new segment name here so even with
-    // ConcurrentMergePolicy we keep deterministic segment
-    // names.
+    if (merge.isAborted())
+      throw new IOException("merge is aborted");
 
     assert merge.registerDone;
 
@@ -2982,6 +2978,10 @@
     merge.increfDone = true;
 
     merge.mergeDocStores = mergeDocStores;
+
+    // Bind a new segment name here so even with
+    // ConcurrentMergePolicy we keep deterministic segment
+    // names.
     merge.info = new SegmentInfo(newSegmentName(), 0,
                                  directory, false, true,
                                  docStoreOffset,
@@ -3033,6 +3033,7 @@
     try {
 
       int totDocCount = 0;
+
       for (int i = 0; i < numSegments; i++) {
         SegmentInfo si = sourceSegmentsClone.info(i);
         IndexReader reader = SegmentReader.get(si, MERGE_READ_BUFFER_SIZE, merge.mergeDocStores); // no need to set deleter (yet)
@@ -3043,6 +3044,9 @@
message("merge: total "+totDocCount+" docs");
}
+ if (merge.isAborted())
+ throw new IOException("merge is aborted");
+
mergedDocCount = merge.info.docCount = merger.merge(merge.mergeDocStores);
assert mergedDocCount == totDocCount;
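
Both IndexWriter hunks apply the same cooperative-cancellation idiom: a merge thread polls merge.isAborted() at safe points (on entry to mergeInit, and again in the merge body just before the expensive merger.merge call) and unwinds with an IOException instead of finishing work whose output would be discarded. Stripped of Lucene specifics, the idiom is roughly the following (the class and method names are illustrative, not Lucene API):

  import java.io.IOException;

  class AbortableMerge {
    private volatile boolean aborted;  // flipped by another thread, e.g. on close(false)

    void abort() { aborted = true; }
    boolean isAborted() { return aborted; }

    void run(int numSteps) throws IOException {
      for(int step=0;step<numSteps;step++) {
        if (isAborted())
          throw new IOException("merge is aborted"); // same wording as the patch
        doExpensiveStep(step);         // real work happens between abort checks
      }
    }

    private void doExpensiveStep(int step) { /* merge one chunk of the segment */ }
  }
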