blob: 30f0bb56e58ed55497dbe6bbe78362fb216abc64 [file] [log] [blame]
Index: src/java/org/apache/lucene/index/TermsHashConsumerPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashConsumerPerField.java (revision 929752)
+++ src/java/org/apache/lucene/index/TermsHashConsumerPerField.java (working copy)
@@ -34,8 +34,6 @@
abstract void newTerm(int termID) throws IOException;
abstract void addTerm(int termID) throws IOException;
abstract int getStreamCount();
-
- abstract ParallelPostingsArray createPostingsArray(int size);
- abstract int bytesPerPosting();
+ abstract ParallelPostingsArray createPostingsArray(int size);
}
Index: src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerField.java (revision 929752)
+++ src/java/org/apache/lucene/index/TermsHashPerField.java (working copy)
@@ -57,7 +57,7 @@
private final BytesRef utf8;
private Comparator<BytesRef> termComp;
- private final int bytesPerPosting;
+ private int bytesPerPosting;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
@@ -78,17 +78,17 @@
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
-
- // +3: Posting is referenced by hash, which
- // targets 25-50% fill factor; approximate this
- // as 3X # pointers
- bytesPerPosting = consumer.bytesPerPosting() + 3*DocumentsWriter.INT_NUM_BYTE;
}
void initPostingsArray() {
assert postingsArray == null;
postingsArray = consumer.createPostingsArray(postingsHashSize);
+
+ // +3: Posting is referenced by hash, which
+ // targets 25-50% fill factor; approximate this
+ // as 3X # pointers
+ bytesPerPosting = postingsArray.bytesPerPosting() + 3*DocumentsWriter.INT_NUM_BYTE;
if (perThread.termsHash.trackAllocations) {
perThread.termsHash.docWriter.bytesAllocated(bytesPerPosting * postingsHashSize);
@@ -135,11 +135,11 @@
nextPerField.abort();
}
- private void growParallelPostingsArray() {
- int oldSize = postingsArray.byteStarts.length;
- int newSize = (int) (oldSize * 1.5);
- this.postingsArray = this.postingsArray.resize(newSize);
-
+ private final void growParallelPostingsArray() {
+ int oldSize = postingsArray.size;
+ this.postingsArray = this.postingsArray.grow();
+ int newSize = postingsArray.size;
+
if (perThread.termsHash.trackAllocations) {
perThread.termsHash.docWriter.bytesAllocated(bytesPerPosting * (newSize - oldSize));
}
@@ -349,7 +349,7 @@
// New posting
termID = numPostings++;
- if (termID >= postingsArray.textStarts.length) {
+ if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
if (perThread.termsHash.trackAllocations) {
@@ -455,7 +455,7 @@
// New posting
termID = numPostings++;
- if (termID >= postingsArray.textStarts.length) {
+ if (termID >= postingsArray.size) {
growParallelPostingsArray();
}
if (perThread.termsHash.trackAllocations) {
Index: src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (revision 929752)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (working copy)
@@ -185,29 +185,30 @@
int lastDocIDs[]; // Last docID where this term occurred
int lastDocCodes[]; // Code for prior doc
int lastPositions[]; // Last position where this term occurred
-
+
@Override
- ParallelPostingsArray resize(int newSize) {
- FreqProxPostingsArray newArray = new FreqProxPostingsArray(newSize);
- copy(this, newArray);
- return newArray;
+ ParallelPostingsArray newInstance(int size) {
+ return new FreqProxPostingsArray(size);
}
-
- void copy(FreqProxPostingsArray fromArray, FreqProxPostingsArray toArray) {
- super.copy(fromArray, toArray);
- System.arraycopy(fromArray.docFreqs, 0, toArray.docFreqs, 0, fromArray.docFreqs.length);
- System.arraycopy(fromArray.lastDocIDs, 0, toArray.lastDocIDs, 0, fromArray.lastDocIDs.length);
- System.arraycopy(fromArray.lastDocCodes, 0, toArray.lastDocCodes, 0, fromArray.lastDocCodes.length);
- System.arraycopy(fromArray.lastPositions, 0, toArray.lastPositions, 0, fromArray.lastPositions.length);
+
+ void copyTo(ParallelPostingsArray toArray, int numToCopy) {
+ assert toArray instanceof FreqProxPostingsArray;
+ FreqProxPostingsArray to = (FreqProxPostingsArray) toArray;
+
+ super.copyTo(toArray, numToCopy);
+
+ System.arraycopy(docFreqs, 0, to.docFreqs, 0, numToCopy);
+ System.arraycopy(lastDocIDs, 0, to.lastDocIDs, 0, numToCopy);
+ System.arraycopy(lastDocCodes, 0, to.lastDocCodes, 0, numToCopy);
+ System.arraycopy(lastPositions, 0, to.lastPositions, 0, numToCopy);
}
-
+
+ @Override
+ int bytesPerPosting() {
+ return ParallelPostingsArray.BYTES_PER_POSTING + 4 * DocumentsWriter.INT_NUM_BYTE;
+ }
}
- @Override
- int bytesPerPosting() {
- return ParallelPostingsArray.BYTES_PER_POSTING + 4 * DocumentsWriter.INT_NUM_BYTE;
- }
-
public void abort() {}
}
Index: src/java/org/apache/lucene/index/ParallelPostingsArray.java
===================================================================
--- src/java/org/apache/lucene/index/ParallelPostingsArray.java (revision 929752)
+++ src/java/org/apache/lucene/index/ParallelPostingsArray.java (working copy)
@@ -1,5 +1,7 @@
package org.apache.lucene.index;
+import org.apache.lucene.util.ArrayUtil;
+
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -21,25 +23,42 @@
class ParallelPostingsArray {
final static int BYTES_PER_POSTING = 3 * DocumentsWriter.INT_NUM_BYTE;
+ final int size;
final int[] textStarts;
final int[] intStarts;
final int[] byteStarts;
-
- public ParallelPostingsArray(final int size) {
+
+ ParallelPostingsArray(final int size) {
+ this.size = size;
textStarts = new int[size];
intStarts = new int[size];
byteStarts = new int[size];
}
-
- ParallelPostingsArray resize(int newSize) {
- ParallelPostingsArray newArray = new ParallelPostingsArray(newSize);
- copy(this, newArray);
+
+ int bytesPerPosting() {
+ return BYTES_PER_POSTING;
+ }
+
+ ParallelPostingsArray newInstance(int size) {
+ return new ParallelPostingsArray(size);
+ }
+
+ final ParallelPostingsArray grow() {
+ int newSize = ArrayUtil.oversize(size + 1, bytesPerPosting());
+ ParallelPostingsArray newArray = newInstance(newSize);
+ copyTo(newArray, size);
return newArray;
}
-
- void copy(ParallelPostingsArray fromArray, ParallelPostingsArray toArray) {
- System.arraycopy(fromArray.textStarts, 0, toArray.textStarts, 0, fromArray.textStarts.length);
- System.arraycopy(fromArray.intStarts, 0, toArray.intStarts, 0, fromArray.intStarts.length);
- System.arraycopy(fromArray.byteStarts, 0, toArray.byteStarts, 0, fromArray.byteStarts.length);
+
+ final ParallelPostingsArray shrink(int targetSize) {
+ ParallelPostingsArray newArray = newInstance(targetSize);
+ copyTo(newArray, targetSize);
+ return newArray;
}
+
+ void copyTo(ParallelPostingsArray toArray, int numToCopy) {
+ System.arraycopy(textStarts, 0, toArray.textStarts, 0, numToCopy);
+ System.arraycopy(intStarts, 0, toArray.intStarts, 0, numToCopy);
+ System.arraycopy(byteStarts, 0, toArray.byteStarts, 0, numToCopy);
+ }
}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 929752)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -1430,7 +1430,7 @@
0 == byteBlockAllocator.freeByteBlocks.size() &&
0 == freeIntBlocks.size() && !any) {
// Nothing else to free -- must flush now.
- bufferIsFull = numBytesUsed+deletesRAMUsed > flushTrigger;
+ bufferIsFull = numBytesAlloc+deletesRAMUsed > flushTrigger;
if (infoStream != null) {
if (numBytesUsed > flushTrigger)
message(" nothing to free; now set bufferIsFull");
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (revision 929752)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (working copy)
@@ -270,24 +270,27 @@
int[] freqs; // How many times this term occurred in the current doc
int[] lastOffsets; // Last offset we saw
int[] lastPositions; // Last position where this term occurred
-
+
@Override
- ParallelPostingsArray resize(int newSize) {
- TermVectorsPostingsArray newArray = new TermVectorsPostingsArray(newSize);
- copy(this, newArray);
- return newArray;
+ ParallelPostingsArray newInstance(int size) {
+ return new TermVectorsPostingsArray(size);
}
-
- void copy(TermVectorsPostingsArray fromArray, TermVectorsPostingsArray toArray) {
- super.copy(fromArray, toArray);
- System.arraycopy(fromArray.freqs, 0, toArray.freqs, 0, fromArray.freqs.length);
- System.arraycopy(fromArray.lastOffsets, 0, toArray.lastOffsets, 0, fromArray.lastOffsets.length);
- System.arraycopy(fromArray.lastPositions, 0, toArray.lastPositions, 0, fromArray.lastPositions.length);
+
+ @Override
+ void copyTo(ParallelPostingsArray toArray, int numToCopy) {
+ assert toArray instanceof TermVectorsPostingsArray;
+ TermVectorsPostingsArray to = (TermVectorsPostingsArray) toArray;
+
+ super.copyTo(toArray, numToCopy);
+
+      System.arraycopy(freqs, 0, to.freqs, 0, numToCopy);
+      System.arraycopy(lastOffsets, 0, to.lastOffsets, 0, numToCopy);
+      System.arraycopy(lastPositions, 0, to.lastPositions, 0, numToCopy);
}
+
+ @Override
+ int bytesPerPosting() {
+ return super.bytesPerPosting() + 3 * DocumentsWriter.INT_NUM_BYTE;
+ }
}
-
- @Override
- int bytesPerPosting() {
- return ParallelPostingsArray.BYTES_PER_POSTING + 3 * DocumentsWriter.INT_NUM_BYTE;
- }
}