Index: lucene/src/java/org/apache/lucene/search/cache/parser/DoubleParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/DoubleParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/DoubleParser.java (revision )
@@ -0,0 +1,79 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+/** Interface to parse doubles from document fields.
+ * @see AtomicFieldCache#getDoubles(String, DoubleParser)
+ */
+public interface DoubleParser extends Parser {
+
+ DoubleParser DEFAULT_DOUBLE_PARSER = new DefaultDoubleParser();
+ DoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new NumericDoubleParser();
+
+ /** Return a double representation of this field's value. */
+ public double parseDouble(BytesRef term);
+
+ /** The default parser for double values, which are encoded by {@link Double#toString(double)} */
+ public static class DefaultDoubleParser implements DoubleParser {
+
+ public double parseDouble(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Double.parseDouble(term.utf8ToString());
+ }
+
+ protected Object readResolve() {
+ return DEFAULT_DOUBLE_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".DEFAULT_DOUBLE_PARSER";
+ }
+ }
+
+ /**
+ * A parser instance for double values encoded with {@link org.apache.lucene.util.NumericUtils}, e.g. when indexed
+ * via {@link org.apache.lucene.document.NumericField}/{@link org.apache.lucene.analysis.NumericTokenStream}.
+ */
+ public static class NumericDoubleParser implements DoubleParser {
+
+ public double parseDouble(BytesRef term) {
+ if (NumericUtils.getPrefixCodedLongShift(term) > 0)
+ throw new AtomicFieldCache.StopFillCacheException();
+ return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term));
+ }
+
+ protected Object readResolve() {
+ return NUMERIC_UTILS_DOUBLE_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER";
+ }
+
+ }
+
+}
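
A minimal usage sketch for the parsers above (illustrative only, not part of the patch; the literal term value is invented):

    import org.apache.lucene.search.cache.parser.DoubleParser;
    import org.apache.lucene.util.BytesRef;

    public class DoubleParserDemo {
      public static void main(String[] args) {
        // DEFAULT_DOUBLE_PARSER expects the Double.toString(double) encoding.
        BytesRef term = new BytesRef("3.14");
        double value = DoubleParser.DEFAULT_DOUBLE_PARSER.parseDouble(term);
        System.out.println(value); // prints 3.14
        // NUMERIC_UTILS_DOUBLE_PARSER instead decodes NumericUtils
        // prefix-coded terms (as written by NumericField) and throws
        // StopFillCacheException for shifted-precision terms.
      }
    }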
Index: solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
===================================================================
--- solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision )
@@ -192,7 +192,8 @@
}
- /** Register sub-objects such as caches
+ /**
+ * Register sub-objects such as caches
*/
public void register() {
// register self
@@ -202,6 +203,7 @@
cache.setState(SolrCache.State.LIVE);
core.getInfoRegistry().put(cache.name(), cache);
}
+ core.getInfoRegistry().put("fieldCache", new SolrFieldCacheMBean(this));
registerTime=System.currentTimeMillis();
}
Index: lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (revision )
@@ -24,8 +24,8 @@
import java.util.Set;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.cache.CacheEntry;
+import org.apache.lucene.search.cache.AtomicFieldCache;
/**
* Provides methods for sanity checking that entries in the FieldCache
@@ -45,7 +45,7 @@
* usages of the FieldCache.
* </p>
* @lucene.experimental
- * @see FieldCache
+ * @see AtomicFieldCache
* @see FieldCacheSanityChecker.Insanity
* @see FieldCacheSanityChecker.InsanityType
*/
@@ -68,7 +68,7 @@
* Quick and dirty convenience method
* @see #check
*/
- public static Insanity[] checkSanity(FieldCache cache) {
+ public static Insanity[] checkSanity(AtomicFieldCache cache) {
return checkSanity(cache.getCacheEntries());
}
@@ -119,7 +119,7 @@
final CacheEntry item = cacheEntries[i];
final Object val = item.getValue();
- if (val instanceof FieldCache.CreationPlaceholder)
+ if (val instanceof AtomicFieldCache.CreationPlaceholder)
continue;
final ReaderField rf = new ReaderField(item.getReaderKey(),
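
A short sketch of driving the reworked checker (illustrative; assumes an atomic reader exposing getFieldCache() as introduced elsewhere in this patch):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.cache.AtomicFieldCache;
    import org.apache.lucene.util.FieldCacheSanityChecker;

    class SanityReport {
      // Prints every "insanity" (accidental double-caching) found in the
      // given reader's field cache.
      static void report(IndexReader reader) {
        AtomicFieldCache cache = reader.getFieldCache();
        for (FieldCacheSanityChecker.Insanity insanity : FieldCacheSanityChecker.checkSanity(cache)) {
          System.err.println("WARNING: " + insanity);
        }
      }
    }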
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupHeadsCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupHeadsCollector.java (revision 1175430)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupHeadsCollector.java (revision )
@@ -18,7 +18,11 @@
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
@@ -27,7 +31,7 @@
/**
* A base implementation of {@link AbstractAllGroupHeadsCollector} for retrieving the most relevant groups when grouping
 * on a string-based group field. More specifically, all concrete implementations of this base implementation
- * use {@link org.apache.lucene.search.FieldCache.DocTermsIndex}.
+ * use {@link DocTermsIndex}.
*
* @lucene.experimental
*/
@@ -38,7 +42,7 @@
final String groupField;
final BytesRef scratchBytesRef = new BytesRef();
- FieldCache.DocTermsIndex groupIndex;
+ DocTermsIndex groupIndex;
IndexReader.AtomicReaderContext readerContext;
protected TermAllGroupHeadsCollector(String groupField, int numberOfSorts) {
@@ -142,7 +146,7 @@
public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
this.readerContext = context;
- groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);
+ groupIndex = context.reader.getFieldCache().getTermsIndex(groupField);
for (GroupHead groupHead : groups.values()) {
for (int i = 0; i < groupHead.comparators.length; i++) {
@@ -198,7 +202,7 @@
private final List<GroupHead> collectedGroups;
private final SortField[] fields;
- private FieldCache.DocTermsIndex[] sortsIndex;
+ private DocTermsIndex[] sortsIndex;
private Scorer scorer;
private GroupHead[] segmentGroupHeads;
@@ -209,7 +213,7 @@
final SortField[] sortFields = sortWithinGroup.getSort();
fields = new SortField[sortFields.length];
- sortsIndex = new FieldCache.DocTermsIndex[sortFields.length];
+ sortsIndex = new DocTermsIndex[sortFields.length];
for (int i = 0; i < sortFields.length; i++) {
reversed[i] = sortFields[i].getReverse() ? -1 : 1;
fields[i] = sortFields[i];
@@ -243,13 +247,13 @@
public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
this.readerContext = context;
- groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);
+ groupIndex = context.reader.getFieldCache().getTermsIndex(groupField);
for (int i = 0; i < fields.length; i++) {
if (fields[i].getType() == SortField.Type.SCORE) {
continue;
}
- sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader, fields[i].getField());
+ sortsIndex[i] = context.reader.getFieldCache().getTermsIndex(fields[i].getField());
}
// Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
@@ -337,7 +341,7 @@
private final List<GroupHead> collectedGroups;
private final SortField[] fields;
- private FieldCache.DocTermsIndex[] sortsIndex;
+ private DocTermsIndex[] sortsIndex;
private GroupHead[] segmentGroupHeads;
OrdAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
@@ -347,7 +351,7 @@
final SortField[] sortFields = sortWithinGroup.getSort();
fields = new SortField[sortFields.length];
- sortsIndex = new FieldCache.DocTermsIndex[sortFields.length];
+ sortsIndex = new DocTermsIndex[sortFields.length];
for (int i = 0; i < sortFields.length; i++) {
reversed[i] = sortFields[i].getReverse() ? -1 : 1;
fields[i] = sortFields[i];
@@ -380,9 +384,9 @@
public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
this.readerContext = context;
- groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);
+ groupIndex = context.reader.getFieldCache().getTermsIndex(groupField);
for (int i = 0; i < fields.length; i++) {
- sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader, fields[i].getField());
+ sortsIndex[i] = context.reader.getFieldCache().getTermsIndex(fields[i].getField());
}
// Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
@@ -488,7 +492,7 @@
public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
this.readerContext = context;
- groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);
+ groupIndex = context.reader.getFieldCache().getTermsIndex(groupField);
// Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
ordSet.clear();
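
The recurring change in this file is the same one-liner; schematically (a sketch, assuming context.reader is atomic as in the collectors above):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.cache.DocTermsIndex;

    class GroupIndexLookup {
      // Before: FieldCache.DEFAULT.getTermsIndex(context.reader, groupField)
      // -- one global cache keyed by reader. After: each atomic reader owns
      // its own cache, so the reader argument disappears.
      static DocTermsIndex groupIndex(IndexReader.AtomicReaderContext context,
                                      String groupField) throws IOException {
        return context.reader.getFieldCache().getTermsIndex(groupField);
      }
    }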
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java (revision )
@@ -19,10 +19,11 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.queries.function.DocValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueInt;
@@ -31,7 +32,7 @@
import java.util.Map;
/**
- * Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getStringIndex().
+ * Obtains the ordinal of the field value from the Lucene {@link org.apache.lucene.search.cache.AtomicFieldCache} using <code>getTermsIndex()</code>.
* <br>
* The native lucene index order is used to assign an ordinal value for each field value.
* <br>Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.
@@ -65,7 +66,7 @@
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final int off = readerContext.docBase;
final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader;
- final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(topReader, field);
+ final DocTermsIndex sindex = new SlowMultiReaderWrapper(topReader).getFieldCache().getTermsIndex(field);
return new IntDocValues(this) {
protected String toTerm(String readableValue) {
return readableValue;
Index: solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
===================================================================
--- solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java (revision )
@@ -21,8 +21,8 @@
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.BytesRef;
@@ -227,7 +227,7 @@
this.context = context;
}
- FieldCache.DocTermsIndex si;
+ DocTermsIndex si;
int startTermIndex;
int endTermIndex;
int[] counts;
@@ -238,7 +238,7 @@
BytesRef tempBR = new BytesRef();
void countTerms() throws IOException {
- si = FieldCache.DEFAULT.getTermsIndex(context.reader, fieldName);
+ si = context.reader.getFieldCache().getTermsIndex(fieldName);
// SolrCore.log.info("reader= " + reader + " FC=" + System.identityHashCode(si));
if (prefix!=null) {
Index: lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (revision )
@@ -17,16 +17,18 @@
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
+import org.apache.lucene.index.codecs.PerDocValues;
+import org.apache.lucene.search.cache.*;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ReaderUtil; // javadoc
+import org.apache.lucene.util.FieldCacheSanityChecker;
+import org.apache.lucene.util.MapBackedSet;
+import org.apache.lucene.util.ReaderUtil;
-import org.apache.lucene.index.DirectoryReader; // javadoc
-import org.apache.lucene.index.MultiReader; // javadoc
-import org.apache.lucene.index.codecs.PerDocValues;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
/**
* This class forces a composite reader (eg a {@link
@@ -37,29 +39,43 @@
* APIs on-the-fly, using the static methods in {@link
* MultiFields}, by stepping through the sub-readers to
* merge fields/terms, appending docs, etc.
- *
+ * <p/>
* <p>If you ever hit an UnsupportedOperationException saying
* "please use MultiFields.XXX instead", the simple
* but non-performant workaround is to wrap your reader
* using this class.</p>
- *
+ * <p/>
* <p><b>NOTE</b>: this class almost always results in a
* performance hit. If this is important to your use case,
* it's better to get the sequential sub readers (see {@link
* ReaderUtil#gatherSubReaders}, instead, and iterate through them
* yourself.</p>
+ *
+ * @lucene.insane
*/
-
public final class SlowMultiReaderWrapper extends FilterIndexReader {
+ private final static InsaneFieldCache insaneFieldCache = new InsaneFieldCache();
+
private final ReaderContext readerContext;
- private final Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
+ private final Map<String, byte[]> normsCache = new HashMap<String, byte[]>();
-
+
+ // A single shared instance across all SlowMultiReaderWrapper instances works well with MapBackedSet
+ private final static InsaneReaderFinishedListener insaneReaderFinishedListener = new InsaneReaderFinishedListener();
+
public SlowMultiReaderWrapper(IndexReader other) {
super(other);
readerContext = new AtomicReaderContext(this); // emulate atomic reader!
+ if (in == null) {
+ return;
- }
+ }
+ if (in.readerFinishedListeners == null) {
+ in.readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener, Boolean>());
+ }
+ in.addReaderFinishedListener(insaneReaderFinishedListener);
+ }
+
@Override
public String toString() {
return "SlowMultiReaderWrapper(" + in + ")";
@@ -82,7 +98,7 @@
ensureOpen();
return MultiFields.getLiveDocs(in);
}
-
+
@Override
public IndexReader[] getSequentialSubReaders() {
return null;
@@ -98,24 +114,564 @@
return null;
if (normsCache.containsKey(field)) // cached omitNorms, not missing key
return null;
-
+
bytes = MultiNorms.norms(in, field);
normsCache.put(field, bytes);
return bytes;
}
-
+
@Override
public ReaderContext getTopReaderContext() {
ensureOpen();
return readerContext;
}
-
+
@Override
- protected void doSetNorm(int n, String field, byte value)
- throws CorruptIndexException, IOException {
+ protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException {
synchronized(normsCache) {
normsCache.remove(field);
}
in.doSetNorm(n, field, value);
}
+
+ @Override
+ public AtomicFieldCache getFieldCache() {
+ return new InsaneNonAtomicFieldCacheWrapper(in);
-}
+ }
+
+ public static InsaneNonAtomicFieldCache getNonAtomicFieldCache() {
+ return (InsaneNonAtomicFieldCache) new SlowMultiReaderWrapper(null).getFieldCache();
+ }
+
+ /**
+ * Implementations maintain a field values cache for more than one reader.
+ *
+ * @lucene.insane
+ */
+ public interface InsaneNonAtomicFieldCache extends AtomicFieldCache {
+
+ /**
+ * Purges all cache entries for all IndexReader keys.
+ */
+ public void purgeAllCaches();
+
+
+ }
+
+ private static class InsaneReaderFinishedListener implements ReaderFinishedListener {
+
+ public void finished(IndexReader reader) {
+ insaneFieldCache.purge(reader);
+ }
+
+ }
+
+ private static class InsaneNonAtomicFieldCacheWrapper implements InsaneNonAtomicFieldCache {
+
+ private final IndexReader in;
+
+ public InsaneNonAtomicFieldCacheWrapper(IndexReader in) {
+ this.in = in;
+ }
+
+ public byte[] getBytes(String field) throws IOException {
+ return insaneFieldCache.getBytes(in, field);
+ }
+
+ public byte[] getBytes(String field, ByteParser parser) throws IOException {
+ return insaneFieldCache.getBytes(in, field, parser);
+ }
+
+ public CachedArray.ByteValues getBytes(String field, EntryCreator<CachedArray.ByteValues> creator) throws IOException {
+ return insaneFieldCache.getBytes(in, field, creator);
+ }
+
+ public short[] getShorts(String field) throws IOException {
+ return insaneFieldCache.getShorts(in, field);
+ }
+
+ public short[] getShorts(String field, ShortParser parser) throws IOException {
+ return insaneFieldCache.getShorts(in, field, parser);
+ }
+
+ public CachedArray.ShortValues getShorts(String field, EntryCreator<CachedArray.ShortValues> creator) throws IOException {
+ return insaneFieldCache.getShorts(in, field, creator);
+ }
+
+ public int[] getInts(String field) throws IOException {
+ return insaneFieldCache.getInts(in, field);
+ }
+
+ public int[] getInts(String field, IntParser parser) throws IOException {
+ return insaneFieldCache.getInts(in, field, parser);
+ }
+
+ public CachedArray.IntValues getInts(String field, EntryCreator<CachedArray.IntValues> creator) throws IOException {
+ return insaneFieldCache.getInts(in, field, creator);
+ }
+
+ public float[] getFloats(String field) throws IOException {
+ return insaneFieldCache.getFloats(in, field);
+ }
+
+ public float[] getFloats(String field, FloatParser parser) throws IOException {
+ return insaneFieldCache.getFloats(in, field, parser);
+ }
+
+ public CachedArray.FloatValues getFloats(String field, EntryCreator<CachedArray.FloatValues> creator) throws IOException {
+ return insaneFieldCache.getFloats(in, field, creator);
+ }
+
+ public long[] getLongs(String field) throws IOException {
+ return insaneFieldCache.getLongs(in, field);
+ }
+
+ public long[] getLongs(String field, LongParser parser) throws IOException {
+ return insaneFieldCache.getLongs(in, field, parser);
+ }
+
+ public CachedArray.LongValues getLongs(String field, EntryCreator<CachedArray.LongValues> creator) throws IOException {
+ return insaneFieldCache.getLongs(in, field, creator);
+ }
+
+ public double[] getDoubles(String field) throws IOException {
+ return insaneFieldCache.getDoubles(in, field);
+ }
+
+ public double[] getDoubles(String field, DoubleParser parser) throws IOException {
+ return insaneFieldCache.getDoubles(in, field, parser);
+ }
+
+ public CachedArray.DoubleValues getDoubles(String field, EntryCreator<CachedArray.DoubleValues> creator) throws IOException {
+ return insaneFieldCache.getDoubles(in, field, creator);
+ }
+
+ public DocTerms getTerms(String field) throws IOException {
+ return insaneFieldCache.getTerms(in, field);
+ }
+
+ public DocTerms getTerms(String field, boolean fasterButMoreRAM) throws IOException {
+ return insaneFieldCache.getTerms(in, field, fasterButMoreRAM);
+ }
+
+ public DocTermsIndex getTermsIndex(String field) throws IOException {
+ return insaneFieldCache.getTermsIndex(in, field);
+ }
+
+ public DocTermsIndex getTermsIndex(String field, boolean fasterButMoreRAM) throws IOException {
+ return insaneFieldCache.getTermsIndex(in, field, fasterButMoreRAM);
+ }
+
+ public DocTermOrds getDocTermOrds(String field) throws IOException {
+ return insaneFieldCache.getDocTermOrds(in, field);
+ }
+
+ public CacheEntry[] getCacheEntries() {
+ return insaneFieldCache.getCacheEntries();
+ }
+
+ public void purgeCache() {
+ insaneFieldCache.purge(in);
+ }
+
+ public void purgeAllCaches() {
+ insaneFieldCache.purgeAllCaches();
+ }
+
+ public void setInfoStream(PrintStream stream) {
+ insaneFieldCache.setInfoStream(stream);
+ }
+
+ public PrintStream getInfoStream() {
+ return insaneFieldCache.getInfoStream();
+ }
+
+ }
+
+ private static class InsaneFieldCache {
+
+ private Map<Class<?>, Cache> caches;
+
+ InsaneFieldCache() {
+ init();
+ }
+
+ private synchronized void init() {
+ caches = new HashMap<Class<?>, Cache>(9);
+ caches.put(Byte.TYPE, new Cache<CachedArray.ByteValues>(this));
+ caches.put(Short.TYPE, new Cache<CachedArray.ShortValues>(this));
+ caches.put(Integer.TYPE, new Cache<CachedArray.IntValues>(this));
+ caches.put(Float.TYPE, new Cache<CachedArray.FloatValues>(this));
+ caches.put(Long.TYPE, new Cache<CachedArray.LongValues>(this));
+ caches.put(Double.TYPE, new Cache<CachedArray.DoubleValues>(this));
+ caches.put(DocTermsIndex.class, new Cache<DocTermsIndex>(this));
+ caches.put(DocTerms.class, new Cache<DocTerms>(this));
+ caches.put(DocTermOrds.class, new Cache<DocTermOrds>(this));
+ }
+
+ public synchronized void purgeAllCaches() {
+ init();
+ }
+
+ public synchronized void purge(IndexReader r) {
+ for (Cache c : caches.values()) {
+ c.purge(r);
+ }
+ }
+
+ public synchronized CacheEntry[] getCacheEntries() {
+ List<CacheEntry> result = new ArrayList<CacheEntry>(17);
+ for (final Map.Entry<Class<?>, Cache> cacheEntry : caches.entrySet()) {
+ final Cache<?> cache = cacheEntry.getValue();
+ final Class<?> cacheType = cacheEntry.getKey();
+ synchronized (cache.readerCache) {
+ for (Object readerKey : cache.readerCache.keySet()) {
+ Map<?, Object> innerCache = cache.readerCache.get(readerKey);
+ for (final Map.Entry<?, Object> mapEntry : innerCache.entrySet()) {
+ Entry entry = (Entry) mapEntry.getKey();
+ result.add(new CacheEntryImpl(readerKey, entry.field,
+ cacheType, entry.creator,
+ mapEntry.getValue()));
+ }
+ }
+ }
+ }
+ return result.toArray(new CacheEntry[result.size()]);
+ }
+
+ private static final class CacheEntryImpl extends CacheEntry {
+ private final Object readerKey;
+ private final String fieldName;
+ private final Class<?> cacheType;
+ private final EntryCreator custom;
+ private final Object value;
+
+ CacheEntryImpl(Object readerKey, String fieldName,
+ Class<?> cacheType,
+ EntryCreator custom,
+ Object value) {
+ this.readerKey = readerKey;
+ this.fieldName = fieldName;
+ this.cacheType = cacheType;
+ this.custom = custom;
+ this.value = value;
+
+ // :HACK: for testing.
+// if (null != locale || SortField.CUSTOM != sortFieldType) {
+// throw new RuntimeException("Locale/sortFieldType: " + this);
+// }
+
+ }
+
+ @Override
+ public Object getReaderKey() {
+ return readerKey;
+ }
+
+ @Override
+ public String getFieldName() {
+ return fieldName;
+ }
+
+ @Override
+ public Class<?> getCacheType() {
+ return cacheType;
+ }
+
+ @Override
+ public Object getCustom() {
+ return custom;
+ }
+
+ @Override
+ public Object getValue() {
+ return value;
+ }
+ }
+
+ final static IndexReader.ReaderFinishedListener purgeReader = new IndexReader.ReaderFinishedListener() {
+ @Override
+ public void finished(IndexReader reader) {
+ insaneFieldCache.purge(reader);
+ }
+ };
+
+ /**
+ * Expert: Internal cache.
+ */
+ final static class Cache<T> {
+
+ Cache() {
+ this.wrapper = null;
+ }
+
+ Cache(InsaneFieldCache wrapper) {
+ this.wrapper = wrapper;
+ }
+
+ final InsaneFieldCache wrapper;
+
+ final Map<Object, Map<Entry<T>, Object>> readerCache = new WeakHashMap<Object, Map<Entry<T>, Object>>();
+
+ protected Object createValue(IndexReader reader, Entry entryKey) throws IOException {
+ return entryKey.creator.create(reader);
+ }
+
+ /**
+ * Remove this reader from the cache, if present.
+ */
+ public void purge(IndexReader r) {
+ Object readerKey = r.getCoreCacheKey();
+ synchronized (readerCache) {
+ readerCache.remove(readerKey);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public Object get(IndexReader reader, Entry<T> key) throws IOException {
+ Map<Entry<T>, Object> innerCache;
+ Object value;
+ final Object readerKey = reader.getCoreCacheKey();
+ synchronized (readerCache) {
+ innerCache = readerCache.get(readerKey);
+ if (innerCache == null) {
+ // First time this reader is using FieldCache
+ innerCache = new HashMap<Entry<T>, Object>();
+ readerCache.put(readerKey, innerCache);
+ reader.addReaderFinishedListener(purgeReader);
+ value = null;
+ } else {
+ value = innerCache.get(key);
+ }
+ if (value == null) {
+ value = new AtomicFieldCache.CreationPlaceholder();
+ innerCache.put(key, value);
+ }
+ }
+ if (value instanceof AtomicFieldCache.CreationPlaceholder) {
+ synchronized (value) {
+ AtomicFieldCache.CreationPlaceholder progress = (AtomicFieldCache.CreationPlaceholder) value;
+ if (progress.value == null) {
+ progress.value = createValue(reader, key);
+ synchronized (readerCache) {
+ innerCache.put(key, progress.value);
+ }
+
+ // Only check if key.creator (the parser) is
+ // non-null; else, we check twice for a single
+ // call to FieldCache.getXXX
+ if (key.creator != null && wrapper != null) {
+ final PrintStream infoStream = wrapper.getInfoStream();
+ if (infoStream != null) {
+ printNewInsanity(infoStream, progress.value);
+ }
+ }
+ }
+ return progress.value;
+ }
+ }
+
+ // Validate new entries
+ if (key.creator.shouldValidate()) {
+ key.creator.validate((T) value, reader);
+ }
+ return value;
+ }
+
+ private void printNewInsanity(PrintStream infoStream, Object value) {
+ final FieldCacheSanityChecker.Insanity[] insanities = FieldCacheSanityChecker.checkSanity(wrapper.getCacheEntries());
+ for (int i = 0; i < insanities.length; i++) {
+ final FieldCacheSanityChecker.Insanity insanity = insanities[i];
+ final CacheEntry[] entries = insanity.getCacheEntries();
+ for (int j = 0; j < entries.length; j++) {
+ if (entries[j].getValue() == value) {
+ // OK this insanity involves our entry
+ infoStream.println("WARNING: new FieldCache insanity created\nDetails: " + insanity.toString());
+ infoStream.println("\nStack:\n");
+ new Throwable().printStackTrace(infoStream);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Expert: Every composite-key in the internal cache is of this type.
+ */
+ static class Entry<T> {
+ final String field; // which Fieldable
+ final EntryCreator<T> creator; // which custom comparator or parser
+
+ /**
+ * Creates one of these objects for a custom comparator/parser.
+ */
+ Entry(String field, EntryCreator<T> custom) {
+ this.field = field;
+ this.creator = custom;
+ }
+
+ /**
+ * Two of these are equal iff they reference the same field and type.
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof Entry) {
+ Entry other = (Entry) o;
+ if (other.field.equals(field)) {
+ if (other.creator == null) {
+ if (creator == null) return true;
+ } else if (other.creator.equals(creator)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Composes a hashcode based on the field and type.
+ */
+ @Override
+ public int hashCode() {
+ return field.hashCode() ^ (creator == null ? 0 : creator.hashCode());
+ }
+ }
+
+ // inherit javadocs
+ public byte[] getBytes(IndexReader reader, String field) throws IOException {
+ return getBytes(reader, field, new ByteValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public byte[] getBytes(IndexReader reader, String field, ByteParser parser) throws IOException {
+ return getBytes(reader, field, new ByteValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.ByteValues getBytes(IndexReader reader, String field, EntryCreator<CachedArray.ByteValues> creator) throws IOException {
+ return (CachedArray.ByteValues) caches.get(Byte.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public short[] getShorts(IndexReader reader, String field) throws IOException {
+ return getShorts(reader, field, new ShortValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public short[] getShorts(IndexReader reader, String field, ShortParser parser) throws IOException {
+ return getShorts(reader, field, new ShortValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.ShortValues getShorts(IndexReader reader, String field, EntryCreator<CachedArray.ShortValues> creator) throws IOException {
+ return (CachedArray.ShortValues) caches.get(Short.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public int[] getInts(IndexReader reader, String field) throws IOException {
+ return getInts(reader, field, new IntValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public int[] getInts(IndexReader reader, String field, IntParser parser) throws IOException {
+ return getInts(reader, field, new IntValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.IntValues getInts(IndexReader reader, String field, EntryCreator<CachedArray.IntValues> creator) throws IOException {
+ return (CachedArray.IntValues) caches.get(Integer.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public float[] getFloats(IndexReader reader, String field) throws IOException {
+ return getFloats(reader, field, new FloatValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public float[] getFloats(IndexReader reader, String field, FloatParser parser) throws IOException {
+ return getFloats(reader, field, new FloatValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.FloatValues getFloats(IndexReader reader, String field, EntryCreator<CachedArray.FloatValues> creator) throws IOException {
+ return (CachedArray.FloatValues) caches.get(Float.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ public long[] getLongs(IndexReader reader, String field) throws IOException {
+ return getLongs(reader, field, new LongValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public long[] getLongs(IndexReader reader, String field, LongParser parser) throws IOException {
+ return getLongs(reader, field, new LongValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.LongValues getLongs(IndexReader reader, String field, EntryCreator<CachedArray.LongValues> creator) throws IOException {
+ return (CachedArray.LongValues) caches.get(Long.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public double[] getDoubles(IndexReader reader, String field) throws IOException {
+ return getDoubles(reader, field, new DoubleValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public double[] getDoubles(IndexReader reader, String field, DoubleParser parser) throws IOException {
+ return getDoubles(reader, field, new DoubleValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.DoubleValues getDoubles(IndexReader reader, String field, EntryCreator<CachedArray.DoubleValues> creator) throws IOException {
+ return (CachedArray.DoubleValues) caches.get(Double.TYPE).get(reader, new Entry(field, creator));
+ }
+
+ public DocTermsIndex getTermsIndex(IndexReader reader, String field) throws IOException {
+ return getTermsIndex(reader, field, new DocTermsIndexCreator(field));
+ }
+
+ public DocTermsIndex getTermsIndex(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
+ return getTermsIndex(reader, field, new DocTermsIndexCreator(field,
+ fasterButMoreRAM ? DocTermsIndexCreator.FASTER_BUT_MORE_RAM : 0));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTermsIndex getTermsIndex(IndexReader reader, String field, EntryCreator<DocTermsIndex> creator) throws IOException {
+ return (DocTermsIndex) caches.get(DocTermsIndex.class).get(reader, new Entry(field, creator));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException {
+ return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new Entry(field, new DocTermOrdsCreator(field, 0)));
+ }
+
+ // TODO: if a DocTermsIndex was already created, we
+ // should share it...
+ public DocTerms getTerms(IndexReader reader, String field) throws IOException {
+ return getTerms(reader, field, new DocTermsCreator(field));
+ }
+
+ public DocTerms getTerms(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
+ return getTerms(reader, field, new DocTermsCreator(field,
+ fasterButMoreRAM ? DocTermsCreator.FASTER_BUT_MORE_RAM : 0));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTerms getTerms(IndexReader reader, String field, EntryCreator<DocTerms> creator) throws IOException {
+ return (DocTerms) caches.get(DocTerms.class).get(reader, new Entry(field, creator));
+ }
+
+ private volatile PrintStream infoStream;
+
+ public void setInfoStream(PrintStream stream) {
+ infoStream = stream;
+ }
+
+ public PrintStream getInfoStream() {
+ return infoStream;
+ }
+
+ }
+
+}
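
A usage sketch for the wrapper's two cache entry points (illustrative; topReader is any composite reader):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.SlowMultiReaderWrapper;
    import org.apache.lucene.search.cache.DocTermsIndex;

    class SlowWrapperDemo {
      // Top-level terms index over a composite reader; the returned cache
      // is backed by the shared static InsaneFieldCache above.
      static DocTermsIndex topLevelTermsIndex(IndexReader topReader, String field)
          throws IOException {
        return new SlowMultiReaderWrapper(topReader).getFieldCache().getTermsIndex(field);
      }

      // The non-atomic view adds purgeAllCaches() across all reader keys.
      static void purgeEverything() {
        SlowMultiReaderWrapper.getNonAtomicFieldCache().purgeAllCaches();
      }
    }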
Index: lucene/src/java/org/apache/lucene/search/cache/ShortValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/ShortValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/ShortValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.Parser;
-import org.apache.lucene.search.FieldCache.ShortParser;
+import org.apache.lucene.search.cache.parser.ShortParser;
import org.apache.lucene.search.cache.CachedArray.ShortValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -102,7 +101,7 @@
protected void fillShortValues( ShortValues vals, IndexReader reader, String field ) throws IOException
{
if( parser == null ) {
- parser = FieldCache.DEFAULT_SHORT_PARSER;
+ parser = ShortParser.DEFAULT_SHORT_PARSER;
}
setParserAndResetCounts(vals, parser);
@@ -134,7 +133,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
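
For reference, a custom parser can be handed to the fill loop above; a minimal sketch (the sign-flipping logic is invented for illustration, and it assumes the new Parser interface declares no methods, like its FieldCache predecessor):

    import org.apache.lucene.search.cache.parser.ShortParser;
    import org.apache.lucene.util.BytesRef;

    // Hypothetical parser: decodes plain-text shorts but negates them,
    // showing where a custom ShortParser plugs into fillShortValues().
    class NegatingShortParser implements ShortParser {
      public short parseShort(BytesRef term) {
        return (short) -Short.parseShort(term.utf8ToString());
      }
    }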
Index: lucene/src/test-framework/org/apache/lucene/search/CheckHits.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/search/CheckHits.java (revision 1175430)
+++ lucene/src/test-framework/org/apache/lucene/search/CheckHits.java (revision )
@@ -27,6 +27,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.store.Directory;
public class CheckHits {
@@ -105,9 +106,10 @@
(random, searcher, i);
s.search(query, c);
Assert.assertEquals("Wrap Reader " + i + ": " +
- query.toString(defaultFieldName),
- correct, actual);
+ query.toString(defaultFieldName),
+ correct, actual);
- FieldCache.DEFAULT.purge(s.getIndexReader()); // our wrapping can create insanity otherwise
+ new SlowMultiReaderWrapper(s.getIndexReader()).getFieldCache().purgeCache();
+// FieldCache.DEFAULT.purge(s.getIndexReader()); // our wrapping can create insanity otherwise
s.close();
}
}
Index: solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
===================================================================
--- solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java (revision 1175430)
+++ solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java (revision )
@@ -18,16 +18,14 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.codecs.CodecProvider;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.junit.Ignore;
+
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
@@ -197,7 +195,7 @@
Arrays.asList("v1","\0:[* TO *]"), 88,12
);
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache()); // avoid FC insanity
}
@Test
@@ -280,7 +278,7 @@
// System.out.println("Done test "+i);
}
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache()); // avoid FC insanity
}
@Test
@@ -405,7 +403,7 @@
);
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache()); // avoid FC insanity
}
/**
@@ -624,7 +622,7 @@
singleTest(fieldAsFunc, "sqrt(\0)");
assertTrue(orig != FileFloatSource.onlyForTesting);
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache()); // avoid FC insanity
}
/**
@@ -651,7 +649,7 @@
100,10, 25,5, 0,0, 1,1);
singleTest(fieldAsFunc, "log(\0)", 1,0);
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache()); // avoid FC insanity
}
@Test
Index: lucene/src/java/org/apache/lucene/search/cache/parser/ShortParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/ShortParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/ShortParser.java (revision )
@@ -0,0 +1,56 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+
+/** Interface to parse shorts from document fields.
+ * @see AtomicFieldCache#getShorts(String, ShortParser)
+ */
+public interface ShortParser extends Parser {
+
+ ShortParser DEFAULT_SHORT_PARSER = new DefaultShortParser();
+
+ /** Return a short representation of this field's value. */
+ public short parseShort(BytesRef term);
+
+
+ /** The default parser for short values, which are encoded by {@link Short#toString(short)} */
+ public static class DefaultShortParser implements ShortParser {
+
+ public short parseShort(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Short.parseShort(term.utf8ToString());
+ }
+
+ protected Object readResolve() {
+ return DEFAULT_SHORT_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".DEFAULT_SHORT_PARSER";
+ }
+
+ }
+
+}
Index: lucene/src/java/org/apache/lucene/search/cache/DocTermsCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/DocTermsCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/DocTermsCreator.java (revision )
@@ -25,7 +25,6 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PagedBytes;
Index: lucene/src/java/org/apache/lucene/document/NumericField.java
===================================================================
--- lucene/src/java/org/apache/lucene/document/NumericField.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/document/NumericField.java (revision )
@@ -27,7 +27,7 @@
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.search.NumericRangeFilter; // javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
+import org.apache.lucene.search.cache.AtomicFieldCache; // javadocs
/**
* <p>
@@ -73,7 +73,7 @@
* NumericRangeFilter}. To sort according to a
* <code>NumericField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#INT}. <code>NumericField</code>
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicFieldCache}.</p>
*
* <p>By default, a <code>NumericField</code>'s value is not stored but
* is indexed for range filtering and sorting. You can use
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java (revision )
@@ -32,7 +32,7 @@
import java.util.Map;
/**
- * Obtains float field values from the {@link org.apache.lucene.search.FieldCache}
+ * Obtains double field values from the {@link org.apache.lucene.search.cache.AtomicFieldCache}
 * using <code>getDoubles()</code>
* and makes those values available as other numeric types, casting as needed.
*
@@ -52,7 +52,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final DoubleValues vals = cache.getDoubles(readerContext.reader, field, creator);
+ final DoubleValues vals = readerContext.reader.getFieldCache().getDoubles(field, creator);
final double[] arr = vals.values;
final Bits valid = vals.valid;
Index: lucene/src/java/org/apache/lucene/index/SegmentFieldCacheImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentFieldCacheImpl.java (revision )
+++ lucene/src/java/org/apache/lucene/index/SegmentFieldCacheImpl.java (revision )
@@ -0,0 +1,382 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.*;
+import org.apache.lucene.search.cache.parser.*;
+import org.apache.lucene.util.FieldCacheSanityChecker;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @lucene.experimental
+ */
+class SegmentFieldCacheImpl implements AtomicFieldCache {
+
+ private IndexReader indexReader;
+ private final Map<Class<?>, Cache> cache;
+
+ SegmentFieldCacheImpl(IndexReader indexReader) {
+ if (indexReader == null) {
+ throw new IllegalArgumentException("Supplied indexReader cannot be null");
+ }
+
+ this.indexReader = indexReader;
+ cache = new HashMap<Class<?>, Cache>(9);
+ initCache();
+ }
+
+ private void initCache() {
+ cache.put(Byte.TYPE, new Cache<CachedArray.ByteValues>(this, indexReader));
+ cache.put(Short.TYPE, new Cache<CachedArray.ShortValues>(this, indexReader));
+ cache.put(Integer.TYPE, new Cache<CachedArray.IntValues>(this, indexReader));
+ cache.put(Float.TYPE, new Cache<CachedArray.FloatValues>(this, indexReader));
+ cache.put(Long.TYPE, new Cache<CachedArray.LongValues>(this, indexReader));
+ cache.put(Double.TYPE, new Cache<CachedArray.DoubleValues>(this, indexReader));
+ cache.put(DocTermsIndex.class, new Cache<DocTermsIndex>(this, indexReader));
+ cache.put(DocTerms.class, new Cache<DocTerms>(this, indexReader));
+ cache.put(DocTermOrds.class, new Cache<DocTermOrds>(this, indexReader));
+ }
+
+ // Invoked in case of IR reopen / clone. Sometimes the IR instance changes, so we need to update.
+ void updateIndexReader(IndexReader ir) {
+ this.indexReader = ir;
+ for (Cache c : cache.values()) {
+ c.updateIR(ir);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public byte[] getBytes(String field) throws IOException {
+ return getBytes(field, new ByteValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public byte[] getBytes(String field, ByteParser parser) throws IOException {
+ return getBytes(field, new ByteValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.ByteValues getBytes(String field, EntryCreator<CachedArray.ByteValues> creator) throws IOException {
+ return (CachedArray.ByteValues) cache.get(Byte.TYPE).get(new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public short[] getShorts(String field) throws IOException {
+ return getShorts(field, new ShortValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public short[] getShorts(String field, ShortParser parser) throws IOException {
+ return getShorts(field, new ShortValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.ShortValues getShorts(String field, EntryCreator<CachedArray.ShortValues> creator) throws IOException {
+ return (CachedArray.ShortValues) cache.get(Short.TYPE).get(new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public int[] getInts(String field) throws IOException {
+ return getInts(field, new IntValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public int[] getInts(String field, IntParser parser) throws IOException {
+ return getInts(field, new IntValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.IntValues getInts(String field, EntryCreator<CachedArray.IntValues> creator) throws IOException {
+ return (CachedArray.IntValues) cache.get(Integer.TYPE).get(new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public float[] getFloats(String field) throws IOException {
+ return getFloats(field, new FloatValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public float[] getFloats(String field, FloatParser parser) throws IOException {
+ return getFloats(field, new FloatValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.FloatValues getFloats(String field, EntryCreator<CachedArray.FloatValues> creator) throws IOException {
+ return (CachedArray.FloatValues) cache.get(Float.TYPE).get(new Entry(field, creator));
+ }
+
+ public long[] getLongs(String field) throws IOException {
+ return getLongs(field, new LongValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public long[] getLongs(String field, LongParser parser) throws IOException {
+ return getLongs(field, new LongValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.LongValues getLongs(String field, EntryCreator<CachedArray.LongValues> creator) throws IOException {
+ return (CachedArray.LongValues) cache.get(Long.TYPE).get(new Entry(field, creator));
+ }
+
+ // inherit javadocs
+ public double[] getDoubles(String field) throws IOException {
+ return getDoubles(field, new DoubleValuesCreator(field, null)).values;
+ }
+
+ // inherit javadocs
+ public double[] getDoubles(String field, DoubleParser parser) throws IOException {
+ return getDoubles(field, new DoubleValuesCreator(field, parser)).values;
+ }
+
+ @SuppressWarnings("unchecked")
+ public CachedArray.DoubleValues getDoubles(String field, EntryCreator<CachedArray.DoubleValues> creator) throws IOException {
+ return (CachedArray.DoubleValues) cache.get(Double.TYPE).get(new Entry(field, creator));
+ }
+
+ public DocTermsIndex getTermsIndex(String field) throws IOException {
+ return getTermsIndex(field, new DocTermsIndexCreator(field));
+ }
+
+ public DocTermsIndex getTermsIndex(String field, boolean fasterButMoreRAM) throws IOException {
+ return getTermsIndex(field, new DocTermsIndexCreator(field,
+ fasterButMoreRAM ? DocTermsIndexCreator.FASTER_BUT_MORE_RAM : 0));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTermsIndex getTermsIndex(String field, EntryCreator<DocTermsIndex> creator) throws IOException {
+ return (DocTermsIndex) cache.get(DocTermsIndex.class).get(new Entry(field, creator));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTermOrds getDocTermOrds(String field) throws IOException {
+ return (DocTermOrds) cache.get(DocTermOrds.class).get(new Entry(field, new DocTermOrdsCreator(field, 0)));
+ }
+
+ // TODO: if a DocTermsIndex was already created, we
+ // should share it...
+ public DocTerms getTerms(String field) throws IOException {
+ return getTerms(field, new DocTermsCreator(field));
+ }
+
+ public DocTerms getTerms(String field, boolean fasterButMoreRAM) throws IOException {
+ return getTerms(field, new DocTermsCreator(field,
+ fasterButMoreRAM ? DocTermsCreator.FASTER_BUT_MORE_RAM : 0));
+ }
+
+ @SuppressWarnings("unchecked")
+ public DocTerms getTerms(String field, EntryCreator<DocTerms> creator) throws IOException {
+ return (DocTerms) cache.get(DocTerms.class).get(new Entry(field, creator));
+ }
+
+ private volatile PrintStream infoStream;
+
+ public void setInfoStream(PrintStream stream) {
+ infoStream = stream;
+ }
+
+ public PrintStream getInfoStream() {
+ return infoStream;
+ }
+
+ public CacheEntry[] getCacheEntries() {
+ List<CacheEntry> result = new ArrayList<CacheEntry>(17);
+ for (final Map.Entry<Class<?>, Cache> cacheEntry : cache.entrySet()) {
+ final Class<?> cacheType = cacheEntry.getKey();
+ final Cache<?> cache = cacheEntry.getValue();
+ synchronized (cache.readerCache) {
+ for (final Map.Entry<?, Object> mapEntry : cache.readerCache.entrySet()) {
+ Entry entry = (Entry) mapEntry.getKey();
+ result.add(new CacheEntryImpl(indexReader, entry.field, cacheType, entry.creator, mapEntry.getValue()));
+ }
+ }
+ }
+ return result.toArray(new CacheEntry[result.size()]);
+ }
+
+ public void purgeCache() {
+ cache.clear();
+ initCache();
+ }
+
+
+ private static class Cache<T> {
+
+ private final AtomicFieldCache wrapper;
+ private IndexReader indexReader;
+ private final Map<Entry<T>,Object> readerCache;
+
+ Cache(AtomicFieldCache wrapper, IndexReader indexReader) {
+ this.wrapper = wrapper;
+ this.indexReader = indexReader;
+ this.readerCache = new HashMap<Entry<T>,Object>();
+ }
+
+ void updateIR(IndexReader ir) {
+ this.indexReader = ir;
+ }
+
+ protected Object createValue(IndexReader reader, Entry entryKey) throws IOException {
+ return entryKey.creator.create(reader);
+ }
+
+ @SuppressWarnings("unchecked")
+ public Object get(Entry<T> key) throws IOException {
+ Object value;
+
+ synchronized (readerCache) {
+ value = readerCache.get(key);
+ if (value == null) {
+ value = new CreationPlaceholder();
+ readerCache.put(key, value);
+ }
+ }
+ if (value instanceof CreationPlaceholder) {
+ synchronized (value) {
+ CreationPlaceholder progress = (CreationPlaceholder) value;
+ if (progress.value != null) {
+ return progress.value;
+ }
+ progress.value = createValue(indexReader, key);
+ synchronized (readerCache) {
+ readerCache.put(key, progress.value);
+ }
+
+ // Only check if key.creator (the parser) is
+ // non-null; else, we check twice for a single
+ // call to FieldCache.getXXX
+ if (key.creator != null && wrapper != null) {
+ final PrintStream infoStream = wrapper.getInfoStream();
+ if (infoStream != null) {
+ printNewInsanity(infoStream, progress.value);
+ }
+ }
+ return progress.value;
+ }
+ }
+
+ // Validate new entries
+ if( key.creator.shouldValidate() ) {
+ key.creator.validate( (T)value, indexReader);
+ }
+ return value;
+ }
+
+ private void printNewInsanity(PrintStream infoStream, Object value) {
+ final FieldCacheSanityChecker.Insanity[] insanities = FieldCacheSanityChecker.checkSanity(wrapper.getCacheEntries());
+ for(int i=0;i<insanities.length;i++) {
+ final FieldCacheSanityChecker.Insanity insanity = insanities[i];
+ final CacheEntry[] entries = insanity.getCacheEntries();
+ for(int j=0;j<entries.length;j++) {
+ if (entries[j].getValue() == value) {
+ // OK this insanity involves our entry
+ infoStream.println("WARNING: new FieldCache insanity created\nDetails: " + insanity.toString());
+ infoStream.println("\nStack:\n");
+ new Throwable().printStackTrace(infoStream);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /** Expert: Every composite-key in the internal cache is of this type. */
+ private static class Entry<T> {
+
+ private final String field; // which Fieldable
+ private final EntryCreator<T> creator; // which custom comparator or parser
+
+ /** Creates one of these objects for a custom comparator/parser. */
+ Entry (String field, EntryCreator<T> custom) {
+ this.field = field;
+ this.creator = custom;
+ }
+
+ /** Two of these are equal iff they reference the same field and type. */
+ @Override
+ public boolean equals (Object o) {
+ if (o instanceof Entry) {
+ Entry other = (Entry) o;
+ if (other.field.equals(field)) {
+ if (other.creator == null) {
+ if (creator == null) return true;
+ } else if (other.creator.equals (creator)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /** Composes a hashcode based on the field and type. */
+ @Override
+ public int hashCode() {
+ return field.hashCode() ^ (creator==null ? 0 : creator.hashCode());
+ }
+ }
+
+ private static class CacheEntryImpl extends CacheEntry {
+
+ private final IndexReader indexReader;
+ private final String fieldName;
+ private final Class<?> cacheType;
+ private final EntryCreator custom;
+ private final Object value;
+
+ CacheEntryImpl(IndexReader indexReader,
+ String fieldName,
+ Class<?> cacheType,
+ EntryCreator custom,
+ Object value) {
+ this.indexReader = indexReader;
+ this.fieldName = fieldName;
+ this.cacheType = cacheType;
+ this.custom = custom;
+ this.value = value;
+ }
+
+ public Object getReaderKey() {
+ return indexReader;
+ }
+
+ public String getFieldName() {
+ return fieldName;
+ }
+
+ public Class<?> getCacheType() {
+ return cacheType;
+ }
+
+ public Object getCustom() {
+ return custom;
+ }
+
+ public Object getValue() {
+ return value;
+ }
+ }
+
+}
\ No newline at end of file
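
For readers tracing the locking in Cache.get() above: the map is held only long enough to install or find a CreationPlaceholder, and the potentially slow value creation runs under the placeholder's own monitor, so concurrent lookups of other keys are never blocked. The following is a minimal standalone sketch of that idiom; all names are illustrative and not part of the patch.

    import java.util.HashMap;
    import java.util.Map;

    public class PlaceholderCache<K, V> {

      /** Marks an entry whose value is still being computed. */
      private static final class Placeholder<V> {
        V value; // written once, under this placeholder's monitor
      }

      /** Computes a value for a key; stands in for EntryCreator.create(). */
      public interface Creator<K, V> {
        V create(K key);
      }

      private final Map<K, Object> cache = new HashMap<K, Object>();
      private final Creator<K, V> creator;

      public PlaceholderCache(Creator<K, V> creator) {
        this.creator = creator;
      }

      @SuppressWarnings("unchecked")
      public V get(K key) {
        Object value;
        synchronized (cache) { // short critical section: find or install placeholder
          value = cache.get(key);
          if (value == null) {
            value = new Placeholder<V>();
            cache.put(key, value);
          }
        }
        if (value instanceof Placeholder) {
          final Placeholder<V> progress = (Placeholder<V>) value;
          synchronized (progress) { // creation is serialized per key, not per map
            if (progress.value == null) {
              progress.value = creator.create(key); // slow work, map unlocked
              synchronized (cache) {
                cache.put(key, progress.value); // replace placeholder with real value
              }
            }
            return progress.value;
          }
        }
        return (V) value;
      }
    }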
Index: lucene/src/java/org/apache/lucene/search/cache/DocTermsIndex.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/DocTermsIndex.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/DocTermsIndex.java (revision )
@@ -0,0 +1,81 @@
+package org.apache.lucene.search.cache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.packed.PackedInts;
+
+/**
+ * Per-document view of a field's terms through ordinals: each document
+ * maps to a sort ord, and each ord can be resolved back to a term value.
+ */
+public abstract class DocTermsIndex {
+
+ public int binarySearchLookup(BytesRef key, BytesRef spare) {
+ // this special case is the reason that Arrays.binarySearch() isn't useful.
+ if (key == null)
+ return 0;
+
+ int low = 1;
+ int high = numOrd()-1;
+
+ while (low <= high) {
+ int mid = (low + high) >>> 1;
+ int cmp = lookup(mid, spare).compareTo(key);
+
+ if (cmp < 0)
+ low = mid + 1;
+ else if (cmp > 0)
+ high = mid - 1;
+ else
+ return mid; // key found
+ }
+ return -(low + 1); // key not found.
+ }
+
+ /** The BytesRef argument must not be null; the method
+ * returns the same BytesRef, or an empty (length=0)
+ * BytesRef if this ord is the null ord (0). */
+ public abstract BytesRef lookup(int ord, BytesRef reuse);
+
+ /** Convenience method, to lookup the Term for a doc.
+ * If this doc is deleted or did not have this field,
+ * this will return an empty (length=0) BytesRef. */
+ public BytesRef getTerm(int docID, BytesRef reuse) {
+ return lookup(getOrd(docID), reuse);
+ }
+
+ /** Returns sort ord for this document. Ord 0 is
+ * reserved for docs that are deleted or did not have
+ * this field. */
+ public abstract int getOrd(int docID);
+
+ /** Returns total unique ord count; this includes +1 for
+ * the null ord (always 0). */
+ public abstract int numOrd();
+
+ /** Number of documents */
+ public abstract int size();
+
+ /** Returns a TermsEnum that can iterate over the values in this index entry */
+ public abstract TermsEnum getTermsEnum();
+
+ /** @lucene.internal */
+ public abstract PackedInts.Reader getDocToOrd();
+
+}
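
A note on consuming binarySearchLookup() above: as with Arrays.binarySearch(), a miss is encoded as -(insertionPoint + 1), with the extra wrinkle that ord 0 is reserved for the null ord. A small hedged helper showing how to decode it (the class and method names are illustrative):

    import org.apache.lucene.search.cache.DocTermsIndex;
    import org.apache.lucene.util.BytesRef;

    public final class OrdLookup {
      /**
       * Returns the ord of {@code key}, or -1 if no document's term equals it,
       * decoding the -(insertionPoint + 1) miss encoding of binarySearchLookup().
       */
      public static int ordOf(DocTermsIndex index, BytesRef key) {
        int ord = index.binarySearchLookup(key, new BytesRef());
        return ord >= 0 ? ord : -1; // negative result: -(ord + 1) is the insertion point
      }
    }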
Index: lucene/src/java/org/apache/lucene/search/cache/parser/Parser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/Parser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/Parser.java (revision )
@@ -0,0 +1,29 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.search.SortField;
+
+/**
+ * Marker interface as super-interface to all parsers. It
+ * is used to specify a custom parser to {@link
+ * SortField#SortField(String, Parser)}.
+ */
+public interface Parser {
+}
Index: lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java (revision )
@@ -28,8 +28,8 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.FieldCache.*;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.FixedBitSet;
@@ -130,27 +130,28 @@
key_2 = new ByteValuesCreator( "ff", null ).getCacheKey();
assertThat( "different options should share same key", key_1, is( key_2 ) );
- key_1 = new IntValuesCreator( "ff", FieldCache.DEFAULT_INT_PARSER ).getCacheKey();
- key_2 = new IntValuesCreator( "ff", FieldCache.NUMERIC_UTILS_INT_PARSER ).getCacheKey();
+ key_1 = new IntValuesCreator( "ff", IntParser.DEFAULT_INT_PARSER ).getCacheKey();
+ key_2 = new IntValuesCreator( "ff", IntParser.NUMERIC_UTILS_INT_PARSER ).getCacheKey();
assertThat( "diferent parser should have same key", key_1, is( key_2 ) );
}
- private CachedArray getWithReflection( FieldCache cache, NumberTypeTester tester, int flags ) throws IOException
+ private CachedArray getWithReflection(AtomicFieldCache cache, NumberTypeTester tester, int flags ) throws IOException
{
try {
- Method getXXX = cache.getClass().getMethod( tester.funcName, IndexReader.class, String.class, EntryCreator.class );
+ Method getXXX = cache.getClass().getMethod(tester.funcName, String.class, EntryCreator.class);
+ getXXX.setAccessible(true);
- Constructor constructor = tester.creator.getConstructor( String.class, tester.parser, Integer.TYPE );
+ Constructor constructor = tester.creator.getConstructor(String.class, tester.parser, Integer.TYPE);
- CachedArrayCreator creator = (CachedArrayCreator)constructor.newInstance( tester.field, null, flags );
+ CachedArrayCreator creator = (CachedArrayCreator)constructor.newInstance(tester.field, null, flags);
- return (CachedArray) getXXX.invoke(cache, reader, tester.field, creator );
+ return (CachedArray) getXXX.invoke(cache, tester.field, creator);
}
catch( Exception ex ) {
- throw new RuntimeException( "Reflection failed", ex );
+ throw new RuntimeException("Reflection failed", ex);
}
}
public void testCachedArrays() throws IOException
{
- FieldCache cache = FieldCache.DEFAULT;
+ AtomicFieldCache cache = new SlowMultiReaderWrapper(reader).getFieldCache();
// Check the Different CachedArray Types
CachedArray last = null;
@@ -171,37 +172,37 @@
}
// Now switch the parser (for the same type) and expect an error
- cache.purgeAllCaches();
+ purgeFieldCache(cache);
int flags = CachedArrayCreator.CACHE_VALUES_AND_BITS_VALIDATE;
field = "theRandomInt";
- last = cache.getInts(reader, field, new IntValuesCreator( field, FieldCache.DEFAULT_INT_PARSER, flags ) );
+ last = cache.getInts(field, new IntValuesCreator( field, IntParser.DEFAULT_INT_PARSER, flags ) );
checkCachedArrayValuesAndBits( typeTests[2], last );
try {
- cache.getInts(reader, field, new IntValuesCreator( field, FieldCache.NUMERIC_UTILS_INT_PARSER, flags ) );
+ cache.getInts(field, new IntValuesCreator( field, IntParser.NUMERIC_UTILS_INT_PARSER, flags ) );
fail( "Should fail if you ask for the same type with a different parser : " + field );
} catch( Exception ex ) {} // expected
field = "theRandomLong";
- last = cache.getLongs(reader, field, new LongValuesCreator( field, FieldCache.DEFAULT_LONG_PARSER, flags ) );
+ last = cache.getLongs(field, new LongValuesCreator( field, LongParser.DEFAULT_LONG_PARSER, flags ) );
checkCachedArrayValuesAndBits( typeTests[3], last );
try {
- cache.getLongs(reader, field, new LongValuesCreator( field, FieldCache.NUMERIC_UTILS_LONG_PARSER, flags ) );
+ cache.getLongs(field, new LongValuesCreator( field, LongParser.NUMERIC_UTILS_LONG_PARSER, flags ) );
fail( "Should fail if you ask for the same type with a different parser : " + field );
} catch( Exception ex ) {} // expected
field = "theRandomFloat";
- last = cache.getFloats(reader, field, new FloatValuesCreator( field, FieldCache.DEFAULT_FLOAT_PARSER, flags ) );
+ last = cache.getFloats(field, new FloatValuesCreator( field, FloatParser.DEFAULT_FLOAT_PARSER, flags ) );
checkCachedArrayValuesAndBits( typeTests[4], last );
try {
- cache.getFloats(reader, field, new FloatValuesCreator( field, FieldCache.NUMERIC_UTILS_FLOAT_PARSER, flags ) );
+ cache.getFloats(field, new FloatValuesCreator( field, FloatParser.NUMERIC_UTILS_FLOAT_PARSER, flags ) );
fail( "Should fail if you ask for the same type with a different parser : " + field );
} catch( Exception ex ) {} // expected
field = "theRandomDouble";
- last = cache.getDoubles(reader, field, new DoubleValuesCreator( field, FieldCache.DEFAULT_DOUBLE_PARSER, flags ) );
+ last = cache.getDoubles(field, new DoubleValuesCreator( field, DoubleParser.DEFAULT_DOUBLE_PARSER, flags ) );
checkCachedArrayValuesAndBits( typeTests[5], last );
try {
- cache.getDoubles(reader, field, new DoubleValuesCreator( field, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, flags ) );
+ cache.getDoubles(field, new DoubleValuesCreator( field, DoubleParser.NUMERIC_UTILS_DOUBLE_PARSER, flags ) );
fail( "Should fail if you ask for the same type with a different parser : " + field );
} catch( Exception ex ) {} // expected
}
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/package.html
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (revision 1175430)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (revision )
@@ -65,7 +65,7 @@
<ul>
<li> For the two-pass grouping collector, the group field must be a
single-valued indexed field.
- {@link org.apache.lucene.search.FieldCache} is used to load the {@link org.apache.lucene.search.FieldCache.DocTermsIndex} for this field.
+ {@link org.apache.lucene.search.cache.AtomicFieldCache} is used to load the {@link org.apache.lucene.search.cache.DocTermsIndex} for this field.
<li> Although Solr supports grouping by function and this module has an abstraction of what a group is, there are currently only
implementations for grouping based on terms.
<li> Sharding is not directly supported, though is not too
Index: lucene/src/java/org/apache/lucene/search/cache/DocTermsIndexCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/DocTermsIndexCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/DocTermsIndexCreator.java (revision )
@@ -29,7 +29,6 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache.DocTermsIndex;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java (revision )
@@ -19,17 +19,18 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.queries.function.DocValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.ReaderUtil;
import java.io.IOException;
import java.util.Map;
/**
- * Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getTermsIndex()
+ * Obtains the ordinal of the field value from the per-reader Lucene {@link org.apache.lucene.search.cache.AtomicFieldCache} using getTermsIndex()
* and reverses the order.
* <br>
* The native lucene index order is used to assign an ordinal value for each field value.
@@ -66,7 +67,7 @@
final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader;
final int off = readerContext.docBase;
- final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(topReader, field);
+ final DocTermsIndex sindex = new SlowMultiReaderWrapper(topReader).getFieldCache().getTermsIndex(field);
final int end = sindex.numOrd();
return new IntDocValues(this) {
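
The changed line above is the access pattern this patch introduces for composite readers: the field cache now lives on each atomic reader, so a top-level reader is first wrapped in a SlowMultiReaderWrapper to obtain a whole-index AtomicFieldCache. A hedged sketch of the pattern in isolation (the class and method names are invented):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.SlowMultiReaderWrapper;
    import org.apache.lucene.search.cache.AtomicFieldCache;
    import org.apache.lucene.search.cache.DocTermsIndex;

    public final class TopLevelCacheAccess {
      /** Loads a terms index spanning all segments of a composite reader. */
      public static DocTermsIndex termsIndex(IndexReader topReader, String field) throws IOException {
        AtomicFieldCache cache = new SlowMultiReaderWrapper(topReader).getFieldCache();
        return cache.getTermsIndex(field);
      }
    }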
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java (revision 1175430)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java (revision )
@@ -18,15 +18,15 @@
*/
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
/**
* Concrete implementation of {@link AbstractFirstPassGroupingCollector} that groups based on
- * field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex}
+ * field values and more specifically uses {@link DocTermsIndex}
* to collect groups.
*
* @lucene.experimental
@@ -34,7 +34,7 @@
public class TermFirstPassGroupingCollector extends AbstractFirstPassGroupingCollector<BytesRef> {
private final BytesRef scratchBytesRef = new BytesRef();
- private FieldCache.DocTermsIndex index;
+ private DocTermsIndex index;
private String groupField;
@@ -80,6 +80,6 @@
@Override
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
super.setNextReader(readerContext);
- index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField);
+ index = readerContext.reader.getFieldCache().getTermsIndex(groupField);
}
}
Index: lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java (revision )
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.search.cache.AtomicFieldCache; //for javadoc
/**
* Expert: A hit queue for sorting by hits by terms in more than one field.
@@ -29,7 +30,7 @@
* @lucene.experimental
* @since 2.9
* @see IndexSearcher#search(Query,Filter,int,Sort)
- * @see FieldCache
+ * @see AtomicFieldCache
*/
public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends PriorityQueue<T> {
Index: lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision )
@@ -20,13 +20,16 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.cache.DocTermsIndex;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.document.NumericField; // for javadocs
+import org.apache.lucene.search.cache.AtomicFieldCache; // for javadocs
/**
- * A range filter built on top of a cached single term field (in {@link FieldCache}).
+ * A range filter built on top of a cached single term field (in {@link AtomicFieldCache}).
*
* <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
* Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,
@@ -41,9 +44,9 @@
* Furthermore, it does not need the numeric values encoded by {@link NumericField}. But
* it has the problem that it only works with exact one value/document (see below).
*
- * <p>As with all {@link FieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for
+ * <p>As with all {@link AtomicFieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for
* fields which contain exactly one term for each document (except for {@link #newStringRange}
- * where 0 terms are also allowed). Due to a restriction of {@link FieldCache}, for numeric ranges
+ * where 0 terms are also allowed). Due to a restriction of {@link AtomicFieldCache}, for numeric ranges
* all terms that do not have a numeric value are treated as 0.
*
* <p>Thus it works on dates, prices and other single value fields but will not work on
@@ -51,18 +54,18 @@
* there is only a single term.
*
* <p>This class does not have a constructor; use one of the static factory methods available,
- * that create a correct instance for different data types supported by {@link FieldCache}.
+ * that create a correct instance for different data types supported by {@link AtomicFieldCache}.
*/
public abstract class FieldCacheRangeFilter<T> extends Filter {
final String field;
- final FieldCache.Parser parser;
+ final Parser parser;
final T lowerVal;
final T upperVal;
final boolean includeLower;
final boolean includeUpper;
- private FieldCacheRangeFilter(String field, FieldCache.Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
+ private FieldCacheRangeFilter(String field, Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
this.field = field;
this.parser = parser;
this.lowerVal = lowerVal;
@@ -76,7 +79,7 @@
public abstract DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException;
/**
- * Creates a string range filter using {@link FieldCache#getTermsIndex}. This works with all
+ * Creates a string range filter using {@link AtomicFieldCache#getTermsIndex}. This works with all
* fields containing zero or one term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -84,7 +87,7 @@
return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
+ final DocTermsIndex fcsi = context.reader.getFieldCache().getTermsIndex(field);
final BytesRef spare = new BytesRef();
final int lowerPoint = fcsi.binarySearchLookup(lowerVal == null ? null : new BytesRef(lowerVal), spare);
final int upperPoint = fcsi.binarySearchLookup(upperVal == null ? null : new BytesRef(upperVal), spare);
@@ -136,7 +139,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getBytes(String)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -145,11 +148,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getBytes(String, org.apache.lucene.search.cache.parser.ByteParser)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Byte> newByteRange(String field, ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -174,7 +177,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final byte[] values = FieldCache.DEFAULT.getBytes(context.reader, field, (FieldCache.ByteParser) parser);
+ final byte[] values = context.reader.getFieldCache().getBytes(field, (ByteParser) parser);
// we only respect deleted docs if the range contains 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override
@@ -187,7 +190,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getShorts(String)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -196,11 +199,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getShorts(String, org.apache.lucene.search.cache.parser.ShortParser)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Short> newShortRange(String field, ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -225,7 +228,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final short[] values = FieldCache.DEFAULT.getShorts(context.reader, field, (FieldCache.ShortParser) parser);
+ final short[] values = context.reader.getFieldCache().getShorts(field, (ShortParser) parser);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override
@@ -238,7 +241,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getInts(String)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -247,11 +250,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getInts(String, org.apache.lucene.search.cache.parser.IntParser)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Integer> newIntRange(String field, IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -276,7 +279,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final int[] values = FieldCache.DEFAULT.getInts(context.reader, field, (FieldCache.IntParser) parser);
+ final int[] values = context.reader.getFieldCache().getInts(field, (IntParser) parser);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override
@@ -289,7 +292,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getLongs(String)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -298,11 +301,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getLongs(String, org.apache.lucene.search.cache.parser.LongParser)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Long> newLongRange(String field, LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -327,7 +330,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final long[] values = FieldCache.DEFAULT.getLongs(context.reader, field, (FieldCache.LongParser) parser);
+ final long[] values = context.reader.getFieldCache().getLongs(field, (LongParser) parser);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L)) {
@Override
@@ -340,7 +343,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getFloats(String)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -349,11 +352,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getFloats(String, org.apache.lucene.search.cache.parser.FloatParser)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Float> newFloatRange(String field, FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -382,7 +385,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final float[] values = FieldCache.DEFAULT.getFloats(context.reader, field, (FieldCache.FloatParser) parser);
+ final float[] values = context.reader.getFieldCache().getFloats(field, (FloatParser) parser);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f)) {
@Override
@@ -395,7 +398,7 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getDoubles(String)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -404,11 +407,11 @@
}
/**
- * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
+ * Creates a numeric range filter using {@link AtomicFieldCache#getDoubles(String, org.apache.lucene.search.cache.parser.DoubleParser)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
- public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
+ public static FieldCacheRangeFilter<Double> newDoubleRange(String field, DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
@@ -437,7 +440,7 @@
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final double[] values = FieldCache.DEFAULT.getDoubles(context.reader, field, (FieldCache.DoubleParser) parser);
+ final double[] values = context.reader.getFieldCache().getDoubles(field, (DoubleParser) parser);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0)) {
@Override
@@ -503,7 +506,7 @@
public T getUpperVal() { return upperVal; }
/** Returns the current numeric parser ({@code null} when {@code T} is {@code String}) */
- public FieldCache.Parser getParser() { return parser; }
+ public Parser getParser() { return parser; }
static abstract class FieldCacheDocIdSet extends DocIdSet {
private final IndexReader reader;
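
To make the factory javadocs above concrete, here is a hedged usage sketch of the updated signatures; the field names and bounds are invented for illustration, and a null bound leaves that end of the range open:

    import org.apache.lucene.search.FieldCacheRangeFilter;
    import org.apache.lucene.search.Filter;

    public final class RangeFilterExamples {
      /** Closed int range with the default parser: 10 <= price <= 100. */
      public static Filter priceRange() {
        return FieldCacheRangeFilter.newIntRange("price", 10, 100, true, true);
      }

      /** Half-open long range: timestamp > 0 (null upper bound = unbounded). */
      public static Filter positiveTimestamps() {
        return FieldCacheRangeFilter.newLongRange("timestamp", 0L, null, false, false);
      }

      /** String range resolved through the cached terms index: "a" <= name < "m". */
      public static Filter namesAtoL() {
        return FieldCacheRangeFilter.newStringRange("name", "a", "m", true, false);
      }
    }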
Index: modules/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java (revision )
@@ -22,7 +22,7 @@
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader; // for javadocs
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FieldCache; // for javadocs
+import org.apache.lucene.search.cache.AtomicFieldCache; // for javadocs
/**
* An instance of this subclass should be returned by
@@ -31,7 +31,7 @@
* <p>Since Lucene 2.9, queries operate on each segment of an index separately,
* so the protected {@link #context} field can be used to resolve doc IDs,
* as the supplied <code>doc</code> ID is per-segment and without knowledge
- * of the IndexReader you cannot access the document or {@link FieldCache}.
+ * of the IndexReader you cannot access the document or {@link AtomicFieldCache}.
*
* @lucene.experimental
* @since 2.9.2
Index: modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java
===================================================================
--- modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java (revision 1175430)
+++ modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java (revision )
@@ -25,6 +25,7 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
@@ -244,8 +245,9 @@
final IndexReader r = w.getReader();
w.close();
+ SlowMultiReaderWrapper smrw = new SlowMultiReaderWrapper(r);
// NOTE: intentional but temporary field cache insanity!
- final int[] docIdToFieldId = FieldCache.DEFAULT.getInts(r, "id");
+ final int[] docIdToFieldId = smrw.getFieldCache().getInts("id");
final int[] fieldIdToDocID = new int[numDocs];
for (int i = 0; i < docIdToFieldId.length; i++) {
int fieldId = docIdToFieldId[i];
@@ -330,7 +332,7 @@
}
s.close();
} finally {
- FieldCache.DEFAULT.purge(r);
+ smrw.getFieldCache().purgeCache();
}
r.close();
Index: lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java
===================================================================
--- lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java (revision 1175430)
+++ lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java (revision )
@@ -21,7 +21,6 @@
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.FilteredDocIdSet;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.spatial.DistanceUtils;
@@ -62,8 +61,8 @@
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- final double[] latIndex = FieldCache.DEFAULT.getDoubles(context.reader, latField);
- final double[] lngIndex = FieldCache.DEFAULT.getDoubles(context.reader, lngField);
+ final double[] latIndex = context.reader.getFieldCache().getDoubles(latField);
+ final double[] lngIndex = context.reader.getFieldCache().getDoubles(lngField);
final int docBase = nextDocBase;
nextDocBase += context.reader.maxDoc();
Index: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (revision )
@@ -30,6 +30,7 @@
import java.util.Map;
import java.util.WeakHashMap;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.solr.common.params.QueryElevationParams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -478,7 +479,7 @@
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {
- FieldCache.DocTermsIndex idIndex;
+ DocTermsIndex idIndex;
private final int[] values = new int[numHits];
int bottomVal;
private final BytesRef tempBR = new BytesRef();
@@ -511,7 +512,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, fieldname);
+ idIndex = context.reader.getFieldCache().getTermsIndex(fieldname);
return this;
}
Index: lucene/src/java/org/apache/lucene/search/cache/IntValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/IntValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/IntValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.IntParser;
-import org.apache.lucene.search.FieldCache.Parser;
+import org.apache.lucene.search.cache.parser.IntParser;
import org.apache.lucene.search.cache.CachedArray.IntValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -103,13 +102,13 @@
{
if( parser == null ) {
try {
- parser = FieldCache.DEFAULT_INT_PARSER;
+ parser = IntParser.DEFAULT_INT_PARSER;
fillIntValues( vals, reader, field );
return;
}
catch (NumberFormatException ne) {
vals.parserHashCode = null;
- parser = FieldCache.NUMERIC_UTILS_INT_PARSER;
+ parser = IntParser.NUMERIC_UTILS_INT_PARSER;
fillIntValues( vals, reader, field );
return;
}
@@ -147,7 +146,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
Index: solr/core/src/test/org/apache/solr/TestJoin.java
===================================================================
--- solr/core/src/test/org/apache/solr/TestJoin.java (revision 1175430)
+++ solr/core/src/test/org/apache/solr/TestJoin.java (revision )
@@ -17,17 +17,9 @@
package org.apache.solr;
-import org.apache.lucene.search.FieldCache;
import org.apache.noggit.JSONUtil;
import org.apache.noggit.ObjectBuilder;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.JsonUpdateRequestHandler;
import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.servlet.DirectSolrConnection;
-import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
Index: lucene/src/java/org/apache/lucene/search/SortField.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/SortField.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/SortField.java (revision )
@@ -21,6 +21,7 @@
import java.util.Comparator;
import org.apache.lucene.search.cache.*;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
@@ -133,11 +134,11 @@
}
/** Creates a sort by terms in the given field, parsed
- * to numeric values using a custom {@link FieldCache.Parser}.
+ * to numeric values using a custom {@link Parser}.
* @param field Name of field to sort by. Must not be null.
- * @param parser Instance of a {@link FieldCache.Parser},
+ * @param parser Instance of a {@link Parser},
* which must subclass one of the existing numeric
- * parsers from {@link FieldCache}. Sort type is inferred
+ * parsers from the {@code org.apache.lucene.search.cache.parser} package. Sort type is inferred
* by testing which numeric parser the parser subclasses.
* @throws IllegalArgumentException if the parser fails to
* subclass an existing numeric parser, or field is null
@@ -145,16 +146,16 @@
* @deprecated (4.0) use EntryCreator version
*/
@Deprecated
- public SortField(String field, FieldCache.Parser parser) {
+ public SortField(String field, Parser parser) {
this(field, parser, false);
}
/** Creates a sort, possibly in reverse, by terms in the given field, parsed
- * to numeric values using a custom {@link FieldCache.Parser}.
+ * to numeric values using a custom {@link Parser}.
* @param field Name of field to sort by. Must not be null.
- * @param parser Instance of a {@link FieldCache.Parser},
+ * @param parser Instance of a {@link Parser},
* which must subclass one of the existing numeric
- * parsers from {@link FieldCache}. Sort type is inferred
+ * parsers from the {@code org.apache.lucene.search.cache.parser} package. Sort type is inferred
* by testing which numeric parser the parser subclasses.
* @param reverse True if natural order should be reversed.
* @throws IllegalArgumentException if the parser fails to
@@ -163,30 +164,30 @@
* @deprecated (4.0) use EntryCreator version
*/
@Deprecated
- public SortField(String field, FieldCache.Parser parser, boolean reverse) {
+ public SortField(String field, Parser parser, boolean reverse) {
if (field == null) {
throw new IllegalArgumentException("field can only be null when type is SCORE or DOC");
}
this.field = field;
this.reverse = reverse;
- if (parser instanceof FieldCache.IntParser) {
- this.creator = new IntValuesCreator( field, (FieldCache.IntParser)parser );
+ if (parser instanceof IntParser) {
+ this.creator = new IntValuesCreator( field, (IntParser)parser );
}
- else if (parser instanceof FieldCache.FloatParser) {
- this.creator = new FloatValuesCreator( field, (FieldCache.FloatParser)parser );
+ else if (parser instanceof FloatParser) {
+ this.creator = new FloatValuesCreator( field, (FloatParser)parser );
}
- else if (parser instanceof FieldCache.ShortParser) {
- this.creator = new ShortValuesCreator( field, (FieldCache.ShortParser)parser );
+ else if (parser instanceof ShortParser) {
+ this.creator = new ShortValuesCreator( field, (ShortParser)parser );
}
- else if (parser instanceof FieldCache.ByteParser) {
- this.creator = new ByteValuesCreator( field, (FieldCache.ByteParser)parser );
+ else if (parser instanceof ByteParser) {
+ this.creator = new ByteValuesCreator( field, (ByteParser)parser );
}
- else if (parser instanceof FieldCache.LongParser) {
- this.creator = new LongValuesCreator( field, (FieldCache.LongParser)parser );
+ else if (parser instanceof LongParser) {
+ this.creator = new LongValuesCreator( field, (LongParser)parser );
}
- else if (parser instanceof FieldCache.DoubleParser) {
- this.creator = new DoubleValuesCreator( field, (FieldCache.DoubleParser)parser );
+ else if (parser instanceof DoubleParser) {
+ this.creator = new DoubleValuesCreator( field, (DoubleParser)parser );
}
else
throw new IllegalArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")");
@@ -280,13 +281,13 @@
return type;
}
- /** Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
+ /** Returns the {@link Parser} instance that fits the given sort type.
* May return <code>null</code> if no parser was specified. Sorting then uses the default parser.
+ * @return A {@link Parser} instance, or <code>null</code>.
+ * @return An instance of a {@link AtomicFieldCache} parser, or <code>null</code>.
* @deprecated (4.0) use getEntryCreator()
*/
@Deprecated
- public FieldCache.Parser getParser() {
+ public Parser getParser() {
return (creator==null) ? null : creator.getParser();
}
@@ -373,7 +374,7 @@
/** Returns true if <code>o</code> is equal to this. If a
* {@link FieldComparatorSource} or {@link
- * FieldCache.Parser} was provided, it must properly
+ * Parser} was provided, it must properly
* implement equals (unless a singleton is always used). */
@Override
public boolean equals(Object o) {
@@ -391,7 +392,7 @@
/** Returns true if <code>o</code> is equal to this. If a
* {@link FieldComparatorSource} or {@link
- * FieldCache.Parser} was provided, it must properly
+ * Parser} was provided, it must properly
* implement hashCode (unless a singleton is always
* used). */
@Override
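
Because the deprecated constructor above infers the sort type from which parser interface the argument implements, a custom parser only needs to implement one of the numeric parser interfaces in org.apache.lucene.search.cache.parser. A hedged sketch, assuming IntParser declares a single parseInt(BytesRef) method by analogy with DoubleParser; the hex encoding and field name are invented:

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.cache.parser.IntParser;
    import org.apache.lucene.util.BytesRef;

    public final class HexSortExample {
      /** Parses terms that were indexed as hexadecimal strings. */
      static final IntParser HEX_PARSER = new IntParser() {
        public int parseInt(BytesRef term) {
          return Integer.parseInt(term.utf8ToString(), 16);
        }
      };

      /** SortField sees an IntParser, infers INT, and wraps it in an IntValuesCreator. */
      public static Sort byHexValue() {
        return new Sort(new SortField("hexValue", HEX_PARSER, false));
      }
    }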
Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentReader.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/index/SegmentReader.java (revision )
@@ -17,27 +17,19 @@
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.codecs.PerDocValues;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
-import org.apache.lucene.util.BitVector;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CloseableThreadLocal;
-import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.*;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
/**
* @lucene.experimental
*/
@@ -70,8 +62,35 @@
SegmentCoreReaders core;
+ private SegmentFieldCacheImpl segmentCache;
+
+ public SegmentReader() {
+ this.segmentCache = new SegmentFieldCacheImpl(this);
+ this.readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
+ readerFinishedListeners.add(new ReaderFinishedListener() {
+
+ public void finished(IndexReader reader) {
+ segmentCache.purgeCache();
+ }
+
+ });
+ }
+
+ public SegmentReader(SegmentFieldCacheImpl segmentCache) {
+ this.segmentCache = segmentCache;
+ this.readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
+ readerFinishedListeners.add(new ReaderFinishedListener() {
+
+ public void finished(IndexReader reader) {
+ SegmentReader.this.segmentCache.purgeCache();
+ }
+
+ });
+ segmentCache.updateIndexReader(this);
+ }
+
/**
- * Sets the initial value
+ * Sets the initial value
*/
private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
@Override
@@ -81,7 +100,7 @@
}
Map<String,SegmentNorms> norms = new HashMap<String,SegmentNorms>();
-
+
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
@@ -101,7 +120,7 @@
int termInfosIndexDivisor,
IOContext context)
throws CorruptIndexException, IOException {
-
+
SegmentReader instance = new SegmentReader();
instance.readOnly = readOnly;
instance.si = si;
@@ -149,7 +168,7 @@
// Verify # deletes does not exceed maxDoc for this
// segment:
- assert si.getDelCount() <= maxDoc() :
+ assert si.getDelCount() <= maxDoc() :
"delete count mismatch: " + recomputedCount + ") exceeds max doc (" + maxDoc() + ") for segment " + si.name;
return true;
@@ -167,7 +186,7 @@
} else
assert si.getDelCount() == 0;
}
-
+
/**
* Clones the norm bytes. May be overridden by subclasses. New and experimental.
* @param bytes Byte array to clone
@@ -178,7 +197,7 @@
System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
return cloneBytes;
}
-
+
/**
* Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
* @param bv BitVector to clone
@@ -220,7 +239,7 @@
boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
&& (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
boolean normsUpToDate = true;
-
+
Set<Integer> fieldNormsChanged = new HashSet<Integer>();
for (FieldInfo fi : core.fieldInfos) {
int fieldNumber = fi.number;
@@ -234,14 +253,14 @@
// also if both old and new readers aren't readonly, we clone to avoid sharing modifications
if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly) {
return this;
- }
+ }
// When cloning, the incoming SegmentInfos should not
// have any changes in it:
assert !doClone || (normsUpToDate && deletionsUpToDate);
// clone reader
- SegmentReader clone = new SegmentReader();
+ final SegmentReader clone = deletionsUpToDate ? new SegmentReader(segmentCache) : new SegmentReader();
boolean success = false;
try {
@@ -259,7 +278,7 @@
clone.hasChanges = hasChanges;
hasChanges = false;
}
-
+
if (doClone) {
if (liveDocs != null) {
liveDocsRef.incrementAndGet();
@@ -303,7 +322,7 @@
clone.decRef();
}
}
-
+
return clone;
}
@@ -828,6 +847,11 @@
}
@Override
+ public AtomicFieldCache getFieldCache() {
+ return segmentCache;
+ }
+
+ @Override
protected void readerFinished() {
// Do nothing here -- we have more careful control on
// when to notify that a SegmentReader has finished,
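
The constructors added above tie cache invalidation to the reader lifecycle through a ReaderFinishedListener. The same hook can be installed from outside; a minimal sketch, assuming the getFieldCache()/purgeCache() methods introduced by this patch and the existing IndexReader.addReaderFinishedListener registration:

    import org.apache.lucene.index.IndexReader;

    public final class PurgeOnFinish {
      /** Drops a reader's cached field arrays once the reader is finished. */
      public static void install(IndexReader reader) {
        reader.addReaderFinishedListener(new IndexReader.ReaderFinishedListener() {
          public void finished(IndexReader closed) {
            closed.getFieldCache().purgeCache();
          }
        });
      }
    }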
Index: lucene/src/test/org/apache/lucene/search/TestFieldCache.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestFieldCache.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/TestInsaneFieldCache.java (revision )
@@ -1,12 +1,13 @@
package org.apache.lucene.search;
-/**
- * Copyright 2004 The Apache Software Foundation
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
@@ -20,6 +21,10 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.search.cache.DocTerms;
+import org.apache.lucene.search.cache.DocTermsIndex;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
@@ -33,7 +38,7 @@
import java.util.LinkedHashSet;
import java.util.List;
-public class TestFieldCache extends LuceneTestCase {
+public class TestInsaneFieldCache extends LuceneTestCase {
protected IndexReader reader;
private int NUM_DOCS;
private int NUM_ORDS;
@@ -98,68 +103,70 @@
}
public void testInfoStream() throws Exception {
+ SlowMultiReaderWrapper smrw = new SlowMultiReaderWrapper(reader);
+ AtomicFieldCache cache = smrw.getFieldCache();
try {
- FieldCache cache = FieldCache.DEFAULT;
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
cache.setInfoStream(new PrintStream(bos));
- cache.getDoubles(reader, "theDouble");
- cache.getFloats(reader, "theDouble");
+ cache.getDoubles("theDouble");
+ cache.getFloats("theDouble");
assertTrue(bos.toString().indexOf("WARNING") != -1);
} finally {
- FieldCache.DEFAULT.purgeAllCaches();
+ purgeFieldCache(cache);
}
}
public void test() throws IOException {
- FieldCache cache = FieldCache.DEFAULT;
- double [] doubles = cache.getDoubles(reader, "theDouble");
- assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble"));
- assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER));
+ SlowMultiReaderWrapper smrw = new SlowMultiReaderWrapper(reader);
+ AtomicFieldCache cache = smrw.getFieldCache();
+ double [] doubles = cache.getDoubles("theDouble");
+ assertSame("Second request to cache return same array", doubles, cache.getDoubles("theDouble"));
+ assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles("theDouble", DoubleParser.DEFAULT_DOUBLE_PARSER));
assertTrue("doubles Size: " + doubles.length + " is not: " + NUM_DOCS, doubles.length == NUM_DOCS);
for (int i = 0; i < doubles.length; i++) {
assertTrue(doubles[i] + " does not equal: " + (Double.MAX_VALUE - i), doubles[i] == (Double.MAX_VALUE - i));
}
- long [] longs = cache.getLongs(reader, "theLong");
- assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong"));
- assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER));
+ long [] longs = cache.getLongs("theLong");
+ assertSame("Second request to cache return same array", longs, cache.getLongs("theLong"));
+ assertSame("Second request with explicit parser return same array", longs, cache.getLongs("theLong", LongParser.DEFAULT_LONG_PARSER));
assertTrue("longs Size: " + longs.length + " is not: " + NUM_DOCS, longs.length == NUM_DOCS);
for (int i = 0; i < longs.length; i++) {
assertTrue(longs[i] + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs[i] == (Long.MAX_VALUE - i));
}
- byte [] bytes = cache.getBytes(reader, "theByte");
- assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte"));
- assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER));
+ byte [] bytes = cache.getBytes("theByte");
+ assertSame("Second request to cache return same array", bytes, cache.getBytes("theByte"));
+ assertSame("Second request with explicit parser return same array", bytes, cache.getBytes("theByte", ByteParser.DEFAULT_BYTE_PARSER));
assertTrue("bytes Size: " + bytes.length + " is not: " + NUM_DOCS, bytes.length == NUM_DOCS);
for (int i = 0; i < bytes.length; i++) {
assertTrue(bytes[i] + " does not equal: " + (Byte.MAX_VALUE - i), bytes[i] == (byte) (Byte.MAX_VALUE - i));
}
- short [] shorts = cache.getShorts(reader, "theShort");
- assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort"));
- assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER));
+ short [] shorts = cache.getShorts("theShort");
+ assertSame("Second request to cache return same array", shorts, cache.getShorts("theShort"));
+ assertSame("Second request with explicit parser return same array", shorts, cache.getShorts("theShort", ShortParser.DEFAULT_SHORT_PARSER));
assertTrue("shorts Size: " + shorts.length + " is not: " + NUM_DOCS, shorts.length == NUM_DOCS);
for (int i = 0; i < shorts.length; i++) {
assertTrue(shorts[i] + " does not equal: " + (Short.MAX_VALUE - i), shorts[i] == (short) (Short.MAX_VALUE - i));
}
- int [] ints = cache.getInts(reader, "theInt");
- assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt"));
- assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER));
+ int [] ints = cache.getInts("theInt");
+ assertSame("Second request to cache return same array", ints, cache.getInts("theInt"));
+ assertSame("Second request with explicit parser return same array", ints, cache.getInts("theInt", IntParser.DEFAULT_INT_PARSER));
assertTrue("ints Size: " + ints.length + " is not: " + NUM_DOCS, ints.length == NUM_DOCS);
for (int i = 0; i < ints.length; i++) {
assertTrue(ints[i] + " does not equal: " + (Integer.MAX_VALUE - i), ints[i] == (Integer.MAX_VALUE - i));
}
- float [] floats = cache.getFloats(reader, "theFloat");
- assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat"));
- assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER));
+ float [] floats = cache.getFloats("theFloat");
+ assertSame("Second request to cache return same array", floats, cache.getFloats("theFloat"));
+ assertSame("Second request with explicit parser return same array", floats, cache.getFloats("theFloat", FloatParser.DEFAULT_FLOAT_PARSER));
assertTrue("floats Size: " + floats.length + " is not: " + NUM_DOCS, floats.length == NUM_DOCS);
for (int i = 0; i < floats.length; i++) {
assertTrue(floats[i] + " does not equal: " + (Float.MAX_VALUE - i), floats[i] == (Float.MAX_VALUE - i));
@@ -167,8 +174,8 @@
}
// getTermsIndex
- FieldCache.DocTermsIndex termsIndex = cache.getTermsIndex(reader, "theRandomUnicodeString");
- assertSame("Second request to cache return same array", termsIndex, cache.getTermsIndex(reader, "theRandomUnicodeString"));
+ DocTermsIndex termsIndex = cache.getTermsIndex("theRandomUnicodeString");
+ assertSame("Second request to cache return same array", termsIndex, cache.getTermsIndex("theRandomUnicodeString"));
assertTrue("doubles Size: " + termsIndex.size() + " is not: " + NUM_DOCS, termsIndex.size() == NUM_DOCS);
final BytesRef br = new BytesRef();
for (int i = 0; i < NUM_DOCS; i++) {
@@ -199,11 +206,11 @@
}
// test bad field
- termsIndex = cache.getTermsIndex(reader, "bogusfield");
+ termsIndex = cache.getTermsIndex("bogusfield");
// getTerms
- FieldCache.DocTerms terms = cache.getTerms(reader, "theRandomUnicodeString");
- assertSame("Second request to cache return same array", terms, cache.getTerms(reader, "theRandomUnicodeString"));
+ DocTerms terms = cache.getTerms("theRandomUnicodeString");
+ assertSame("Second request to cache return same array", terms, cache.getTerms("theRandomUnicodeString"));
assertTrue("doubles Size: " + terms.size() + " is not: " + NUM_DOCS, terms.size() == NUM_DOCS);
for (int i = 0; i < NUM_DOCS; i++) {
final BytesRef term = terms.getTerm(i, br);
@@ -212,12 +219,12 @@
}
// test bad field
- terms = cache.getTerms(reader, "bogusfield");
+ terms = cache.getTerms("bogusfield");
// getDocTermOrds
- DocTermOrds termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField");
+ DocTermOrds termOrds = cache.getDocTermOrds("theRandomUnicodeMultiValuedField");
TermsEnum termsEnum = termOrds.getOrdTermsEnum(reader);
- assertSame("Second request to cache return same DocTermOrds", termOrds, cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField"));
+ assertSame("Second request to cache return same DocTermOrds", termOrds, cache.getDocTermOrds("theRandomUnicodeMultiValuedField"));
DocTermOrds.TermOrdsIterator reuse = null;
for (int i = 0; i < NUM_DOCS; i++) {
reuse = termOrds.lookup(i, reuse);
@@ -253,17 +260,18 @@
}
// test bad field
- termOrds = cache.getDocTermOrds(reader, "bogusfield");
+ termOrds = cache.getDocTermOrds("bogusfield");
- FieldCache.DEFAULT.purge(reader);
+ purgeFieldCache(cache);
}
public void testEmptyIndex() throws Exception {
Directory dir = newDirectory();
IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(500));
IndexReader r = IndexReader.open(writer, true);
- FieldCache.DocTerms terms = FieldCache.DEFAULT.getTerms(r, "foobar");
- FieldCache.DocTermsIndex termsIndex = FieldCache.DEFAULT.getTermsIndex(r, "foobar");
+ SlowMultiReaderWrapper smrw = new SlowMultiReaderWrapper(r);
+ DocTerms terms = smrw.getFieldCache().getTerms("foobar");
+ DocTermsIndex termsIndex = smrw.getFieldCache().getTermsIndex("foobar");
writer.close();
r.close();
dir.close();
Index: solr/core/src/test/org/apache/solr/TestGroupingSearch.java
===================================================================
--- solr/core/src/test/org/apache/solr/TestGroupingSearch.java (revision 1175430)
+++ solr/core/src/test/org/apache/solr/TestGroupingSearch.java (revision )
@@ -17,7 +17,7 @@
package org.apache.solr;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.noggit.JSONUtil;
import org.apache.noggit.ObjectBuilder;
import org.apache.solr.common.params.GroupParams;
@@ -427,7 +427,7 @@
,"/grouped/"+f+"/matches==10"
,"/facet_counts/facet_fields/"+f+"==['1',3, '2',3, '3',2, '4',1, '5',1]"
);
- purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
+ purgeFieldCache(new SlowMultiReaderWrapper(null).getFieldCache()); // avoid FC insanity
// test that grouping works with highlighting
assertJQ(req("fq",filt, "q","{!func}"+f2, "group","true", "group.field",f, "fl","id"
Index: lucene/src/java/org/apache/lucene/search/cache/parser/LongParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/LongParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/LongParser.java (revision )
@@ -0,0 +1,81 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * Interface to parse longs from document fields.
+ * @see AtomicFieldCache#getLongs(String, LongParser)
+ */
+public interface LongParser extends Parser {
+
+ LongParser DEFAULT_LONG_PARSER = new DefaultLongParser();
+ LongParser NUMERIC_UTILS_LONG_PARSER = new NumericLongParser();
+
+ /** Return a long representation of this field's value. */
+ public long parseLong(BytesRef term);
+
+
+ /** The default parser for long values, which are encoded by {@link Long#toString(long)} */
+ public static class DefaultLongParser implements LongParser {
+
+ public long parseLong(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Long.parseLong(term.utf8ToString());
+ }
+
+ protected Object readResolve() {
+ return DEFAULT_LONG_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".DEFAULT_LONG_PARSER";
+ }
+ }
+
+ /**
+ * A parser instance for long values encoded by {@link org.apache.lucene.util.NumericUtils}, e.g. when indexed
+ * via {@link org.apache.lucene.document.NumericField}/{@link org.apache.lucene.analysis.NumericTokenStream}.
+ */
+ public static class NumericLongParser implements LongParser {
+
+ public long parseLong(BytesRef term) {
+ if (NumericUtils.getPrefixCodedLongShift(term) > 0)
+ throw new AtomicFieldCache.StopFillCacheException();
+ return NumericUtils.prefixCodedToLong(term);
+ }
+
+ protected Object readResolve() {
+ return NUMERIC_UTILS_LONG_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER";
+ }
+
+ }
+
+}
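For reference, a minimal sketch of using the interface above; the surrounding class and the literal term bytes are placeholders, and only DEFAULT_LONG_PARSER is exercised since NUMERIC_UTILS_LONG_PARSER expects NumericUtils prefix-coded terms:

    import org.apache.lucene.search.cache.parser.LongParser;
    import org.apache.lucene.util.BytesRef;

    public class LongParserSketch {
      public static void main(String[] args) {
        // DEFAULT_LONG_PARSER decodes terms written by Long.toString(long)
        BytesRef term = new BytesRef("42"); // placeholder term bytes
        System.out.println(LongParser.DEFAULT_LONG_PARSER.parseLong(term)); // prints 42
      }
    }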
Index: lucene/src/java/org/apache/lucene/search/cache/CacheEntry.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/CacheEntry.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/CacheEntry.java (revision )
@@ -0,0 +1,81 @@
+package org.apache.lucene.search.cache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.RamUsageEstimator;
+
+import java.text.DecimalFormat;
+
+/**
+ * EXPERT: A unique Identifier/Description for each item in the FieldCache.
+ * Can be useful for logging/debugging.
+ * @lucene.experimental
+ */
+public abstract class CacheEntry {
+ public abstract Object getReaderKey();
+ public abstract String getFieldName();
+ public abstract Class<?> getCacheType();
+ public abstract Object getCustom();
+ public abstract Object getValue();
+ private String size = null;
+ protected final void setEstimatedSize(String size) {
+ this.size = size;
+ }
+ /**
+ * @see #estimateSize(org.apache.lucene.util.RamUsageEstimator)
+ */
+ public void estimateSize() {
+ estimateSize(new RamUsageEstimator(false)); // doesn't check for interned
+ }
+ /**
+ * Computes (and stores) the estimated size of the cache Value
+ * @see #getEstimatedSize
+ */
+ public void estimateSize(RamUsageEstimator ramCalc) {
+ long size = ramCalc.estimateRamUsage(getValue());
+ setEstimatedSize(RamUsageEstimator.humanReadableUnits
+ (size, new DecimalFormat("0.#")));
+
+ }
+ /**
+ * The most recently estimated size of the value, null unless
+ * estimateSize has been called.
+ */
+ public final String getEstimatedSize() {
+ return size;
+ }
+
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append("'").append(getReaderKey()).append("'=>");
+ b.append("'").append(getFieldName()).append("',");
+ b.append(getCacheType()).append(",").append(getCustom());
+ b.append("=>").append(getValue().getClass().getName()).append("#");
+ b.append(System.identityHashCode(getValue()));
+
+ String s = getEstimatedSize();
+ if(null != s) {
+ b.append(" (size =~ ").append(s).append(')');
+ }
+
+ return b.toString();
+ }
+
+}
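A sketch of how CacheEntry might be used for cache diagnostics, assuming the caller has obtained the entries from whatever introspection hook the final API exposes; only methods declared above are used:

    import org.apache.lucene.search.cache.CacheEntry;

    public class CacheEntryLogger {
      // Logs each entry with a human-readable RAM estimate attached.
      public static void log(CacheEntry[] entries) {
        for (CacheEntry entry : entries) {
          entry.estimateSize();      // computes and stores the estimate
          System.out.println(entry); // toString() appends "(size =~ ...)"
        }
      }
    }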
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java (revision 1175430)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java (revision )
@@ -18,8 +18,8 @@
*/
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
@@ -27,7 +27,7 @@
/**
* Concrete implementation of {@link AbstractSecondPassGroupingCollector} that groups based on
- * field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex}
+ * field values and more specifically uses {@link DocTermsIndex}
* to collect grouped docs.
*
* @lucene.experimental
@@ -35,7 +35,7 @@
public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingCollector<BytesRef> {
private final SentinelIntSet ordSet;
- private FieldCache.DocTermsIndex index;
+ private DocTermsIndex index;
private final BytesRef spareBytesRef = new BytesRef();
private final String groupField;
@@ -52,7 +52,7 @@
@Override
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
super.setNextReader(readerContext);
- index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField);
+ index = readerContext.reader.getFieldCache().getTermsIndex(groupField);
// Rebuild ordSet
ordSet.clear();
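The setNextReader change above is the core idiom of this patch, the atomic reader handing out its own cache; a minimal self-contained sketch, with a placeholder group field:

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.search.cache.DocTermsIndex;

    class GroupingSketch {
      private final String groupField = "author"; // placeholder field name
      private DocTermsIndex index;

      public void setNextReader(AtomicReaderContext context) throws IOException {
        // per segment: ask the atomic reader for its own field cache
        index = context.reader.getFieldCache().getTermsIndex(groupField);
      }
    }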
Index: solr/core/src/java/org/apache/solr/schema/TrieField.java
===================================================================
--- solr/core/src/java/org/apache/solr/schema/TrieField.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/schema/TrieField.java (revision )
@@ -30,6 +30,10 @@
import org.apache.lucene.search.cache.FloatValuesCreator;
import org.apache.lucene.search.cache.IntValuesCreator;
import org.apache.lucene.search.cache.LongValuesCreator;
+import org.apache.lucene.search.cache.parser.DoubleParser;
+import org.apache.lucene.search.cache.parser.FloatParser;
+import org.apache.lucene.search.cache.parser.IntParser;
+import org.apache.lucene.search.cache.parser.LongParser;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.NumericUtils;
@@ -39,7 +43,6 @@
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
-import org.apache.solr.search.function.*;
import java.io.IOException;
import java.util.Locale;
@@ -151,7 +154,7 @@
missingValue = top ? Integer.MAX_VALUE : Integer.MIN_VALUE;
}
return new SortField( new IntValuesCreator( field.getName(),
- FieldCache.NUMERIC_UTILS_INT_PARSER, flags ), top).setMissingValue( missingValue );
+ IntParser.NUMERIC_UTILS_INT_PARSER, flags ), top).setMissingValue( missingValue );
case FLOAT:
if( sortMissingLast ) {
@@ -161,7 +164,7 @@
missingValue = top ? Float.POSITIVE_INFINITY : Float.NEGATIVE_INFINITY;
}
return new SortField( new FloatValuesCreator( field.getName(),
- FieldCache.NUMERIC_UTILS_FLOAT_PARSER, flags ), top).setMissingValue( missingValue );
+ FloatParser.NUMERIC_UTILS_FLOAT_PARSER, flags ), top).setMissingValue( missingValue );
case DATE: // fallthrough
case LONG:
@@ -172,7 +175,7 @@
missingValue = top ? Long.MAX_VALUE : Long.MIN_VALUE;
}
return new SortField( new LongValuesCreator( field.getName(),
- FieldCache.NUMERIC_UTILS_LONG_PARSER, flags ), top).setMissingValue( missingValue );
+ LongParser.NUMERIC_UTILS_LONG_PARSER, flags ), top).setMissingValue( missingValue );
case DOUBLE:
if( sortMissingLast ) {
@@ -182,7 +185,7 @@
missingValue = top ? Double.POSITIVE_INFINITY : Double.NEGATIVE_INFINITY;
}
return new SortField( new DoubleValuesCreator( field.getName(),
- FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, flags ), top).setMissingValue( missingValue );
+ DoubleParser.NUMERIC_UTILS_DOUBLE_PARSER, flags ), top).setMissingValue( missingValue );
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + field.name);
@@ -195,15 +198,15 @@
int flags = CachedArrayCreator.CACHE_VALUES_AND_BITS;
switch (type) {
case INTEGER:
- return new IntFieldSource( new IntValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_INT_PARSER, flags ) );
+ return new IntFieldSource( new IntValuesCreator( field.getName(), IntParser.NUMERIC_UTILS_INT_PARSER, flags ) );
case FLOAT:
- return new FloatFieldSource( new FloatValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_FLOAT_PARSER, flags ));
+ return new FloatFieldSource( new FloatValuesCreator( field.getName(), FloatParser.NUMERIC_UTILS_FLOAT_PARSER, flags ));
case DATE:
- return new TrieDateFieldSource( new LongValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER, flags ));
+ return new TrieDateFieldSource( new LongValuesCreator( field.getName(), LongParser.NUMERIC_UTILS_LONG_PARSER, flags ));
case LONG:
- return new LongFieldSource( new LongValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER, flags ) );
+ return new LongFieldSource( new LongValuesCreator( field.getName(), LongParser.NUMERIC_UTILS_LONG_PARSER, flags ) );
case DOUBLE:
- return new DoubleFieldSource( new DoubleValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, flags ));
+ return new DoubleFieldSource( new DoubleValuesCreator( field.getName(), DoubleParser.NUMERIC_UTILS_DOUBLE_PARSER, flags ));
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + field.name);
}
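The SortField construction pattern above reduces to the following shape; the field name, flags, reverse flag, and missing value are placeholders for what getSortField computes from the schema:

    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.cache.CachedArrayCreator;
    import org.apache.lucene.search.cache.LongValuesCreator;
    import org.apache.lucene.search.cache.parser.LongParser;

    class TrieSortSketch {
      // Sort descending by a trie-encoded long field; "price" is a placeholder.
      static SortField byPrice() {
        int flags = CachedArrayCreator.CACHE_VALUES_AND_BITS;
        return new SortField(
            new LongValuesCreator("price", LongParser.NUMERIC_UTILS_LONG_PARSER, flags),
            true).setMissingValue(Long.MIN_VALUE);
      }
    }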
Index: lucene/src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/TestSort.java (revision )
@@ -31,26 +31,14 @@
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MultiReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
-import org.apache.lucene.search.cache.ByteValuesCreator;
-import org.apache.lucene.search.cache.CachedArrayCreator;
-import org.apache.lucene.search.cache.DoubleValuesCreator;
-import org.apache.lucene.search.cache.FloatValuesCreator;
-import org.apache.lucene.search.cache.IntValuesCreator;
-import org.apache.lucene.search.cache.LongValuesCreator;
-import org.apache.lucene.search.cache.ShortValuesCreator;
+import org.apache.lucene.search.cache.*;
+import org.apache.lucene.search.cache.parser.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BytesRef;
@@ -449,62 +437,60 @@
public void testCustomFieldParserSort() throws Exception {
      // since this test explicitly uses different parsers on the same fieldname
// we explicitly check/purge the FieldCache between each assertMatch
- FieldCache fc = FieldCache.DEFAULT;
-
- sort.setSort (new SortField ("parser", new FieldCache.IntParser(){
+ sort.setSort (new SortField ("parser", new IntParser(){
public final int parseInt(final BytesRef term) {
return (term.bytes[term.offset]-'A') * 123456;
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " IntParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
- sort.setSort (new SortField ("parser", new FieldCache.FloatParser(){
+ sort.setSort (new SortField ("parser", new FloatParser(){
public final float parseFloat(final BytesRef term) {
return (float) Math.sqrt( term.bytes[term.offset] );
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " FloatParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
- sort.setSort (new SortField ("parser", new FieldCache.LongParser(){
+ sort.setSort (new SortField ("parser", new LongParser(){
public final long parseLong(final BytesRef term) {
return (term.bytes[term.offset]-'A') * 1234567890L;
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " LongParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
- sort.setSort (new SortField ("parser", new FieldCache.DoubleParser(){
+ sort.setSort (new SortField ("parser", new DoubleParser(){
public final double parseDouble(final BytesRef term) {
return Math.pow( term.bytes[term.offset], (term.bytes[term.offset]-'A') );
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " DoubleParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
- sort.setSort (new SortField ("parser", new FieldCache.ByteParser(){
+ sort.setSort (new SortField ("parser", new ByteParser(){
public final byte parseByte(final BytesRef term) {
return (byte) (term.bytes[term.offset]-'A');
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " ByteParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
- sort.setSort (new SortField ("parser", new FieldCache.ShortParser(){
+ sort.setSort (new SortField ("parser", new ShortParser(){
public final short parseShort(final BytesRef term) {
return (short) (term.bytes[term.offset]-'A');
}
}), SortField.FIELD_DOC );
assertMatches (full, queryA, sort, "JIHGFEDCBA");
assertSaneFieldCaches(getName() + " ShortParser");
- fc.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
}
// test sorts when there's nothing in the index
@@ -563,7 +549,7 @@
bottomValue = slotValues[bottom];
}
- private static final FieldCache.IntParser testIntParser = new FieldCache.IntParser() {
+ private static final IntParser testIntParser = new IntParser() {
public final int parseInt(final BytesRef term) {
return (term.bytes[term.offset]-'A') * 123456;
}
@@ -571,7 +557,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- docValues = FieldCache.DEFAULT.getInts(context.reader, "parser", testIntParser);
+ docValues = context.reader.getFieldCache().getInts("parser", testIntParser);
return this;
}
@@ -1051,7 +1037,7 @@
    // FieldCache behavior, and should have reused the cache in several cases
assertSaneFieldCaches(getName() + " various");
// next we'll check Locale based (String[]) for 'string', so purge first
- FieldCache.DEFAULT.purgeAllCaches();
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
}
private void assertMatches(IndexSearcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
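The anonymous-parser idiom used throughout testCustomFieldParserSort, sketched standalone; this assumes parseInt is the only abstract method on the new IntParser interface:

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.cache.parser.IntParser;
    import org.apache.lucene.util.BytesRef;

    class CustomParserSortSketch {
      // Decode the first term byte relative to 'A'; the field name is a placeholder.
      static Sort byCustomInt() {
        return new Sort(new SortField("parser", new IntParser() {
          public int parseInt(BytesRef term) {
            return (term.bytes[term.offset] - 'A') * 123456;
          }
        }), SortField.FIELD_DOC);
      }
    }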
Index: solr/core/src/java/org/apache/solr/schema/BoolField.java
===================================================================
--- solr/core/src/java/org/apache/solr/schema/BoolField.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/schema/BoolField.java (revision )
@@ -17,15 +17,18 @@
package org.apache.solr.schema;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.queries.function.DocValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.BoolDocValues;
import org.apache.lucene.queries.function.valuesource.OrdFieldSource;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.cache.DocTermsIndex;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueBool;
@@ -35,10 +38,12 @@
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.analysis.SolrAnalyzer;
+import org.apache.solr.response.TextResponseWriter;
+import org.apache.solr.search.QParser;
-import java.util.Map;
-import java.io.Reader;
import java.io.IOException;
+import java.io.Reader;
+import java.util.Map;
/**
*
*/
@@ -171,7 +176,7 @@
@Override
public DocValues getValues(Map context, IndexReader.AtomicReaderContext readerContext) throws IOException {
- final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, field);
+ final DocTermsIndex sindex = readerContext.reader.getFieldCache().getTermsIndex(field);
// figure out what ord maps to true
int nord = sindex.numOrd();
Index: lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (revision 1175430)
+++ lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (revision )
@@ -24,11 +24,8 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
@@ -114,13 +111,13 @@
if (wrap) {
IndexSearcher wrapped;
check(random, q1, wrapped = wrapUnderlyingReader(random, s, -1), false);
- FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // // our wrapping can create insanity otherwise
+ new SlowMultiReaderWrapper(wrapped.getIndexReader()).getFieldCache().purgeCache(); // our wrapping can create insanity otherwise
wrapped.close();
check(random, q1, wrapped = wrapUnderlyingReader(random, s, 0), false);
- FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // // our wrapping can create insanity otherwise
+ new SlowMultiReaderWrapper(wrapped.getIndexReader()).getFieldCache().purgeCache(); // our wrapping can create insanity otherwise
wrapped.close();
check(random, q1, wrapped = wrapUnderlyingReader(random, s, +1), false);
- FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // // our wrapping can create insanity otherwise
+ new SlowMultiReaderWrapper(wrapped.getIndexReader()).getFieldCache().purgeCache(); // our wrapping can create insanity otherwise
wrapped.close();
}
checkExplanations(q1,s);
Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision )
@@ -36,7 +36,6 @@
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.ScoreDoc;
@@ -708,12 +707,12 @@
assertEquals("wrong number of hits", 34, hits.length);
// check decoding into field cache
- int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt");
+ int[] fci = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getInts("trieInt");
for (int val : fci) {
assertTrue("value in id bounds", val >= 0 && val < 35);
}
- long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong");
+ long[] fcl = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getLongs("trieLong");
for (long val : fcl) {
assertTrue("value in id bounds", val >= 0L && val < 35L);
}
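The wrap-then-ask idiom used in these tests, as a standalone sketch; the field name is a placeholder:

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.SlowMultiReaderWrapper;

    class FieldCacheAccessSketch {
      // Composite readers are not atomic, so wrap before asking for a cache.
      static int[] loadInts(IndexReader reader, String field) throws IOException {
        return new SlowMultiReaderWrapper(reader).getFieldCache().getInts(field);
      }
    }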
Index: solr/core/src/java/org/apache/solr/core/SolrCore.java
===================================================================
--- solr/core/src/java/org/apache/solr/core/SolrCore.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/core/SolrCore.java (revision )
@@ -542,8 +542,6 @@
infoRegistry = new ConcurrentHashMap<String, SolrInfoMBean>();
}
- infoRegistry.put("fieldCache", new SolrFieldCacheMBean());
-
this.schema = schema;
this.dataDir = dataDir;
this.solrConfig = config;
@@ -624,7 +622,7 @@
}
infoRegistry.put("core", this);
-
+
// register any SolrInfoMBeans SolrResourceLoader initialized
//
// this must happen after the latch is released, because a JMX server impl may
Index: modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
===================================================================
--- modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java (revision 1175430)
+++ modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java (revision )
@@ -177,7 +177,7 @@
@Override
protected CustomScoreProvider getCustomScoreProvider(AtomicReaderContext context) throws IOException {
- final int[] values = FieldCache.DEFAULT.getInts(context.reader, INT_FIELD);
+ final int[] values = context.reader.getFieldCache().getInts(INT_FIELD);
return new CustomScoreProvider(context) {
@Override
public float customScore(int doc, float subScore, float valSrcScore) throws IOException {
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java (revision 1175430)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java (revision )
@@ -18,7 +18,7 @@
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
@@ -49,7 +49,7 @@
private final SentinelIntSet ordSet;
private final List<BytesRef> groups;
- private FieldCache.DocTermsIndex index;
+ private DocTermsIndex index;
private final BytesRef spareBytesRef = new BytesRef();
/**
@@ -96,7 +96,7 @@
}
public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
- index = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);
+ index = context.reader.getFieldCache().getTermsIndex(groupField);
// Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
ordSet.clear();
Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision )
@@ -35,13 +35,10 @@
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.DefaultSimilarity;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.similarities.SimilarityProvider;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitVector;
@@ -1259,7 +1256,7 @@
IndexReader r = IndexReader.open(dir, false);
assertTrue(r instanceof DirectoryReader);
IndexReader r1 = getOnlySegmentReader(r);
- final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+ final int[] ints = r1.getFieldCache().getInts("number");
assertEquals(1, ints.length);
assertEquals(17, ints[0]);
@@ -1277,7 +1274,7 @@
r.close();
assertTrue(((DirectoryReader) r2).readOnly);
IndexReader[] subs = r2.getSequentialSubReaders();
- final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
+ final int[] ints2 = subs[0].getFieldCache().getInts("number");
r2.close();
assertTrue(((SegmentReader) subs[0]).readOnly);
Index: lucene/src/java/org/apache/lucene/search/cache/DoubleValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/DoubleValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/DoubleValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.DoubleParser;
-import org.apache.lucene.search.FieldCache.Parser;
+import org.apache.lucene.search.cache.parser.DoubleParser;
import org.apache.lucene.search.cache.CachedArray.DoubleValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -102,13 +101,13 @@
{
if( parser == null ) {
try {
- parser = FieldCache.DEFAULT_DOUBLE_PARSER;
+ parser = DoubleParser.DEFAULT_DOUBLE_PARSER;
fillDoubleValues( vals, reader, field );
return;
}
catch (NumberFormatException ne) {
vals.parserHashCode = null; // wipe the previous one
- parser = FieldCache.NUMERIC_UTILS_DOUBLE_PARSER;
+ parser = DoubleParser.NUMERIC_UTILS_DOUBLE_PARSER;
fillDoubleValues( vals, reader, field );
return;
}
@@ -146,7 +145,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
Index: lucene/src/java/org/apache/lucene/search/cache/LongValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/LongValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/LongValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.LongParser;
-import org.apache.lucene.search.FieldCache.Parser;
+import org.apache.lucene.search.cache.parser.LongParser;
import org.apache.lucene.search.cache.CachedArray.LongValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -103,13 +102,13 @@
{
if( parser == null ) {
try {
- parser = FieldCache.DEFAULT_LONG_PARSER;
+ parser = LongParser.DEFAULT_LONG_PARSER;
fillLongValues( vals, reader, field );
return;
}
catch (NumberFormatException ne) {
vals.parserHashCode = null; // wipe the previous one
- parser = FieldCache.NUMERIC_UTILS_LONG_PARSER;
+ parser = LongParser.NUMERIC_UTILS_LONG_PARSER;
fillLongValues( vals, reader, field );
return;
}
@@ -147,7 +146,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
Index: lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (revision )
@@ -49,7 +49,6 @@
import org.apache.lucene.index.codecs.TermsIndexWriterBase;
import org.apache.lucene.index.codecs.standard.StandardPostingsReader;
import org.apache.lucene.index.codecs.standard.StandardPostingsWriter;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.BytesRef;
@@ -316,7 +315,7 @@
}
verify(r, idToOrds, termsArray, null);
- FieldCache.DEFAULT.purge(r);
+ new SlowMultiReaderWrapper(r).getFieldCache().purgeCache();
r.close();
dir.close();
@@ -441,7 +440,7 @@
verify(r, idToOrdsPrefix, termsArray, prefixRef);
}
- FieldCache.DEFAULT.purge(r);
+ new SlowMultiReaderWrapper(r).getFieldCache().purgeCache();
r.close();
dir.close();
@@ -456,7 +455,8 @@
_TestUtil.nextInt(random, 2, 10));
- final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id");
+    // We don't know the concrete Reader impl, so use SlowMultiReaderWrapper...
+ final int[] docIDToID = new SlowMultiReaderWrapper(r).getFieldCache().getInts("id");
/*
for(int docID=0;docID<subR.maxDoc();docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
Index: lucene/src/java/org/apache/lucene/search/cache/DocTerms.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/DocTerms.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/DocTerms.java (revision )
@@ -0,0 +1,40 @@
+package org.apache.lucene.search.cache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Per-document term values for a single field, as loaded from the field cache.
+ */
+public abstract class DocTerms {
+
+ /** The BytesRef argument must not be null; the method
+ * returns the same BytesRef, or an empty (length=0)
+ * BytesRef if the doc did not have this field or was
+ * deleted. */
+ public abstract BytesRef getTerm(int docID, BytesRef ret);
+
+ /** Returns true if this doc has this field and is not
+ * deleted. */
+ public abstract boolean exists(int docID);
+
+ /** Number of documents */
+ public abstract int size();
+
+}
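A sketch of consuming DocTerms per its contract above, reusing a single BytesRef across calls:

    import org.apache.lucene.search.cache.DocTerms;
    import org.apache.lucene.util.BytesRef;

    class DocTermsSketch {
      static void printAll(DocTerms terms) {
        BytesRef spare = new BytesRef(); // reused across calls, must not be null
        for (int docID = 0; docID < terms.size(); docID++) {
          if (terms.exists(docID)) {
            System.out.println(docID + " => " + terms.getTerm(docID, spare).utf8ToString());
          }
        }
      }
    }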
Index: solr/core/src/java/org/apache/solr/request/SimpleFacets.java
===================================================================
--- solr/core/src/java/org/apache/solr/request/SimpleFacets.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/request/SimpleFacets.java (revision )
@@ -18,13 +18,13 @@
package org.apache.solr.request;
import org.apache.lucene.index.*;
-import org.apache.lucene.queries.function.FunctionQuery;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.QueryValueSource;
import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
-import org.apache.lucene.search.grouping.TermAllGroupHeadsCollector;
import org.apache.lucene.util.*;
import org.apache.lucene.util.packed.Direct16;
import org.apache.lucene.util.packed.Direct32;
@@ -32,21 +32,21 @@
import org.apache.lucene.util.packed.PackedInts;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.FacetParams;
+import org.apache.solr.common.params.FacetParams.FacetRangeInclude;
+import org.apache.solr.common.params.FacetParams.FacetRangeOther;
import org.apache.solr.common.params.RequiredSolrParams;
import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.FacetParams.FacetRangeOther;
-import org.apache.solr.common.params.FacetParams.FacetRangeInclude;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrCore;
+import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.schema.*;
import org.apache.solr.search.*;
import org.apache.solr.util.BoundedTreeSet;
import org.apache.solr.util.DateMathParser;
-import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.util.LongPriorityQueue;
import java.io.IOException;
@@ -415,7 +415,7 @@
FieldType ft = searcher.getSchema().getFieldType(fieldName);
NamedList<Integer> res = new NamedList<Integer>();
- FieldCache.DocTermsIndex si = FieldCache.DEFAULT.getTermsIndex(searcher.getIndexReader(), fieldName);
+ DocTermsIndex si = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getTermsIndex(fieldName);
final BytesRef prefixRef;
if (prefix == null) {
Index: lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java
===================================================================
--- lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java (revision 1175430)
+++ lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java (revision )
@@ -20,11 +20,10 @@
import java.io.IOException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.FilteredDocIdSet;
+import org.apache.lucene.search.cache.DocTerms;
import org.apache.lucene.spatial.DistanceUtils;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.spatial.tier.DistanceFilter;
@@ -59,7 +58,7 @@
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- final DocTerms geoHashValues = FieldCache.DEFAULT.getTerms(context.reader, geoHashField);
+ final DocTerms geoHashValues = context.reader.getFieldCache().getTerms(geoHashField);
final BytesRef br = new BytesRef();
final int docBase = nextDocBase;
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision )
@@ -42,13 +42,13 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
@@ -1826,12 +1826,14 @@
w.close();
assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
- FieldCache.DocTermsIndex dti = FieldCache.DEFAULT.getTermsIndex(reader, "content", random.nextBoolean());
+ SlowMultiReaderWrapper slowMultiReaderWrapper = new SlowMultiReaderWrapper(reader);
+ DocTermsIndex dti = slowMultiReaderWrapper.getFieldCache().getTermsIndex("content", random.nextBoolean());
assertEquals(5, dti.numOrd()); // +1 for null ord
assertEquals(4, dti.size());
assertEquals(bigTermBytesRef, dti.lookup(3, new BytesRef()));
reader.close();
dir.close();
+ slowMultiReaderWrapper.getFieldCache().purgeCache();
}
// LUCENE-3183
Index: lucene/src/java/org/apache/lucene/search/FieldComparator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldComparator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldComparator.java (revision )
@@ -20,8 +20,6 @@
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.values.IndexDocValues;
import org.apache.lucene.index.values.IndexDocValues.Source;
-import org.apache.lucene.search.FieldCache.DocTerms;
-import org.apache.lucene.search.FieldCache.DocTermsIndex;
import org.apache.lucene.search.cache.*;
import org.apache.lucene.search.cache.CachedArray.*;
import org.apache.lucene.util.Bits;
@@ -70,11 +68,11 @@
* priority queue. The {@link FieldValueHitQueue}
* calls this method when a new hit is competitive.
*
- * <li> {@link #setNextReader(IndexReader.AtomicReaderContext)} Invoked
+ * <li> {@link #setNextReader(AtomicReaderContext)} Invoked
* when the search is switching to the next segment.
* You may need to update internal state of the
* comparator, for example retrieving new values from
- * the {@link FieldCache}.
+ * the {@link AtomicFieldCache}.
*
* <li> {@link #value} Return the sort value stored in
* the specified slot. This is only called at the end
@@ -208,7 +206,7 @@
}
/** Parses field's values as byte (using {@link
- * FieldCache#getBytes} and sorts by ascending value */
+ * AtomicFieldCache#getBytes} and sorts by ascending value */
public static final class ByteComparator extends NumericComparator<ByteValues,Byte> {
private byte[] docValues;
private final byte[] values;
@@ -247,7 +245,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getBytes(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getBytes(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -265,7 +263,7 @@
/** Parses field's values as double (using {@link
- * FieldCache#getDoubles} and sorts by ascending value */
+ * AtomicFieldCache#getDoubles} and sorts by ascending value */
public static final class DoubleComparator extends NumericComparator<DoubleValues,Double> {
private double[] docValues;
private final double[] values;
@@ -318,7 +316,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getDoubles(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getDoubles(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -397,7 +395,7 @@
}
/** Parses field's values as float (using {@link
- * FieldCache#getFloats} and sorts by ascending value */
+ * AtomicFieldCache#getFloats} and sorts by ascending value */
public static final class FloatComparator extends NumericComparator<FloatValues,Float> {
private float[] docValues;
private final float[] values;
@@ -454,7 +452,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getFloats(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getFloats(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -471,7 +469,7 @@
}
/** Parses field's values as short (using {@link
- * FieldCache#getShorts} and sorts by ascending value */
+ * AtomicFieldCache#getShorts} and sorts by ascending value */
public static final class ShortComparator extends NumericComparator<ShortValues,Short> {
private short[] docValues;
private final short[] values;
@@ -510,7 +508,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup( FieldCache.DEFAULT.getShorts(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getShorts(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -527,7 +525,7 @@
}
/** Parses field's values as int (using {@link
- * FieldCache#getInts} and sorts by ascending value */
+ * AtomicFieldCache#getInts} and sorts by ascending value */
public static final class IntComparator extends NumericComparator<IntValues,Integer> {
private int[] docValues;
private final int[] values;
@@ -588,7 +586,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getInts(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getInts(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -671,7 +669,7 @@
}
/** Parses field's values as long (using {@link
- * FieldCache#getLongs} and sorts by ascending value */
+ * AtomicFieldCache#getLongs} and sorts by ascending value */
public static final class LongComparator extends NumericComparator<LongValues,Long> {
private long[] docValues;
private final long[] values;
@@ -729,7 +727,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getLongs(context.reader, creator.field, creator));
+ setup(context.reader.getFieldCache().getLongs(creator.field, creator));
docValues = cached.values;
return this;
}
@@ -865,7 +863,7 @@
* ordinals. This is functionally equivalent to {@link
* TermValComparator}, but it first resolves the string
* to their relative ordinal positions (using the index
- * returned by {@link FieldCache#getTermsIndex}), and
+ * returned by {@link AtomicFieldCache#getTermsIndex}), and
* does most comparisons using the ordinals. For medium
* to large results, this comparator will be much faster
* than {@link TermValComparator}. For very small
@@ -1220,7 +1218,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
final int docBase = context.docBase;
- termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
+ termsIndex = context.reader.getFieldCache().getTermsIndex(field);
final PackedInts.Reader docToOrd = termsIndex.getDocToOrd();
FieldComparator perSegComp;
if (docToOrd instanceof Direct8) {
@@ -1335,7 +1333,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- docTerms = FieldCache.DEFAULT.getTerms(context.reader, field);
+ docTerms = context.reader.getFieldCache().getTerms(field);
return this;
}
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 1175430)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision )
@@ -38,21 +38,9 @@
import org.apache.lucene.benchmark.byTask.tasks.CountingSearchTestTask;
import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
import org.apache.lucene.collation.CollationKeyAnalyzer;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldsEnum;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LogDocMergePolicy;
-import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.index.SerialMergeScheduler;
-import org.apache.lucene.index.TermFreqVector;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.FieldCache.DocTermsIndex;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -329,7 +317,7 @@
Benchmark benchmark = execBenchmark(algLines);
IndexReader r = IndexReader.open(benchmark.getRunData().getDirectory(), true);
- DocTermsIndex idx = FieldCache.DEFAULT.getTermsIndex(r, "country");
+ DocTermsIndex idx = new SlowMultiReaderWrapper(r).getFieldCache().getTermsIndex("country");
final int maxDoc = r.maxDoc();
assertEquals(1000, maxDoc);
BytesRef br = new BytesRef();
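A sketch of walking a DocTermsIndex as this test does; it assumes getOrd(docID) and lookup(ord, BytesRef) carry over unchanged from the old FieldCache.DocTermsIndex API:

    import org.apache.lucene.search.cache.DocTermsIndex;
    import org.apache.lucene.util.BytesRef;

    class CountryDumpSketch {
      // getOrd/lookup are assumed carried over from FieldCache.DocTermsIndex.
      static void dump(DocTermsIndex idx, int maxDoc) {
        BytesRef br = new BytesRef();
        for (int docID = 0; docID < maxDoc; docID++) {
          BytesRef term = idx.lookup(idx.getOrd(docID), br);
          System.out.println(docID + " => " + term.utf8ToString());
        }
      }
    }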
Index: lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java
===================================================================
--- lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java (revision 1175430)
+++ lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java (revision )
@@ -21,9 +21,8 @@
import java.text.Collator;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.cache.DocTerms;
import org.apache.lucene.util.BytesRef;
/** Sorts by a field's value using the given Collator
@@ -91,7 +90,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- currentDocTerms = FieldCache.DEFAULT.getTerms(context.reader, field);
+ currentDocTerms = context.reader.getFieldCache().getTerms(field);
return this;
}
Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision )
@@ -41,7 +41,6 @@
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
@@ -1133,7 +1132,7 @@
// Open reader
IndexReader r = getOnlySegmentReader(IndexReader.open(dir, false));
- final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
+ final int[] ints = r.getFieldCache().getInts("number");
assertEquals(1, ints.length);
assertEquals(17, ints[0]);
@@ -1141,7 +1140,7 @@
IndexReader r2 = (IndexReader) r.clone();
r.close();
assertTrue(r2 != r);
- final int[] ints2 = FieldCache.DEFAULT.getInts(r2, "number");
+ final int[] ints2 = r2.getFieldCache().getInts("number");
r2.close();
assertEquals(1, ints2.length);
@@ -1169,7 +1168,7 @@
// Open reader1
IndexReader r = IndexReader.open(dir, false);
IndexReader r1 = getOnlySegmentReader(r);
- final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+ final int[] ints = r1.getFieldCache().getInts("number");
assertEquals(1, ints.length);
assertEquals(17, ints[0]);
@@ -1181,7 +1180,7 @@
IndexReader r2 = r.reopen();
r.close();
IndexReader sub0 = r2.getSequentialSubReaders()[0];
- final int[] ints2 = FieldCache.DEFAULT.getInts(sub0, "number");
+ final int[] ints2 = sub0.getFieldCache().getInts("number");
r2.close();
assertTrue(ints == ints2);
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java (revision )
@@ -17,25 +17,25 @@
package org.apache.lucene.queries.function.valuesource;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.AtomicFieldCache;
/**
* A base class for ValueSource implementations that retrieve values for
- * a single field from the {@link org.apache.lucene.search.FieldCache}.
+ * a single field from the {@link org.apache.lucene.search.cache.AtomicFieldCache}.
*
*
*/
public abstract class FieldCacheSource extends ValueSource {
protected String field;
- protected FieldCache cache = FieldCache.DEFAULT;
public FieldCacheSource(String field) {
this.field=field;
}
- public FieldCache getFieldCache() {
- return cache;
+ public AtomicFieldCache getFieldCache(IndexReader indexReader) {
+ return indexReader.getFieldCache();
}
public String getField() {
@@ -51,13 +51,12 @@
public boolean equals(Object o) {
if (!(o instanceof FieldCacheSource)) return false;
FieldCacheSource other = (FieldCacheSource)o;
- return this.field.equals(other.field)
- && this.cache == other.cache;
+ return this.field.equals(other.field);
}
@Override
public int hashCode() {
- return cache.hashCode() + field.hashCode();
- };
+ return field.hashCode();
+ }
}
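With the cached DEFAULT instance removed, the cache is now resolved per reader; a minimal sketch of the new lookup path:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.queries.function.valuesource.FieldCacheSource;
    import org.apache.lucene.search.cache.AtomicFieldCache;

    class FieldCacheLookupSketch {
      // The source no longer holds a static cache; it returns the cache of
      // whichever (atomic) reader the caller supplies.
      static AtomicFieldCache cacheFor(FieldCacheSource source, IndexReader reader) {
        return source.getFieldCache(reader);
      }
    }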
Index: modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java (revision )
@@ -17,12 +17,12 @@
package org.apache.lucene.queries.function.docvalues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.queries.function.DocValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.ValueSourceScorer;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.mutable.MutableValue;
@@ -34,7 +34,7 @@
* Serves as base class for DocValues based on StringIndex
**/
public abstract class StringIndexDocValues extends DocValues {
- protected final FieldCache.DocTermsIndex termsIndex;
+ protected final DocTermsIndex termsIndex;
protected final ValueSource vs;
protected final MutableValueStr val = new MutableValueStr();
protected final BytesRef spare = new BytesRef();
@@ -42,14 +42,14 @@
public StringIndexDocValues(ValueSource vs, AtomicReaderContext context, String field) throws IOException {
try {
- termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
+ termsIndex = context.reader.getFieldCache().getTermsIndex(field);
} catch (RuntimeException e) {
throw new StringIndexException(field, e);
}
this.vs = vs;
}
- public FieldCache.DocTermsIndex getDocTermsIndex() {
+ public DocTermsIndex getDocTermsIndex() {
return termsIndex;
}
Index: lucene/src/java/org/apache/lucene/search/cache/FloatValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/FloatValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/FloatValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.FloatParser;
-import org.apache.lucene.search.FieldCache.Parser;
+import org.apache.lucene.search.cache.parser.FloatParser;
import org.apache.lucene.search.cache.CachedArray.FloatValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -103,13 +102,13 @@
{
if( parser == null ) {
try {
- parser = FieldCache.DEFAULT_FLOAT_PARSER;
+ parser = FloatParser.DEFAULT_FLOAT_PARSER;
fillFloatValues( vals, reader, field );
return;
}
catch (NumberFormatException ne) {
vals.parserHashCode = null; // wipe the previous one
- parser = FieldCache.NUMERIC_UTILS_FLOAT_PARSER;
+ parser = FloatParser.NUMERIC_UTILS_FLOAT_PARSER;
fillFloatValues( vals, reader, field );
return;
}
@@ -147,7 +146,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
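
The creator keeps the two-step fallback: the Float.toString-based default parser is tried first, and a NumberFormatException signals NumericField-encoded terms, triggering a refill with the NumericUtils-aware parser. That second parser stops filling via StopFillCacheException once it reaches a shifted, lower-precision prefix term. A sketch of such a parser, mirroring the relocated NUMERIC_UTILS_FLOAT_PARSER:

    FloatParser numericParser = new FloatParser() {
      public float parseFloat(BytesRef term) {
        if (NumericUtils.getPrefixCodedIntShift(term) > 0) {
          // shifted terms are lower-precision duplicates; stop filling the cache
          throw new AtomicFieldCache.StopFillCacheException();
        }
        return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term));
      }
    };
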
Index: lucene/src/java/org/apache/lucene/search/cache/parser/ByteParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/ByteParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/ByteParser.java (revision )
@@ -0,0 +1,53 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+
+/** Interface to parse bytes from document fields.
+ * @see AtomicFieldCache#getBytes(String, ByteParser)
+ */
+public interface ByteParser extends Parser {
+
+ ByteParser DEFAULT_BYTE_PARSER = new DefaultByteParser();
+
+ /** Return a single byte representation of this field's value. */
+ public byte parseByte(BytesRef term);
+
+ /** The default parser for byte values, which are encoded by {@link Byte#toString(byte)} */
+ public static class DefaultByteParser implements ByteParser {
+
+ public byte parseByte(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Byte.parseByte(term.utf8ToString());
+ }
+ protected Object readResolve() {
+ return DEFAULT_BYTE_PARSER;
+ }
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".DEFAULT_BYTE_PARSER";
+ }
+
+ }
+
+}
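
Because parsers are now standalone interfaces in org.apache.lucene.search.cache.parser, a caller can hand a custom implementation directly to the per-reader cache. A hypothetical sketch, assuming a field whose byte values were indexed as two hex digits (the field name and encoding are illustrative, not part of this patch):

    ByteParser hexByteParser = new ByteParser() {
      public byte parseByte(BytesRef term) {
        // illustrative encoding: each value was indexed as a hex string
        return (byte) Integer.parseInt(term.utf8ToString(), 16);
      }
    };
    byte[] flags = leafReader.getFieldCache().getBytes("flagsHex", hexByteParser);
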
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java (revision )
@@ -32,7 +32,7 @@
import java.util.Map;
/**
- * Obtains int field values from the {@link org.apache.lucene.search.FieldCache}
+ * Obtains int field values from the {@link org.apache.lucene.search.cache.AtomicFieldCache}
* using <code>getInts()</code>
* and makes those values available as other numeric types, casting as needed. *
*
@@ -52,7 +52,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final IntValues vals = cache.getInts(readerContext.reader, field, creator);
+ final IntValues vals = readerContext.reader.getFieldCache().getInts(field, creator);
final int[] arr = vals.values;
final Bits valid = vals.valid;
Index: lucene/src/test/org/apache/lucene/index/TestSegmentFieldCacheImpl.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentFieldCacheImpl.java (revision )
+++ lucene/src/test/org/apache/lucene/index/TestSegmentFieldCacheImpl.java (revision )
@@ -0,0 +1,334 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.search.cache.DocTerms;
+import org.apache.lucene.search.cache.DocTermsIndex;
+import org.apache.lucene.search.cache.parser.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util._TestUtil;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.*;
+
+public class TestSegmentFieldCacheImpl extends LuceneTestCase {
+
+ protected IndexReader reader;
+ private String[] unicodeStrings;
+ private BytesRef[][] multiValued;
+ private Directory directory;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ int NUM_DOCS = atLeast(1000);
+ int NUM_ORDS = atLeast(2);
+ directory = newDirectory();
+ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+ long theLong = Long.MAX_VALUE;
+ double theDouble = Double.MAX_VALUE;
+ byte theByte = Byte.MAX_VALUE;
+ short theShort = Short.MAX_VALUE;
+ int theInt = Integer.MAX_VALUE;
+ float theFloat = Float.MAX_VALUE;
+ unicodeStrings = new String[NUM_DOCS];
+ multiValued = new BytesRef[NUM_DOCS][NUM_ORDS];
+ if (VERBOSE) {
+ System.out.println("TEST: setUp");
+ }
+ writer.w.setInfoStream(VERBOSE ? System.out : null);
+ FieldType fieldType = new FieldType();
+ fieldType.setIndexed(true);
+ for (int i = 0; i < NUM_DOCS; i++){
+ Document doc = new Document();
+ doc.add(newField("theLong", String.valueOf(theLong--), fieldType));
+ doc.add(newField("theDouble", String.valueOf(theDouble--), fieldType));
+ doc.add(newField("theByte", String.valueOf(theByte--), fieldType));
+ doc.add(newField("theShort", String.valueOf(theShort--), fieldType));
+ doc.add(newField("theInt", String.valueOf(theInt--), fieldType));
+ doc.add(newField("theFloat", String.valueOf(theFloat--), fieldType));
+
+ // sometimes skip the field:
+ if (random.nextInt(40) != 17) {
+ unicodeStrings[i] = generateString(i);
+ doc.add(newField("theRandomUnicodeString", unicodeStrings[i], fieldType));
+ }
+
+ // sometimes skip the field:
+ if (random.nextInt(10) != 8) {
+ for (int j = 0; j < NUM_ORDS; j++) {
+ String newValue = generateString(i);
+ multiValued[i][j] = new BytesRef(newValue);
+ doc.add(newField("theRandomUnicodeMultiValuedField", newValue, fieldType));
+ }
+ Arrays.sort(multiValued[i]);
+ }
+ writer.addDocument(doc);
+ }
+ reader = writer.getReader();
+ writer.close();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ public void testInfoStream() throws Exception {
+ List<IndexReader> subReaders = new LinkedList<IndexReader>();
+ ReaderUtil.gatherSubReaders(subReaders, reader);
+ AtomicFieldCache cache = subReaders.get(0).getFieldCache();
+ try {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+ cache.setInfoStream(new PrintStream(bos));
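+      // requesting the same field as doubles and then floats should trigger a WARNING in the info stream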
+ cache.getDoubles("theDouble");
+ cache.getFloats("theDouble");
+      assertTrue(bos.toString().contains("WARNING"));
+ } finally {
+ cache.purgeCache();
+ }
+ }
+
+ public void test() throws IOException {
+ IndexReader.ReaderContext readerContext = ReaderUtil.buildReaderContext(reader);
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ double [] doubles = atomicCache.getDoubles("theDouble");
+ assertSame("Second request to cache return same array", doubles, atomicCache.getDoubles("theDouble"));
+ assertSame("Second request with explicit parser return same array", doubles, atomicCache.getDoubles("theDouble", DoubleParser.DEFAULT_DOUBLE_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("doubles Size: " + doubles.length + " is not: " + expectedLength, doubles.length == expectedLength);
+ for (int i = 0; i < doubles.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(doubles[i] + " does not equal: " + (Double.MAX_VALUE - j), doubles[i] == (Double.MAX_VALUE - j));
+ }
+ }
+
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ long [] longs = atomicCache.getLongs("theLong");
+ assertSame("Second request to cache return same array", longs, atomicCache.getLongs("theLong"));
+ assertSame("Second request with explicit parser return same array", longs, atomicCache.getLongs("theLong", LongParser.DEFAULT_LONG_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("longs Size: " + longs.length + " is not: " + expectedLength, longs.length == expectedLength);
+ for (int i = 0; i < longs.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(longs[i] + " does not equal: " + (Long.MAX_VALUE - j) + " i=" + i, longs[i] == (Long.MAX_VALUE - j));
+ }
+ }
+
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ byte [] bytes = atomicCache.getBytes("theByte");
+ assertSame("Second request to cache return same array", bytes, atomicCache.getBytes("theByte"));
+ assertSame("Second request with explicit parser return same array", bytes, atomicCache.getBytes("theByte", ByteParser.DEFAULT_BYTE_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("bytes Size: " + bytes.length + " is not: " + expectedLength, bytes.length == expectedLength);
+ for (int i = 0; i < bytes.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(bytes[i] + " does not equal: " + (Byte.MAX_VALUE - j), bytes[i] == (byte) (Byte.MAX_VALUE - j));
+ }
+ }
+
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ short [] shorts = atomicCache.getShorts("theShort");
+ assertSame("Second request to cache return same array", shorts, atomicCache.getShorts("theShort"));
+ assertSame("Second request with explicit parser return same array", shorts, atomicCache.getShorts("theShort", ShortParser.DEFAULT_SHORT_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("shorts Size: " + shorts.length + " is not: " + expectedLength, shorts.length == expectedLength);
+ for (int i = 0; i < shorts.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(shorts[i] + " does not equal: " + (Short.MAX_VALUE - j), shorts[i] == (short) (Short.MAX_VALUE - j));
+ }
+ }
+
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ int [] ints = atomicCache.getInts("theInt");
+ assertSame("Second request to cache return same array", ints, atomicCache.getInts("theInt"));
+ assertSame("Second request with explicit parser return same array", ints, atomicCache.getInts("theInt", IntParser.DEFAULT_INT_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("ints Size: " + ints.length + " is not: " + expectedLength, ints.length == expectedLength);
+ for (int i = 0; i < ints.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(ints[i] + " does not equal: " + (Integer.MAX_VALUE - j), ints[i] == (Integer.MAX_VALUE - j));
+ }
+ }
+
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ float [] floats = atomicCache.getFloats("theFloat");
+ assertSame("Second request to cache return same array", floats, atomicCache.getFloats("theFloat"));
+ assertSame("Second request with explicit parser return same array", floats, atomicCache.getFloats("theFloat", FloatParser.DEFAULT_FLOAT_PARSER));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("floats Size: " + floats.length + " is not: " + expectedLength, floats.length == expectedLength);
+ for (int i = 0; i < floats.length; i++) {
+ int j = atomicReaderContext.docBase + i;
+ assertTrue(floats[i] + " does not equal: " + (Float.MAX_VALUE - j), floats[i] == (Float.MAX_VALUE - j));
+
+ }
+ }
+
+ final BytesRef br = new BytesRef();
+ // getTermsIndex
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ DocTermsIndex termsIndex = atomicCache.getTermsIndex("theRandomUnicodeString");
+ assertSame("Second request to cache return same array", termsIndex, atomicCache.getTermsIndex("theRandomUnicodeString"));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("doubles Size: " + termsIndex.size() + " is not: " + expectedLength, termsIndex.size() == expectedLength);
+ for (int i = 0; i < expectedLength; i++) {
+ int j = atomicReaderContext.docBase + i;
+ final BytesRef term = termsIndex.getTerm(i, br);
+ final String s = term == null ? null : term.utf8ToString();
+ assertTrue("for doc " + i + ": " + s + " does not equal: " + unicodeStrings[j], unicodeStrings[j] == null || unicodeStrings[j].equals(s));
+ }
+
+ int nTerms = termsIndex.numOrd();
+ // System.out.println("nTerms="+nTerms);
+
+ TermsEnum tenum = termsIndex.getTermsEnum();
+ BytesRef val = new BytesRef();
+ for (int i=1; i<nTerms; i++) {
+ BytesRef val1 = tenum.next();
+ BytesRef val2 = termsIndex.lookup(i,val);
+ // System.out.println("i="+i);
+ assertEquals(val2, val1);
+ }
+
+ // seek the enum around (note this isn't a great test here)
+ int num = atLeast(100);
+ for (int i = 0; i < num; i++) {
+ int k = _TestUtil.nextInt(random, 1, nTerms - 1);
+ BytesRef val1 = termsIndex.lookup(k, val);
+ assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seekCeil(val1));
+ assertEquals(val1, tenum.term());
+ }
+
+ // test bad field
+ termsIndex = atomicCache.getTermsIndex("bogusfield");
+ }
+
+ // getTerms
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ DocTerms terms = atomicCache.getTerms("theRandomUnicodeString");
+ assertSame("Second request to cache return same array", terms, atomicCache.getTerms("theRandomUnicodeString"));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ assertTrue("doubles Size: " + terms.size() + " is not: " + expectedLength, terms.size() == expectedLength);
+ for (int i = 0; i < expectedLength; i++) {
+ int j = atomicReaderContext.docBase + i;
+ final BytesRef term = terms.getTerm(i, br);
+ final String s = term == null ? null : term.utf8ToString();
+ assertTrue("for doc " + i + ": " + s + " does not equal: " + unicodeStrings[j], unicodeStrings[j] == null || unicodeStrings[j].equals(s));
+ }
+
+ // test bad field
+ terms = atomicCache.getTerms("bogusfield");
+ }
+
+ // getDocTermOrds
+ for (IndexReader.AtomicReaderContext atomicReaderContext : readerContext.leaves()) {
+ AtomicFieldCache atomicCache = atomicReaderContext.reader.getFieldCache();
+ DocTermOrds termOrds = atomicCache.getDocTermOrds("theRandomUnicodeMultiValuedField");
+ TermsEnum termsEnum = termOrds.getOrdTermsEnum(atomicReaderContext.reader);
+ assertSame("Second request to cache return same DocTermOrds", termOrds, atomicCache.getDocTermOrds("theRandomUnicodeMultiValuedField"));
+ int expectedLength = atomicReaderContext.reader.maxDoc();
+ DocTermOrds.TermOrdsIterator reuse = null;
+ for (int i = 0; i < expectedLength; i++) {
+ int j = atomicReaderContext.docBase + i;
+ reuse = termOrds.lookup(i, reuse);
+ final int[] buffer = new int[5];
+ // This will remove identical terms. A DocTermOrds doesn't return duplicate ords for a docId
+ List<BytesRef> values = new ArrayList<BytesRef>(new LinkedHashSet<BytesRef>(Arrays.asList(multiValued[j])));
+ for (;;) {
+ int chunk = reuse.read(buffer);
+ if (chunk == 0) {
+ for (int ord = 0; ord < values.size(); ord++) {
+ BytesRef term = values.get(ord);
+              assertNull(String.format("Document[%d] misses the field, so the term must be null but has value %s for ord %d", i, term, ord), term);
+ }
+ break;
+ }
+
+ for(int idx=0; idx < chunk; idx++) {
+ int key = buffer[idx];
+ termsEnum.seekExact((long) key);
+ String actual = termsEnum.term().utf8ToString();
+ String expected = values.get(idx).utf8ToString();
+ if (!expected.equals(actual)) {
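+            // apparent debugging aid: re-run the lookup so the mismatch can be inspected before the assert below fires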
+ reuse = termOrds.lookup(i, reuse);
+ reuse.read(buffer);
+ }
+ assertTrue(String.format("Expected value %s for doc %d and ord %d, but was %s", expected, i, idx, actual), expected.equals(actual));
+ }
+
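+          // read(buffer) returns at most buffer.length ords, so this always breaks after the first non-empty chunk;
+          // the test assumes all ords for a doc fit in one buffer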
+ if (chunk <= buffer.length) {
+ break;
+ }
+ }
+ }
+
+ // test bad field
+ termOrds = atomicCache.getDocTermOrds("bogusfield");
+ }
+ }
+
+ public void testEmptyIndex() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(500));
+ IndexReader r = IndexReader.open(writer, true);
+ List<IndexReader> subReaders = new LinkedList<IndexReader>();
+ ReaderUtil.gatherSubReaders(subReaders, reader);
+ AtomicFieldCache cache = subReaders.get(0).getFieldCache();
+ DocTerms terms = cache.getTerms("foobar");
+ DocTermsIndex termsIndex = cache.getTermsIndex("foobar");
+ writer.close();
+ r.close();
+ dir.close();
+ }
+
+ private String generateString(int i) {
+ String s = null;
+ if (i > 0 && random.nextInt(3) == 1) {
+ // reuse past string -- try to find one that's not null
+ for(int iter = 0; iter < 10 && s == null;iter++) {
+ s = unicodeStrings[random.nextInt(i)];
+ }
+ if (s == null) {
+ s = _TestUtil.randomUnicodeString(random);
+ }
+ } else {
+ s = _TestUtil.randomUnicodeString(random);
+ }
+ return s;
+ }
+
+}
\ No newline at end of file
Index: lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
===================================================================
--- lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision )
@@ -19,10 +19,13 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.search.cache.parser.DoubleParser;
+import org.apache.lucene.search.cache.parser.IntParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;
@@ -84,35 +87,38 @@
}
public void testSanity() throws IOException {
- FieldCache cache = FieldCache.DEFAULT;
- cache.purgeAllCaches();
+ AtomicFieldCache cacheA = new SlowMultiReaderWrapper(readerA).getFieldCache();
+ AtomicFieldCache cacheB = new SlowMultiReaderWrapper(readerB).getFieldCache();
+ AtomicFieldCache cacheX = new SlowMultiReaderWrapper(readerX).getFieldCache();
- cache.getDoubles(readerA, "theDouble");
- cache.getDoubles(readerA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
- cache.getDoubles(readerB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
+ purgeFieldCache(cacheA);
- cache.getInts(readerX, "theInt");
- cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
+ cacheA.getDoubles("theDouble");
+ cacheA.getDoubles("theDouble", DoubleParser.DEFAULT_DOUBLE_PARSER);
+ cacheB.getDoubles("theDouble", DoubleParser.DEFAULT_DOUBLE_PARSER);
+ cacheX.getInts("theInt");
+ cacheX.getInts("theInt", IntParser.DEFAULT_INT_PARSER);
+
// // //
Insanity[] insanity =
- FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+ FieldCacheSanityChecker.checkSanity(cacheA.getCacheEntries());
if (0 < insanity.length)
dumpArray(getTestLabel() + " INSANITY", insanity, System.err);
assertEquals("shouldn't be any cache insanity", 0, insanity.length);
- cache.purgeAllCaches();
+ purgeFieldCache(cacheA);
}
public void testInsanity1() throws IOException {
- FieldCache cache = FieldCache.DEFAULT;
- cache.purgeAllCaches();
+ AtomicFieldCache cache = new SlowMultiReaderWrapper(readerX).getFieldCache();
+ purgeFieldCache(cache);
- cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
- cache.getTerms(readerX, "theInt");
- cache.getBytes(readerX, "theByte");
+ cache.getInts("theInt", IntParser.DEFAULT_INT_PARSER);
+ cache.getTerms("theInt");
+ cache.getBytes("theByte");
// // //
@@ -127,24 +133,27 @@
insanity[0].getCacheEntries().length);
// we expect bad things, don't let tearDown complain about them
- cache.purgeAllCaches();
+ purgeFieldCache(cache);
}
public void testInsanity2() throws IOException {
- FieldCache cache = FieldCache.DEFAULT;
- cache.purgeAllCaches();
+ AtomicFieldCache cacheA = new SlowMultiReaderWrapper(readerA).getFieldCache();
+ AtomicFieldCache cacheB = new SlowMultiReaderWrapper(readerB).getFieldCache();
+ AtomicFieldCache cacheX = new SlowMultiReaderWrapper(readerX).getFieldCache();
- cache.getTerms(readerA, "theString");
- cache.getTerms(readerB, "theString");
- cache.getTerms(readerX, "theString");
+ purgeFieldCache(cacheA);
- cache.getBytes(readerX, "theByte");
+ cacheA.getTerms("theString");
+ cacheB.getTerms("theString");
+ cacheX.getTerms("theString");
+ cacheX.getBytes("theByte");
+
// // //
Insanity[] insanity =
- FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+ FieldCacheSanityChecker.checkSanity(cacheA.getCacheEntries());
assertEquals("wrong number of cache errors", 1, insanity.length);
assertEquals("wrong type of cache error",
@@ -154,7 +163,7 @@
insanity[0].getCacheEntries().length);
// we expect bad things, don't let tearDown complain about them
- cache.purgeAllCaches();
+ purgeFieldCache(cacheA);
}
public void testInsanity3() throws IOException {
Index: lucene/src/java/org/apache/lucene/search/cache/CachedArrayCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/CachedArrayCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/CachedArrayCreator.java (revision )
@@ -25,8 +25,8 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache.Parser;
import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java (revision )
@@ -25,7 +25,7 @@
import java.util.Map;
/**
- * Obtains int field values from the {@link org.apache.lucene.search.FieldCache}
+ * Obtains byte field values from the {@link org.apache.lucene.search.cache.AtomicFieldCache}
* using <code>getInts()</code>
* and makes those values available as other numeric types, casting as needed. *
*
@@ -45,7 +45,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final ByteValues vals = cache.getBytes(readerContext.reader, field, creator);
+ final ByteValues vals = readerContext.reader.getFieldCache().getBytes(field, creator);
final byte[] arr = vals.values;
return new DocValues() {
Index: lucene/src/test/org/apache/lucene/search/TestElevationComparator.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (revision )
@@ -23,6 +23,7 @@
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
@@ -144,7 +145,7 @@
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {
- FieldCache.DocTermsIndex idIndex;
+ DocTermsIndex idIndex;
private final int[] values = new int[numHits];
private final BytesRef tempBR = new BytesRef();
int bottomVal;
@@ -182,7 +183,7 @@
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, fieldname);
+ idIndex = context.reader.getFieldCache().getTermsIndex(fieldname);
return this;
}
Index: solr/core/src/java/org/apache/solr/request/UnInvertedField.java
===================================================================
--- solr/core/src/java/org/apache/solr/request/UnInvertedField.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/request/UnInvertedField.java (revision )
@@ -17,12 +17,13 @@
package org.apache.solr.request;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.SolrException;
@@ -477,11 +478,11 @@
int i = 0;
final FieldFacetStats[] finfo = new FieldFacetStats[facet.length];
//Initialize facetstats, if facets have been passed in
- FieldCache.DocTermsIndex si;
+ DocTermsIndex si;
for (String f : facet) {
FieldType facet_ft = searcher.getSchema().getFieldType(f);
try {
- si = FieldCache.DEFAULT.getTermsIndex(searcher.getIndexReader(), f);
+ si = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getTermsIndex(f);
}
catch (IOException e) {
throw new RuntimeException("failed to open field cache for: " + f, e);
@@ -503,7 +504,7 @@
}
if (doNegative) {
- OpenBitSet bs = (OpenBitSet) ((BitDocSet) docs).getBits().clone();
+ OpenBitSet bs = (OpenBitSet) docs.getBits().clone();
bs.flip(0, maxDoc);
// TODO: when iterator across negative elements is available, use that
// instead of creating a new bitset and inverting.
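
Only atomic readers carry an AtomicFieldCache, so UnInvertedField wraps the searcher's composite reader in a SlowMultiReaderWrapper to obtain a single top-level terms index. The same pattern applies anywhere a whole-index view is still needed; a minimal sketch, assuming a composite topReader and an illustrative field name:

    // composite readers have no cache of their own; wrap them into one atomic view
    DocTermsIndex si = new SlowMultiReaderWrapper(topReader).getFieldCache().getTermsIndex("category");
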
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java (revision )
@@ -43,7 +43,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final ShortValues vals = cache.getShorts(readerContext.reader, field, creator);
+ final ShortValues vals = readerContext.reader.getFieldCache().getShorts(field, creator);
final short[] arr = vals.values;
return new DocValues() {
Index: solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
===================================================================
--- solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java (revision )
@@ -19,14 +19,16 @@
import java.net.URL;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
+import org.apache.lucene.search.cache.CacheEntry;
+import org.apache.lucene.util.ReaderUtil;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoMBean;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.CacheEntry;
import org.apache.lucene.util.FieldCacheSanityChecker;
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
@@ -37,6 +39,12 @@
*/
public class SolrFieldCacheMBean implements SolrInfoMBean {
+ private final SolrIndexSearcher indexSearcher;
+
+ public SolrFieldCacheMBean(SolrIndexSearcher indexSearcher) {
+ this.indexSearcher = indexSearcher;
+ }
+
protected FieldCacheSanityChecker checker = new FieldCacheSanityChecker();
public String getName() { return this.getClass().getName(); }
@@ -55,18 +63,22 @@
public URL[] getDocs() {
return null;
}
+
public NamedList getStatistics() {
- NamedList stats = new SimpleOrderedMap();
- CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
- stats.add("entries_count", entries.length);
+ NamedList<Object> stats = new SimpleOrderedMap<Object>();
+
+ NamedList<Object> topLevelStats = new SimpleOrderedMap<Object>();
+ IndexReader topReader = indexSearcher.getTopReaderContext().reader;
+ CacheEntry[] entries = new SlowMultiReaderWrapper(topReader).getFieldCache().getCacheEntries();
+ topLevelStats.add("entries_count", entries.length);
for (int i = 0; i < entries.length; i++) {
CacheEntry e = entries[i];
- stats.add("entry#" + i, e.toString());
+ topLevelStats.add("entry#" + i, e.toString());
}
Insanity[] insanity = checker.check(entries);
- stats.add("insanity_count", insanity.length);
+ topLevelStats.add("insanity_count", insanity.length);
for (int i = 0; i < insanity.length; i++) {
/** RAM estimation is both CPU and memory intensive... we don't want to do it unless asked.
@@ -77,8 +89,33 @@
}
**/
- stats.add("insanity#" + i, insanity[i].toString());
+ topLevelStats.add("insanity#" + i, insanity[i].toString());
}
+ stats.add("top_entries", topLevelStats);
+
+ NamedList<Object> leaveEntriesStats = new SimpleOrderedMap<Object>();
+ IndexReader.ReaderContext[] leaves = ReaderUtil.leaves(indexSearcher.getTopReaderContext());
+ for (IndexReader.ReaderContext leave : leaves) {
+ NamedList<Object> leaveEntryStats = new SimpleOrderedMap<Object>();
+
+ CacheEntry[] leaveEntries = leave.reader.getFieldCache().getCacheEntries();
+ leaveEntryStats.add("entries_count", leaveEntries.length);
+ for (int i = 0; i < leaveEntries.length; i++) {
+ CacheEntry e = leaveEntries[i];
+ leaveEntryStats.add("entry#" + i, e.toString());
+ }
+
+ Insanity[] leaveInsanity = checker.check(leaveEntries);
+
+ leaveEntryStats.add("insanity_count", leaveInsanity.length);
+ for (int i = 0; i < leaveInsanity.length; i++) {
+ leaveEntryStats.add("insanity#" + i, leaveInsanity[i].toString());
+ }
+
+
+ leaveEntriesStats.add(leave.reader.getCoreCacheKey().toString(), leaveEntryStats);
+ }
+ stats.add("leave_entries", leaveEntriesStats);
return stats;
}
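
The MBean now reports two blocks: stats for the wrapped top-level cache and one entry set per leaf reader, keyed by core cache key. The leaf walk generalizes to any per-segment cache inspection; a minimal sketch, assuming a searcher whose top reader context is available:

    for (IndexReader.ReaderContext leaf : ReaderUtil.leaves(indexSearcher.getTopReaderContext())) {
      CacheEntry[] entries = leaf.reader.getFieldCache().getCacheEntries();
      System.out.println(leaf.reader.getCoreCacheKey() + " holds " + entries.length + " cache entries");
    }
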
Index: lucene/src/java/org/apache/lucene/search/FieldCache.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldCache.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldCache.java (revision 1175430)
@@ -1,772 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.cache.EntryCreator;
-import org.apache.lucene.search.cache.CachedArray.*;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.document.NumericField; // for javadocs
-import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
-import org.apache.lucene.util.packed.PackedInts;
-
-import java.io.IOException;
-import java.io.PrintStream;
-
-import java.text.DecimalFormat;
-
-/**
- * Expert: Maintains caches of term values.
- *
- * <p>Created: May 19, 2004 11:13:14 AM
- *
- * @since lucene 1.4
- * @see org.apache.lucene.util.FieldCacheSanityChecker
- */
-public interface FieldCache {
-
- public static final class CreationPlaceholder {
- Object value;
- }
-
- /**
- * Hack: When thrown from a Parser (NUMERIC_UTILS_* ones), this stops
- * processing terms and returns the current FieldCache
- * array.
- */
- public static final class StopFillCacheException extends RuntimeException {
- }
-
- /**
- * Marker interface as super-interface to all parsers. It
- * is used to specify a custom parser to {@link
- * SortField#SortField(String, FieldCache.Parser)}.
- */
- public interface Parser {
- }
-
- /** Interface to parse bytes from document fields.
- * @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser)
- */
- public interface ByteParser extends Parser {
- /** Return a single Byte representation of this field's value. */
- public byte parseByte(BytesRef term);
- }
-
- /** Interface to parse shorts from document fields.
- * @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser)
- */
- public interface ShortParser extends Parser {
- /** Return a short representation of this field's value. */
- public short parseShort(BytesRef term);
- }
-
- /** Interface to parse ints from document fields.
- * @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser)
- */
- public interface IntParser extends Parser {
- /** Return an integer representation of this field's value. */
- public int parseInt(BytesRef term);
- }
-
- /** Interface to parse floats from document fields.
- * @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser)
- */
- public interface FloatParser extends Parser {
- /** Return an float representation of this field's value. */
- public float parseFloat(BytesRef term);
- }
-
- /** Interface to parse long from document fields.
- * @see FieldCache#getLongs(IndexReader, String, FieldCache.LongParser)
- */
- public interface LongParser extends Parser {
- /** Return an long representation of this field's value. */
- public long parseLong(BytesRef term);
- }
-
- /** Interface to parse doubles from document fields.
- * @see FieldCache#getDoubles(IndexReader, String, FieldCache.DoubleParser)
- */
- public interface DoubleParser extends Parser {
- /** Return an long representation of this field's value. */
- public double parseDouble(BytesRef term);
- }
-
- /** Expert: The cache used internally by sorting and range query classes. */
- public static FieldCache DEFAULT = new FieldCacheImpl();
-
- /** The default parser for byte values, which are encoded by {@link Byte#toString(byte)} */
- public static final ByteParser DEFAULT_BYTE_PARSER = new ByteParser() {
- public byte parseByte(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Byte.parseByte(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_BYTE_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_BYTE_PARSER";
- }
- };
-
- /** The default parser for short values, which are encoded by {@link Short#toString(short)} */
- public static final ShortParser DEFAULT_SHORT_PARSER = new ShortParser() {
- public short parseShort(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Short.parseShort(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_SHORT_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_SHORT_PARSER";
- }
- };
-
- /** The default parser for int values, which are encoded by {@link Integer#toString(int)} */
- public static final IntParser DEFAULT_INT_PARSER = new IntParser() {
- public int parseInt(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Integer.parseInt(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_INT_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_INT_PARSER";
- }
- };
-
- /** The default parser for float values, which are encoded by {@link Float#toString(float)} */
- public static final FloatParser DEFAULT_FLOAT_PARSER = new FloatParser() {
- public float parseFloat(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Float.parseFloat(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_FLOAT_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_FLOAT_PARSER";
- }
- };
-
- /** The default parser for long values, which are encoded by {@link Long#toString(long)} */
- public static final LongParser DEFAULT_LONG_PARSER = new LongParser() {
- public long parseLong(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Long.parseLong(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_LONG_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_LONG_PARSER";
- }
- };
-
- /** The default parser for double values, which are encoded by {@link Double#toString(double)} */
- public static final DoubleParser DEFAULT_DOUBLE_PARSER = new DoubleParser() {
- public double parseDouble(BytesRef term) {
- // TODO: would be far better to directly parse from
- // UTF8 bytes... but really users should use
- // NumericField, instead, which already decodes
- // directly from byte[]
- return Double.parseDouble(term.utf8ToString());
- }
- protected Object readResolve() {
- return DEFAULT_DOUBLE_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".DEFAULT_DOUBLE_PARSER";
- }
- };
-
- /**
- * A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
- * via {@link NumericField}/{@link NumericTokenStream}.
- */
- public static final IntParser NUMERIC_UTILS_INT_PARSER=new IntParser(){
- public int parseInt(BytesRef term) {
- if (NumericUtils.getPrefixCodedIntShift(term) > 0)
- throw new FieldCacheImpl.StopFillCacheException();
- return NumericUtils.prefixCodedToInt(term);
- }
- protected Object readResolve() {
- return NUMERIC_UTILS_INT_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER";
- }
- };
-
- /**
- * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
- * via {@link NumericField}/{@link NumericTokenStream}.
- */
- public static final FloatParser NUMERIC_UTILS_FLOAT_PARSER=new FloatParser(){
- public float parseFloat(BytesRef term) {
- if (NumericUtils.getPrefixCodedIntShift(term) > 0)
- throw new FieldCacheImpl.StopFillCacheException();
- return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term));
- }
- protected Object readResolve() {
- return NUMERIC_UTILS_FLOAT_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER";
- }
- };
-
- /**
- * A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
- * via {@link NumericField}/{@link NumericTokenStream}.
- */
- public static final LongParser NUMERIC_UTILS_LONG_PARSER = new LongParser(){
- public long parseLong(BytesRef term) {
- if (NumericUtils.getPrefixCodedLongShift(term) > 0)
- throw new FieldCacheImpl.StopFillCacheException();
- return NumericUtils.prefixCodedToLong(term);
- }
- protected Object readResolve() {
- return NUMERIC_UTILS_LONG_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER";
- }
- };
-
- /**
- * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
- * via {@link NumericField}/{@link NumericTokenStream}.
- */
- public static final DoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new DoubleParser(){
- public double parseDouble(BytesRef term) {
- if (NumericUtils.getPrefixCodedLongShift(term) > 0)
- throw new FieldCacheImpl.StopFillCacheException();
- return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term));
- }
- protected Object readResolve() {
- return NUMERIC_UTILS_DOUBLE_PARSER;
- }
- @Override
- public String toString() {
- return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER";
- }
- };
-
- /** Checks the internal cache for an appropriate entry, and if none is
- * found, reads the terms in <code>field</code> as a single byte and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the single byte values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public byte[] getBytes (IndexReader reader, String field)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as bytes and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the bytes.
- * @param parser Computes byte for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public byte[] getBytes (IndexReader reader, String field, ByteParser parser)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as bytes and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the bytes.
- * @param creator Used to make the ByteValues
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public ByteValues getBytes(IndexReader reader, String field, EntryCreator<ByteValues> creator ) throws IOException;
-
-
- /** Checks the internal cache for an appropriate entry, and if none is
- * found, reads the terms in <code>field</code> as shorts and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the shorts.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public short[] getShorts (IndexReader reader, String field)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as shorts and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the shorts.
- * @param parser Computes short for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public short[] getShorts (IndexReader reader, String field, ShortParser parser)
- throws IOException;
-
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as shorts and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the shorts.
- * @param creator Computes short for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public ShortValues getShorts(IndexReader reader, String field, EntryCreator<ShortValues> creator ) throws IOException;
-
-
- /** Checks the internal cache for an appropriate entry, and if none is
- * found, reads the terms in <code>field</code> as integers and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the integers.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public int[] getInts (IndexReader reader, String field)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as integers and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the integers.
- * @param parser Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public int[] getInts (IndexReader reader, String field, IntParser parser)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as integers and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- * @param reader Used to get field values.
- * @param field Which field contains the integers.
- * @param creator Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public IntValues getInts(IndexReader reader, String field, EntryCreator<IntValues> creator ) throws IOException;
-
-
- /** Checks the internal cache for an appropriate entry, and if
- * none is found, reads the terms in <code>field</code> as floats and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the floats.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public float[] getFloats (IndexReader reader, String field)
- throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if
- * none is found, reads the terms in <code>field</code> as floats and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the floats.
- * @param parser Computes float for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public float[] getFloats (IndexReader reader, String field,
- FloatParser parser) throws IOException;
-
- /** Checks the internal cache for an appropriate entry, and if
- * none is found, reads the terms in <code>field</code> as floats and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- * @param reader Used to get field values.
- * @param field Which field contains the floats.
- * @param creator Computes float for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public FloatValues getFloats(IndexReader reader, String field, EntryCreator<FloatValues> creator ) throws IOException;
-
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is
- * found, reads the terms in <code>field</code> as longs and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the longs.
- * @return The values in the given field for each document.
- * @throws java.io.IOException If any error occurs.
- */
- public long[] getLongs(IndexReader reader, String field)
- throws IOException;
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as longs and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the longs.
- * @param parser Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public long[] getLongs(IndexReader reader, String field, LongParser parser)
- throws IOException;
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as longs and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the longs.
- * @param creator Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public LongValues getLongs(IndexReader reader, String field, EntryCreator<LongValues> creator ) throws IOException;
-
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is
- * found, reads the terms in <code>field</code> as integers and returns an array
- * of size <code>reader.maxDoc()</code> of the value each document
- * has in the given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the doubles.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public double[] getDoubles(IndexReader reader, String field)
- throws IOException;
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as doubles and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the doubles.
- * @param parser Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public double[] getDoubles(IndexReader reader, String field, DoubleParser parser)
- throws IOException;
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is found,
- * reads the terms in <code>field</code> as doubles and returns an array of
- * size <code>reader.maxDoc()</code> of the value each document has in the
- * given field.
- *
- * @param reader Used to get field values.
- * @param field Which field contains the doubles.
- * @param creator Computes integer for string values.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public DoubleValues getDoubles(IndexReader reader, String field, EntryCreator<DoubleValues> creator ) throws IOException;
-
-
- /** Returned by {@link #getTerms} */
- public abstract static class DocTerms {
- /** The BytesRef argument must not be null; the method
- * returns the same BytesRef, or an empty (length=0)
- * BytesRef if the doc did not have this field or was
- * deleted. */
- public abstract BytesRef getTerm(int docID, BytesRef ret);
-
- /** Returns true if this doc has this field and is not
- * deleted. */
- public abstract boolean exists(int docID);
-
- /** Number of documents */
- public abstract int size();
- }
-
- /** Checks the internal cache for an appropriate entry, and if none
- * is found, reads the term values in <code>field</code>
- * and returns a {@link DocTerms} instance, providing a
- * method to retrieve the term (as a BytesRef) per document.
- * @param reader Used to get field values.
- * @param field Which field contains the strings.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public DocTerms getTerms (IndexReader reader, String field)
- throws IOException;
-
- /** Expert: just like {@link #getTerms(IndexReader,String)},
- * but you can specify whether more RAM should be consumed in exchange for
- * faster lookups (default is "true"). Note that the
- * first call for a given reader and field "wins",
- * subsequent calls will share the same cache entry. */
- public DocTerms getTerms (IndexReader reader, String field, boolean fasterButMoreRAM)
- throws IOException;
-
- /** Returned by {@link #getTermsIndex} */
- public abstract static class DocTermsIndex {
-
- public int binarySearchLookup(BytesRef key, BytesRef spare) {
- // this special case is the reason that Arrays.binarySearch() isn't useful.
- if (key == null)
- return 0;
-
- int low = 1;
- int high = numOrd()-1;
-
- while (low <= high) {
- int mid = (low + high) >>> 1;
- int cmp = lookup(mid, spare).compareTo(key);
-
- if (cmp < 0)
- low = mid + 1;
- else if (cmp > 0)
- high = mid - 1;
- else
- return mid; // key found
- }
- return -(low + 1); // key not found.
- }
-
- /** The BytesRef argument must not be null; the method
- * returns the same BytesRef, or an empty (length=0)
- * BytesRef if this ord is the null ord (0). */
- public abstract BytesRef lookup(int ord, BytesRef reuse);
-
- /** Convenience method, to lookup the Term for a doc.
- * If this doc is deleted or did not have this field,
- * this will return an empty (length=0) BytesRef. */
- public BytesRef getTerm(int docID, BytesRef reuse) {
- return lookup(getOrd(docID), reuse);
- }
-
- /** Returns sort ord for this document. Ord 0 is
- * reserved for docs that are deleted or did not have
- * this field. */
- public abstract int getOrd(int docID);
-
- /** Returns total unique ord count; this includes +1 for
- * the null ord (always 0). */
- public abstract int numOrd();
-
- /** Number of documents */
- public abstract int size();
-
- /** Returns a TermsEnum that can iterate over the values in this index entry */
- public abstract TermsEnum getTermsEnum();
-
- /** @lucene.internal */
- public abstract PackedInts.Reader getDocToOrd();
- }
-
- /** Checks the internal cache for an appropriate entry, and if none
- * is found, reads the term values in <code>field</code>
- * and returns a {@link DocTermsIndex} instance, providing
- * methods to retrieve the sort ord and term (as a BytesRef) per document.
- * @param reader Used to get field values.
- * @param field Which field contains the strings.
- * @return The values in the given field for each document.
- * @throws IOException If any error occurs.
- */
- public DocTermsIndex getTermsIndex (IndexReader reader, String field)
- throws IOException;
-
-
- /** Expert: just like {@link
- * #getTermsIndex(IndexReader,String)}, but you can specify
- * whether more RAM should be consumed in exchange for
- * faster lookups (default is "true"). Note that the
- * first call for a given reader and field "wins",
- * subsequent calls will share the same cache entry. */
- public DocTermsIndex getTermsIndex (IndexReader reader, String field, boolean fasterButMoreRAM)
- throws IOException;
-
- /**
- * Checks the internal cache for an appropriate entry, and if none is found, reads the term values
- * in <code>field</code> and returns a {@link DocTermOrds} instance, providing a method to retrieve
- * the terms (as ords) per document.
- *
- * @param reader Used to build a {@link DocTermOrds} instance
- * @param field Which field contains the strings.
- * @return a {@link DocTermOrds} instance
- * @throws IOException If any error occurs.
- */
- public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException;
-
- /**
- * EXPERT: A unique Identifier/Description for each item in the FieldCache.
- * Can be useful for logging/debugging.
- * @lucene.experimental
- */
- public static abstract class CacheEntry {
- public abstract Object getReaderKey();
- public abstract String getFieldName();
- public abstract Class<?> getCacheType();
- public abstract Object getCustom();
- public abstract Object getValue();
- private String size = null;
- protected final void setEstimatedSize(String size) {
- this.size = size;
- }
- /**
- * @see #estimateSize(RamUsageEstimator)
- */
- public void estimateSize() {
- estimateSize(new RamUsageEstimator(false)); // doesn't check for interned
- }
- /**
- * Computes (and stores) the estimated size of the cache Value
- * @see #getEstimatedSize
- */
- public void estimateSize(RamUsageEstimator ramCalc) {
- long size = ramCalc.estimateRamUsage(getValue());
- setEstimatedSize(RamUsageEstimator.humanReadableUnits
- (size, new DecimalFormat("0.#")));
-
- }
- /**
- * The most recently estimated size of the value, null unless
- * estimateSize has been called.
- */
- public final String getEstimatedSize() {
- return size;
- }
-
-
- @Override
- public String toString() {
- StringBuilder b = new StringBuilder();
- b.append("'").append(getReaderKey()).append("'=>");
- b.append("'").append(getFieldName()).append("',");
- b.append(getCacheType()).append(",").append(getCustom());
- b.append("=>").append(getValue().getClass().getName()).append("#");
- b.append(System.identityHashCode(getValue()));
-
- String s = getEstimatedSize();
- if(null != s) {
- b.append(" (size =~ ").append(s).append(')');
- }
-
- return b.toString();
- }
-
- }
-
- /**
- * EXPERT: Generates an array of CacheEntry objects representing all items
- * currently in the FieldCache.
- * <p>
- * NOTE: These CacheEntry objects maintain a strong reference to the
- * Cached Values. Holding a reference to a CacheEntry after the IndexReader
- * associated with it has been garbage collected will prevent the Value
- * itself from being garbage collected when the Cache drops the WeakReference.
- * </p>
- * @lucene.experimental
- */
- public abstract CacheEntry[] getCacheEntries();
-
- /**
- * <p>
- * EXPERT: Instructs the FieldCache to forcibly expunge all entries
- * from the underlying caches. This is intended only to be used for
- * test methods as a way to ensure a known base state of the Cache
- * (without needing to rely on GC to free WeakReferences).
- * It should not be relied on for "Cache maintenance" in general
- * application code.
- * </p>
- * @lucene.experimental
- */
- public abstract void purgeAllCaches();
-
- /**
- * Expert: drops all cache entries associated with this
- * reader. NOTE: this reader must precisely match the
- * reader that the cache entry is keyed on. If you pass a
- * top-level reader, it usually will have no effect as
- * Lucene now caches at the segment reader level.
- */
- public abstract void purge(IndexReader r);
-
- /**
- * If non-null, FieldCacheImpl will warn whenever
- * entries are created that are not sane according to
- * {@link org.apache.lucene.util.FieldCacheSanityChecker}.
- */
- public void setInfoStream(PrintStream stream);
-
- /** counterpart of {@link #setInfoStream(PrintStream)} */
- public PrintStream getInfoStream();
-}
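
The net effect of deleting this interface is easiest to see side by side. A minimal sketch, assuming subReader is an atomic (segment-level) reader; the names match the API introduced elsewhere in this patch:

    // Before this patch: one global cache, keyed on (reader, field)
    int[] ids = FieldCache.DEFAULT.getInts(subReader, "id");

    // After this patch: each atomic reader owns its own cache, keyed on field alone
    int[] sameIds = subReader.getFieldCache().getInts("id");
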
Index: solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java (revision )
@@ -21,7 +21,8 @@
import java.util.HashMap;
import java.util.Map;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.solr.common.params.SolrParams;
@@ -246,10 +247,10 @@
public NamedList<?> getFieldCacheStats(String fieldName, String[] facet ) {
FieldType ft = searcher.getSchema().getFieldType(fieldName);
- FieldCache.DocTermsIndex si = null;
+ DocTermsIndex si = null;
try {
- si = FieldCache.DEFAULT.getTermsIndex(searcher.getIndexReader(), fieldName);
+ si = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getTermsIndex(fieldName);
- }
+ }
catch (IOException e) {
throw new RuntimeException( "failed to open field cache for: "+fieldName, e );
}
@@ -263,8 +264,8 @@
for( String f : facet ) {
ft = searcher.getSchema().getFieldType(f);
try {
- si = FieldCache.DEFAULT.getTermsIndex(searcher.getIndexReader(), f);
+ si = new SlowMultiReaderWrapper(searcher.getIndexReader()).getFieldCache().getTermsIndex(f);
- }
+ }
catch (IOException e) {
throw new RuntimeException( "failed to open field cache for: "+f, e );
}
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java (revision )
@@ -22,9 +22,10 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.queries.function.DocValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache.DocTerms;
+import org.apache.lucene.search.cache.DocTerms;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
@@ -52,9 +53,9 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException
{
- final DocTerms terms = cache.getTerms(readerContext.reader, field, true );
+ final DocTerms terms = readerContext.reader.getFieldCache().getTerms(field, true);
final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader;
-
+
return new IntDocValues(this) {
BytesRef ref = new BytesRef();
Index: lucene/src/java/org/apache/lucene/search/cache/ByteValuesCreator.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/ByteValuesCreator.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/cache/ByteValuesCreator.java (revision )
@@ -25,11 +25,10 @@
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.FieldCache.ByteParser;
-import org.apache.lucene.search.FieldCache.Parser;
+import org.apache.lucene.search.cache.parser.ByteParser;
import org.apache.lucene.search.cache.CachedArray.ByteValues;
+import org.apache.lucene.search.cache.parser.Parser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@@ -101,7 +100,7 @@
protected void fillByteValues( ByteValues vals, IndexReader reader, String field ) throws IOException
{
if( parser == null ) {
- parser = FieldCache.DEFAULT_BYTE_PARSER;
+ parser = ByteParser.DEFAULT_BYTE_PARSER;
}
setParserAndResetCounts(vals, parser);
@@ -133,7 +132,7 @@
}
vals.numTerms++;
}
- } catch (FieldCache.StopFillCacheException stop) {}
+ } catch (AtomicFieldCache.StopFillCacheException stop) {}
if( vals.valid == null ) {
vals.valid = checkMatchAllBits( validBits, vals.numDocs, maxDoc );
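
fillByteValues now defaults to ByteParser.DEFAULT_BYTE_PARSER and catches the relocated AtomicFieldCache.StopFillCacheException. A custom parser plugs into the same slot; a hypothetical sketch, assuming ByteParser declares a single parseByte(BytesRef) method by analogy with the other parser interfaces in this patch:

    import org.apache.lucene.search.cache.parser.ByteParser;
    import org.apache.lucene.util.BytesRef;

    // Hypothetical parser: a field that stores one byte as a two-digit hex string
    public class HexByteParser implements ByteParser {
      public byte parseByte(BytesRef term) { // assumed method name
        return (byte) Integer.parseInt(term.utf8ToString(), 16);
      }
    }
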
Index: lucene/src/test/org/apache/lucene/search/FieldCacheRewriteMethod.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/FieldCacheRewriteMethod.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/FieldCacheRewriteMethod.java (revision )
@@ -24,6 +24,7 @@
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.OpenBitSet;
@@ -110,7 +111,7 @@
*/
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader, query.field);
+ final DocTermsIndex fcsi = context.reader.getFieldCache().getTermsIndex(query.field);
// Cannot use FixedBitSet because we require long index (ord):
final OpenBitSet termSet = new OpenBitSet(fcsi.numOrd());
TermsEnum termsEnum = query.getTermsEnum(new Terms() {
Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision )
@@ -29,7 +29,6 @@
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
@@ -283,7 +282,7 @@
assertEquals(numDocs, r.numDocs());
for(IndexReader sub : r.getSequentialSubReaders()) {
- final int[] ids = FieldCache.DEFAULT.getInts(sub, "id");
+ final int[] ids = sub.getFieldCache().getInts("id");
for(int docID=0;docID<sub.numDocs();docID++) {
final Document doc = sub.document(docID);
final Field f = (Field) doc.getField("nf");
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java (revision )
@@ -30,7 +30,7 @@
import org.apache.lucene.util.mutable.MutableValueFloat;
/**
- * Obtains float field values from the {@link org.apache.lucene.search.FieldCache}
+ * Obtains float field values from the {@link org.apache.lucene.search.cache.AtomicFieldCache}
* using <code>getFloats()</code>
* and makes those values available as other numeric types, casting as needed.
*
@@ -50,7 +50,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final FloatValues vals = cache.getFloats(readerContext.reader, field, creator);
+ final FloatValues vals = readerContext.reader.getFieldCache().getFloats(field, creator);
final float[] arr = vals.values;
final Bits valid = vals.valid;
Index: lucene/src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/index/IndexReader.java (revision )
@@ -31,8 +31,8 @@
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.index.codecs.PerDocValues;
import org.apache.lucene.index.values.IndexDocValues;
-import org.apache.lucene.search.FieldCache; // javadocs
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.cache.AtomicFieldCache;
import org.apache.lucene.store.*;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
@@ -92,7 +92,7 @@
* closed. At this point it is safe for apps to evict
* this reader from any caches keyed on {@link
* #getCoreCacheKey}. This is the same interface that
- * {@link FieldCache} uses, internally, to evict
+ * {@link AtomicFieldCache} uses, internally, to evict
* entries.</p>
*
* <p>For other readers, this listener is called when they
@@ -1576,7 +1576,17 @@
public int getTermInfosIndexDivisor() {
throw new UnsupportedOperationException("This reader does not support this method.");
}
-
+
+ /**
+ * Returns an {@link AtomicFieldCache} instance for this reader.
+ * Not all {@link IndexReader} subclasses implement this method.
+ *
+ * @return {@link AtomicFieldCache} instance for this reader
+ */
+ public AtomicFieldCache getFieldCache() {
+ throw new UnsupportedOperationException("This reader does not support this method.");
+ }
+
public final IndexDocValues docValues(String field) throws IOException {
ensureOpen();
final PerDocValues perDoc = perDocValues();
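
Only atomic readers are expected to override the new getFieldCache(); composite readers keep the UnsupportedOperationException default. A hedged sketch of the intended call pattern, with the field name purely illustrative:

    // Per-segment access: segment readers are atomic, so this is the supported path
    for (IndexReader sub : reader.getSequentialSubReaders()) {
      long[] stamps = sub.getFieldCache().getLongs("timestamp");
      // stamps[docID] holds the value for each docID < sub.maxDoc()
    }
    // On a composite reader the same call throws UnsupportedOperationException,
    // unless the reader is wrapped in SlowMultiReaderWrapper (see below).
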
Index: modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
===================================================================
--- modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (revision 1175430)
+++ modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (revision )
@@ -30,6 +30,7 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
@@ -559,8 +560,10 @@
w.close();
// NOTE: intentional but temporary field cache insanity!
- final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id");
+ SlowMultiReaderWrapper smrw1 = new SlowMultiReaderWrapper(r);
+ final int[] docIDToID = smrw1.getFieldCache().getInts("id");
IndexReader r2 = null;
+ SlowMultiReaderWrapper smrw2 = null;
Directory dir2 = null;
try {
@@ -586,8 +589,9 @@
// group, so we can use single pass collector
dir2 = newDirectory();
r2 = getDocBlockReader(dir2, groupDocs);
+ smrw2 = new SlowMultiReaderWrapper(r2);
final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
- final int[] docIDToID2 = FieldCache.DEFAULT.getInts(r2, "id");
+ final int[] docIDToID2 = smrw2.getFieldCache().getInts("id");
final IndexSearcher s2 = newSearcher(r2);
final ShardState shards2 = new ShardState(s2);
@@ -868,9 +872,9 @@
s.close();
s2.close();
} finally {
- FieldCache.DEFAULT.purge(r);
+ smrw1.getFieldCache().purgeCache();
if (r2 != null) {
- FieldCache.DEFAULT.purge(r2);
+ smrw2.getFieldCache().purgeCache();
}
}
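
The test keeps a handle on each wrapper so the very cache it filled can be purged afterwards; a condensed sketch of that lifecycle, using the names from the hunk above:

    SlowMultiReaderWrapper smrw = new SlowMultiReaderWrapper(r);
    try {
      final int[] docIDToID = smrw.getFieldCache().getInts("id");
      // ... run the grouping assertions against docIDToID ...
    } finally {
      smrw.getFieldCache().purgeCache(); // release the wrapper-level entries
    }
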
Index: lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 1175430)
@@ -1,396 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.cache.*;
-import org.apache.lucene.search.cache.CachedArray.*;
-import org.apache.lucene.util.FieldCacheSanityChecker;
-
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.*;
-
-/**
- * Expert: The default cache implementation, storing all values in memory.
- * A WeakHashMap is used for storage.
- *
- * <p>Created: May 19, 2004 4:40:36 PM
- *
- * @lucene.internal -- this is now public so that the tests can use reflection
- * to call methods. It will likely be removed without (much) notice.
- *
- * @since lucene 1.4
- */
-public class FieldCacheImpl implements FieldCache { // Made public so that tests can use it
-
- private Map<Class<?>,Cache> caches;
- FieldCacheImpl() {
- init();
- }
- private synchronized void init() {
- caches = new HashMap<Class<?>,Cache>(9);
- caches.put(Byte.TYPE, new Cache<ByteValues>(this));
- caches.put(Short.TYPE, new Cache<ShortValues>(this));
- caches.put(Integer.TYPE, new Cache<IntValues>(this));
- caches.put(Float.TYPE, new Cache<FloatValues>(this));
- caches.put(Long.TYPE, new Cache<LongValues>(this));
- caches.put(Double.TYPE, new Cache<DoubleValues>(this));
- caches.put(DocTermsIndex.class, new Cache<DocTermsIndex>(this));
- caches.put(DocTerms.class, new Cache<DocTerms>(this));
- caches.put(DocTermOrds.class, new Cache<DocTermOrds>(this));
- }
-
- public synchronized void purgeAllCaches() {
- init();
- }
-
- public synchronized void purge(IndexReader r) {
- for(Cache c : caches.values()) {
- c.purge(r);
- }
- }
-
- public synchronized CacheEntry[] getCacheEntries() {
- List<CacheEntry> result = new ArrayList<CacheEntry>(17);
- for(final Map.Entry<Class<?>,Cache> cacheEntry: caches.entrySet()) {
- final Cache<?> cache = cacheEntry.getValue();
- final Class<?> cacheType = cacheEntry.getKey();
- synchronized(cache.readerCache) {
- for( Object readerKey : cache.readerCache.keySet() ) {
- Map<?, Object> innerCache = cache.readerCache.get(readerKey);
- for (final Map.Entry<?, Object> mapEntry : innerCache.entrySet()) {
- Entry entry = (Entry)mapEntry.getKey();
- result.add(new CacheEntryImpl(readerKey, entry.field,
- cacheType, entry.creator,
- mapEntry.getValue()));
- }
- }
- }
- }
- return result.toArray(new CacheEntry[result.size()]);
- }
-
- private static final class CacheEntryImpl extends CacheEntry {
- private final Object readerKey;
- private final String fieldName;
- private final Class<?> cacheType;
- private final EntryCreator custom;
- private final Object value;
- CacheEntryImpl(Object readerKey, String fieldName,
- Class<?> cacheType,
- EntryCreator custom,
- Object value) {
- this.readerKey = readerKey;
- this.fieldName = fieldName;
- this.cacheType = cacheType;
- this.custom = custom;
- this.value = value;
-
- // :HACK: for testing.
-// if (null != locale || SortField.CUSTOM != sortFieldType) {
-// throw new RuntimeException("Locale/sortFieldType: " + this);
-// }
-
- }
- @Override
- public Object getReaderKey() { return readerKey; }
- @Override
- public String getFieldName() { return fieldName; }
- @Override
- public Class<?> getCacheType() { return cacheType; }
- @Override
- public Object getCustom() { return custom; }
- @Override
- public Object getValue() { return value; }
- }
-
- final static IndexReader.ReaderFinishedListener purgeReader = new IndexReader.ReaderFinishedListener() {
- @Override
- public void finished(IndexReader reader) {
- FieldCache.DEFAULT.purge(reader);
- }
- };
-
- /** Expert: Internal cache. */
- final static class Cache<T> {
- Cache() {
- this.wrapper = null;
- }
-
- Cache(FieldCache wrapper) {
- this.wrapper = wrapper;
- }
-
- final FieldCache wrapper;
-
- final Map<Object,Map<Entry<T>,Object>> readerCache = new WeakHashMap<Object,Map<Entry<T>,Object>>();
-
- protected Object createValue(IndexReader reader, Entry entryKey) throws IOException {
- return entryKey.creator.create( reader );
- }
-
- /** Remove this reader from the cache, if present. */
- public void purge(IndexReader r) {
- Object readerKey = r.getCoreCacheKey();
- synchronized(readerCache) {
- readerCache.remove(readerKey);
- }
- }
-
- @SuppressWarnings("unchecked")
- public Object get(IndexReader reader, Entry<T> key) throws IOException {
- Map<Entry<T>,Object> innerCache;
- Object value;
- final Object readerKey = reader.getCoreCacheKey();
- synchronized (readerCache) {
- innerCache = readerCache.get(readerKey);
- if (innerCache == null) {
- // First time this reader is using FieldCache
- innerCache = new HashMap<Entry<T>,Object>();
- readerCache.put(readerKey, innerCache);
- reader.addReaderFinishedListener(purgeReader);
- value = null;
- } else {
- value = innerCache.get(key);
- }
- if (value == null) {
- value = new CreationPlaceholder();
- innerCache.put(key, value);
- }
- }
- if (value instanceof CreationPlaceholder) {
- synchronized (value) {
- CreationPlaceholder progress = (CreationPlaceholder) value;
- if (progress.value == null) {
- progress.value = createValue(reader, key);
- synchronized (readerCache) {
- innerCache.put(key, progress.value);
- }
-
- // Only check if key.custom (the parser) is
- // non-null; else, we check twice for a single
- // call to FieldCache.getXXX
- if (key.creator != null && wrapper != null) {
- final PrintStream infoStream = wrapper.getInfoStream();
- if (infoStream != null) {
- printNewInsanity(infoStream, progress.value);
- }
- }
- }
- return progress.value;
- }
- }
-
- // Validate new entries
- if( key.creator.shouldValidate() ) {
- key.creator.validate( (T)value, reader);
- }
- return value;
- }
-
- private void printNewInsanity(PrintStream infoStream, Object value) {
- final FieldCacheSanityChecker.Insanity[] insanities = FieldCacheSanityChecker.checkSanity(wrapper);
- for(int i=0;i<insanities.length;i++) {
- final FieldCacheSanityChecker.Insanity insanity = insanities[i];
- final CacheEntry[] entries = insanity.getCacheEntries();
- for(int j=0;j<entries.length;j++) {
- if (entries[j].getValue() == value) {
- // OK this insanity involves our entry
- infoStream.println("WARNING: new FieldCache insanity created\nDetails: " + insanity.toString());
- infoStream.println("\nStack:\n");
- new Throwable().printStackTrace(infoStream);
- break;
- }
- }
- }
- }
- }
-
- /** Expert: Every composite-key in the internal cache is of this type. */
- static class Entry<T> {
- final String field; // which Fieldable
- final EntryCreator<T> creator; // which custom comparator or parser
-
- /** Creates one of these objects for a custom comparator/parser. */
- Entry (String field, EntryCreator<T> custom) {
- this.field = field;
- this.creator = custom;
- }
-
- /** Two of these are equal iff they reference the same field and type. */
- @Override
- public boolean equals (Object o) {
- if (o instanceof Entry) {
- Entry other = (Entry) o;
- if (other.field.equals(field)) {
- if (other.creator == null) {
- if (creator == null) return true;
- } else if (other.creator.equals (creator)) {
- return true;
- }
- }
- }
- return false;
- }
-
- /** Composes a hashcode based on the field and type. */
- @Override
- public int hashCode() {
- return field.hashCode() ^ (creator==null ? 0 : creator.hashCode());
- }
- }
-
- // inherit javadocs
- public byte[] getBytes (IndexReader reader, String field) throws IOException {
- return getBytes(reader, field, new ByteValuesCreator(field, null)).values;
- }
-
- // inherit javadocs
- public byte[] getBytes(IndexReader reader, String field, ByteParser parser) throws IOException {
- return getBytes(reader, field, new ByteValuesCreator(field, parser)).values;
- }
-
- @SuppressWarnings("unchecked")
- public ByteValues getBytes(IndexReader reader, String field, EntryCreator<ByteValues> creator ) throws IOException
- {
- return (ByteValues)caches.get(Byte.TYPE).get(reader, new Entry(field, creator));
- }
-
- // inherit javadocs
- public short[] getShorts (IndexReader reader, String field) throws IOException {
- return getShorts(reader, field, new ShortValuesCreator(field,null)).values;
- }
-
- // inherit javadocs
- public short[] getShorts(IndexReader reader, String field, ShortParser parser) throws IOException {
- return getShorts(reader, field, new ShortValuesCreator(field,parser)).values;
- }
-
- @SuppressWarnings("unchecked")
- public ShortValues getShorts(IndexReader reader, String field, EntryCreator<ShortValues> creator ) throws IOException
- {
- return (ShortValues)caches.get(Short.TYPE).get(reader, new Entry(field, creator));
- }
-
- // inherit javadocs
- public int[] getInts (IndexReader reader, String field) throws IOException {
- return getInts(reader, field, new IntValuesCreator( field, null )).values;
- }
-
- // inherit javadocs
- public int[] getInts(IndexReader reader, String field, IntParser parser) throws IOException {
- return getInts(reader, field, new IntValuesCreator( field, parser )).values;
- }
-
- @SuppressWarnings("unchecked")
- public IntValues getInts(IndexReader reader, String field, EntryCreator<IntValues> creator ) throws IOException {
- return (IntValues)caches.get(Integer.TYPE).get(reader, new Entry(field, creator));
- }
-
- // inherit javadocs
- public float[] getFloats (IndexReader reader, String field) throws IOException {
- return getFloats(reader, field, new FloatValuesCreator( field, null ) ).values;
- }
-
- // inherit javadocs
- public float[] getFloats(IndexReader reader, String field, FloatParser parser) throws IOException {
- return getFloats(reader, field, new FloatValuesCreator( field, parser ) ).values;
- }
-
- @SuppressWarnings("unchecked")
- public FloatValues getFloats(IndexReader reader, String field, EntryCreator<FloatValues> creator ) throws IOException {
- return (FloatValues)caches.get(Float.TYPE).get(reader, new Entry(field, creator));
- }
-
- public long[] getLongs(IndexReader reader, String field) throws IOException {
- return getLongs(reader, field, new LongValuesCreator( field, null ) ).values;
- }
-
- // inherit javadocs
- public long[] getLongs(IndexReader reader, String field, FieldCache.LongParser parser) throws IOException {
- return getLongs(reader, field, new LongValuesCreator( field, parser ) ).values;
- }
-
- @SuppressWarnings("unchecked")
- public LongValues getLongs(IndexReader reader, String field, EntryCreator<LongValues> creator ) throws IOException {
- return (LongValues)caches.get(Long.TYPE).get(reader, new Entry(field, creator));
- }
-
- // inherit javadocs
- public double[] getDoubles(IndexReader reader, String field) throws IOException {
- return getDoubles(reader, field, new DoubleValuesCreator( field, null ) ).values;
- }
-
- // inherit javadocs
- public double[] getDoubles(IndexReader reader, String field, FieldCache.DoubleParser parser) throws IOException {
- return getDoubles(reader, field, new DoubleValuesCreator( field, parser ) ).values;
- }
-
- @SuppressWarnings("unchecked")
- public DoubleValues getDoubles(IndexReader reader, String field, EntryCreator<DoubleValues> creator ) throws IOException {
- return (DoubleValues)caches.get(Double.TYPE).get(reader, new Entry(field, creator));
- }
-
- public DocTermsIndex getTermsIndex(IndexReader reader, String field) throws IOException {
- return getTermsIndex(reader, field, new DocTermsIndexCreator(field));
- }
-
- public DocTermsIndex getTermsIndex(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
- return getTermsIndex(reader, field, new DocTermsIndexCreator(field,
- fasterButMoreRAM ? DocTermsIndexCreator.FASTER_BUT_MORE_RAM : 0));
- }
-
- @SuppressWarnings("unchecked")
- public DocTermsIndex getTermsIndex(IndexReader reader, String field, EntryCreator<DocTermsIndex> creator) throws IOException {
- return (DocTermsIndex)caches.get(DocTermsIndex.class).get(reader, new Entry(field, creator));
- }
-
- // TODO: if a DocTermsIndex was already created, we
- // should share it...
- public DocTerms getTerms(IndexReader reader, String field) throws IOException {
- return getTerms(reader, field, new DocTermsCreator(field));
- }
-
- public DocTerms getTerms(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
- return getTerms(reader, field, new DocTermsCreator(field,
- fasterButMoreRAM ? DocTermsCreator.FASTER_BUT_MORE_RAM : 0));
- }
-
- @SuppressWarnings("unchecked")
- public DocTerms getTerms(IndexReader reader, String field, EntryCreator<DocTerms> creator) throws IOException {
- return (DocTerms)caches.get(DocTerms.class).get(reader, new Entry(field, creator));
- }
-
- @SuppressWarnings("unchecked")
- public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException {
- return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new Entry(field, new DocTermOrdsCreator(field, 0)));
- }
-
- private volatile PrintStream infoStream;
-
- public void setInfoStream(PrintStream stream) {
- infoStream = stream;
- }
-
- public PrintStream getInfoStream() {
- return infoStream;
- }
-}
-
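
One idiom in the deleted implementation is worth noting: Cache.get() parks a CreationPlaceholder in the map so that the expensive fill runs once per (reader, entry) without holding the map lock. A stripped-down sketch of that pattern with simplified types; this illustrates the idiom, it is not the deleted class:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    final class MiniCache {
      static final class Placeholder { Object value; }

      private final Map<Object, Object> innerCache = new HashMap<Object, Object>();

      Object get(Object key) throws IOException {
        Object value;
        synchronized (innerCache) {
          value = innerCache.get(key);
          if (value == null) {
            value = new Placeholder();        // claim the slot cheaply...
            innerCache.put(key, value);
          }
        }
        if (value instanceof Placeholder) {
          Placeholder p = (Placeholder) value;
          synchronized (p) {                  // ...then build without the map lock
            if (p.value == null) {
              p.value = createValue(key);     // expensive fill, run once per key
              synchronized (innerCache) {
                innerCache.put(key, p.value); // swap placeholder for real value
              }
            }
            return p.value;
          }
        }
        return value;
      }

      private Object createValue(Object key) throws IOException {
        return key; // stand-in; the real code delegated to EntryCreator.create(reader)
      }
    }
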
Index: lucene/src/java/org/apache/lucene/search/cache/parser/FloatParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/FloatParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/FloatParser.java (revision )
@@ -0,0 +1,78 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * Interface to parse floats from document fields.
+ * @see AtomicFieldCache#getFloats(String, FloatParser)
+ */
+public interface FloatParser extends Parser {
+
+ FloatParser DEFAULT_FLOAT_PARSER = new DefaultFloatParser();
+ FloatParser NUMERIC_UTILS_FLOAT_PARSER = new NumericFloatParser();
+
+ /** Return a float representation of this field's value. */
+ public float parseFloat(BytesRef term);
+
+
+ /** The default parser for float values, which are encoded by {@link Float#toString(float)} */
+ public static class DefaultFloatParser implements FloatParser {
+
+ public float parseFloat(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Float.parseFloat(term.utf8ToString());
+ }
+ protected Object readResolve() {
+ return DEFAULT_FLOAT_PARSER;
+ }
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".DEFAULT_FLOAT_PARSER";
+ }
+
+ }
+
+ /**
+ * A parser instance for float values encoded with {@link org.apache.lucene.util.NumericUtils}, e.g. when indexed
+ * via {@link org.apache.lucene.document.NumericField}/{@link org.apache.lucene.analysis.NumericTokenStream}.
+ */
+ public static class NumericFloatParser implements FloatParser {
+
+ public float parseFloat(BytesRef term) {
+ if (NumericUtils.getPrefixCodedIntShift(term) > 0)
+ throw new AtomicFieldCache.StopFillCacheException();
+ return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term));
+ }
+ protected Object readResolve() {
+ return NUMERIC_UTILS_FLOAT_PARSER;
+ }
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER";
+ }
+
+ }
+
+}
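
The two bundled instances cover the Float.toString and NumericUtils encodings; any other term format needs a user-supplied parser. A hypothetical sketch, assuming Parser itself declares no methods and atomicReader is any reader that supports getFieldCache():

    // Hypothetical: a field that stores percentages as strings such as "42.5%"
    FloatParser percentParser = new FloatParser() {
      public float parseFloat(BytesRef term) {
        String s = term.utf8ToString();
        return Float.parseFloat(s.substring(0, s.length() - 1)) / 100f;
      }
    };
    float[] discounts = atomicReader.getFieldCache().getFloats("discount", percentParser);
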
Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1175430)
+++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision )
@@ -48,8 +48,8 @@
import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
import org.apache.lucene.index.codecs.pulsing.PulsingCodec;
import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.search.cache.CacheEntry;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.RandomSimilarityProvider;
@@ -650,11 +650,15 @@
* method can be overridden to do nothing.
* </p>
*
- * @see FieldCache#purgeAllCaches()
+ * @see org.apache.lucene.index.SlowMultiReaderWrapper.InsaneNonAtomicFieldCache#purgeAllCaches()
*/
- protected void purgeFieldCache(final FieldCache fc) {
- fc.purgeAllCaches();
+ protected void purgeFieldCache(AtomicFieldCache fieldCache) {
+ if (SlowMultiReaderWrapper.InsaneNonAtomicFieldCache.class.isInstance(fieldCache)) {
+ ((SlowMultiReaderWrapper.InsaneNonAtomicFieldCache) fieldCache).purgeAllCaches();
+ } else {
+ fieldCache.purgeCache();
- }
+ }
+ }
protected String getTestLabel() {
return getClass().getName() + "." + getName();
@@ -720,8 +724,8 @@
} catch (Throwable t) {
if (problem == null) problem = t;
}
-
+
- purgeFieldCache(FieldCache.DEFAULT);
+ purgeFieldCache(SlowMultiReaderWrapper.getNonAtomicFieldCache());
if (problem != null) {
testsFailed = true;
@@ -842,7 +846,7 @@
* @see org.apache.lucene.util.FieldCacheSanityChecker
*/
protected void assertSaneFieldCaches(final String msg) {
- final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
+ final CacheEntry[] entries = SlowMultiReaderWrapper.getNonAtomicFieldCache().getCacheEntries();
Insanity[] insanity = null;
try {
try {
Index: solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java (revision )
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.schema.FieldType;
@@ -37,7 +37,7 @@
public class FieldFacetStats {
public final String name;
- final FieldCache.DocTermsIndex si;
+ final DocTermsIndex si;
final FieldType ft;
final int startTermIndex;
@@ -52,7 +52,7 @@
private final BytesRef tempBR = new BytesRef();
- public FieldFacetStats(String name, FieldCache.DocTermsIndex si, FieldType ft, int numStatsTerms) {
+ public FieldFacetStats(String name, DocTermsIndex si, FieldType ft, int numStatsTerms) {
this.name = name;
this.si = si;
this.ft = ft;
Index: lucene/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java (revision 1175430)
+++ lucene/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java (revision )
@@ -22,6 +22,8 @@
import org.apache.lucene.index.DocsEnum; // javadoc @link
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.BytesRef;
@@ -42,10 +44,10 @@
* <p/>
*
* The first invocation of this filter on a given field will
- * be slower, since a {@link FieldCache.DocTermsIndex} must be
+ * be slower, since a {@link DocTermsIndex} must be
* created. Subsequent invocations using the same field
* will re-use this cache. However, as with all
- * functionality based on {@link FieldCache}, persistent RAM
+ * functionality based on {@link AtomicFieldCache}, persistent RAM
* is consumed to hold the cache, and is not freed until the
* {@link IndexReader} is closed. In contrast, TermsFilter
* has no persistent RAM consumption.
@@ -111,21 +113,21 @@
this.terms[i] = new BytesRef(terms[i]);
}
- public FieldCache getFieldCache() {
- return FieldCache.DEFAULT;
+ public AtomicFieldCache getFieldCache(IndexReader reader) {
+ return reader.getFieldCache();
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- return new FieldCacheTermsFilterDocIdSet(getFieldCache().getTermsIndex(context.reader, field));
+ return new FieldCacheTermsFilterDocIdSet(context.reader.getFieldCache().getTermsIndex(field));
}
protected class FieldCacheTermsFilterDocIdSet extends DocIdSet {
- private FieldCache.DocTermsIndex fcsi;
+ private DocTermsIndex fcsi;
private FixedBitSet bits;
- public FieldCacheTermsFilterDocIdSet(FieldCache.DocTermsIndex fcsi) {
+ public FieldCacheTermsFilterDocIdSet(DocTermsIndex fcsi) {
this.fcsi = fcsi;
bits = new FixedBitSet(this.fcsi.numOrd());
final BytesRef spare = new BytesRef();
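
In use, nothing changes for callers except that the cache now comes from the segment reader in the supplied context. A hedged usage sketch, with query and searcher assumed to be in scope:

    Filter colorFilter = new FieldCacheTermsFilter("color", "red", "green", "blue");
    TopDocs hits = searcher.search(query, colorFilter, 10);
    // The first search per segment/field pays for building the DocTermsIndex;
    // later searches on the same segment reuse the cached ords.
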
Index: lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (revision )
@@ -20,11 +20,10 @@
import java.io.IOException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.cache.parser.DoubleParser;
+import org.apache.lucene.search.cache.parser.LongParser;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.SimilarityProvider;
-import org.apache.lucene.search.similarities.Similarity.ExactDocScorer;
-import org.apache.lucene.search.similarities.Similarity.SloppyDocScorer;
-import org.apache.lucene.search.similarities.Similarity.Stats;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.TermContext;
import org.apache.lucene.index.FieldInvertState;
@@ -93,7 +92,7 @@
}
}
- static final class JustCompileExtendedFieldCacheLongParser implements FieldCache.LongParser {
+ static final class JustCompileExtendedFieldCacheLongParser implements LongParser {
public long parseLong(BytesRef string) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
@@ -101,7 +100,7 @@
}
- static final class JustCompileExtendedFieldCacheDoubleParser implements FieldCache.DoubleParser {
+ static final class JustCompileExtendedFieldCacheDoubleParser implements DoubleParser {
public double parseDouble(BytesRef term) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
Index: lucene/src/java/org/apache/lucene/search/cache/parser/IntParser.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/parser/IntParser.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/parser/IntParser.java (revision )
@@ -0,0 +1,87 @@
+package org.apache.lucene.search.cache.parser;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.cache.AtomicFieldCache;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * Interface to parse ints from document fields.
+ *
+ * @see AtomicFieldCache#getInts(String, IntParser)
+ */
+public interface IntParser extends Parser {
+
+ IntParser DEFAULT_INT_PARSER = new DefaultIntParser();
+ IntParser NUMERIC_UTILS_INT_PARSER = new NumericIntParser();
+
+ /**
+ * Return an integer representation of this field's value.
+ */
+ public int parseInt(BytesRef term);
+
+
+ /**
+ * The default parser for int values, which are encoded by {@link Integer#toString(int)}
+ */
+ public static class DefaultIntParser implements IntParser {
+
+ public int parseInt(BytesRef term) {
+ // TODO: would be far better to directly parse from
+ // UTF8 bytes... but really users should use
+ // NumericField, instead, which already decodes
+ // directly from byte[]
+ return Integer.parseInt(term.utf8ToString());
+ }
+
+ protected Object readResolve() {
+ return DEFAULT_INT_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName() + ".DEFAULT_INT_PARSER";
+ }
+
+ }
+
+ /**
+ * A parser instance for int values encoded by {@link org.apache.lucene.util.NumericUtils}, e.g. when indexed
+ * via {@link org.apache.lucene.document.NumericField}/{@link org.apache.lucene.analysis.NumericTokenStream}.
+ */
+ public static class NumericIntParser implements IntParser {
+
+ public int parseInt(BytesRef term) {
+ if (NumericUtils.getPrefixCodedIntShift(term) > 0)
+ throw new AtomicFieldCache.StopFillCacheException();
+ return NumericUtils.prefixCodedToInt(term);
+ }
+
+ protected Object readResolve() {
+ return NUMERIC_UTILS_INT_PARSER;
+ }
+
+ @Override
+ public String toString() {
+ return AtomicFieldCache.class.getName() + ".NUMERIC_UTILS_INT_PARSER";
+ }
+
+ }
+
+}
\ No newline at end of file
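
NumericIntParser's StopFillCacheException is the interesting part: a NumericField indexes one term per precision shift, only shift-0 terms carry full-precision values, and the exception ends the fill loop cleanly once the coarser terms begin. A hedged sketch of direct use, with atomicReader and the field name assumed:

    int[] prices = atomicReader.getFieldCache()
        .getInts("price", IntParser.NUMERIC_UTILS_INT_PARSER);
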
Index: solr/core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
===================================================================
--- solr/core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java (revision 1175430)
+++ solr/core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java (revision )
@@ -19,9 +19,9 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
+import org.apache.lucene.search.cache.DocTermsIndex;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.packed.Direct16;
@@ -65,7 +65,7 @@
private final BytesRef[] values;
private final int[] readerGen;
- private FieldCache.DocTermsIndex termsIndex;
+ private DocTermsIndex termsIndex;
private final String field;
private final BytesRef NULL_VAL;
@@ -135,7 +135,7 @@
protected final int[] readerGen;
protected int currentReaderGen = -1;
- protected FieldCache.DocTermsIndex termsIndex;
+ protected DocTermsIndex termsIndex;
protected int bottomSlot = -1;
protected int bottomOrd;
@@ -436,7 +436,7 @@
}
public static FieldComparator createComparator(IndexReader reader, TermOrdValComparator_SML parent) throws IOException {
- parent.termsIndex = FieldCache.DEFAULT.getTermsIndex(reader, parent.field);
+ parent.termsIndex = reader.getFieldCache().getTermsIndex(parent.field);
final PackedInts.Reader docToOrd = parent.termsIndex.getDocToOrd();
PerSegmentComparator perSegComp;
Index: lucene/src/test/org/apache/lucene/index/TestTermsEnum.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestTermsEnum.java (revision 1175430)
+++ lucene/src/test/org/apache/lucene/index/TestTermsEnum.java (revision )
@@ -35,7 +35,6 @@
import org.apache.lucene.document.NumericField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;
@@ -230,7 +229,7 @@
w.close();
// NOTE: intentional insanity!!
- final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id");
+ final int[] docIDToID = new SlowMultiReaderWrapper(r).getFieldCache().getInts("id");
for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
Index: lucene/src/java/org/apache/lucene/search/cache/AtomicFieldCache.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/cache/AtomicFieldCache.java (revision )
+++ lucene/src/java/org/apache/lucene/search/cache/AtomicFieldCache.java (revision )
@@ -0,0 +1,328 @@
+package org.apache.lucene.search.cache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocTermOrds;
+import org.apache.lucene.search.cache.parser.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
+
+/**
+ * Expert: Maintains caches of term values.
+ */
+public interface AtomicFieldCache {
+
+ /** Checks the internal cache for an appropriate entry, and if none is
+ * found, reads the terms in <code>field</code> as a single byte and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the single byte values.
+ * @return The values in the given field for each document.
+ * @throws java.io.IOException If any error occurs.
+ */
+ public byte[] getBytes (String field) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as bytes and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the bytes.
+ * @param parser Computes byte for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public byte[] getBytes (String field, ByteParser parser) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as bytes and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the bytes.
+ * @param creator Used to make the ByteValues
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.ByteValues getBytes(String field, EntryCreator<CachedArray.ByteValues> creator) throws IOException;
+
+
+ /** Checks the internal cache for an appropriate entry, and if none is
+ * found, reads the terms in <code>field</code> as shorts and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the shorts.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public short[] getShorts(String field) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as shorts and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the shorts.
+ * @param parser Computes short for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public short[] getShorts(String field, ShortParser parser) throws IOException;
+
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as shorts and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the shorts.
+ * @param creator Computes short for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.ShortValues getShorts(String field, EntryCreator<CachedArray.ShortValues> creator) throws IOException;
+
+
+ /** Checks the internal cache for an appropriate entry, and if none is
+ * found, reads the terms in <code>field</code> as integers and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the integers.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public int[] getInts(String field) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as integers and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the integers.
+ * @param parser Computes integer for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public int[] getInts(String field, IntParser parser) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as integers and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ * @param field Which field contains the integers.
+ * @param creator Computes integer for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.IntValues getInts(String field, EntryCreator<CachedArray.IntValues> creator) throws IOException;
+
+
+ /** Checks the internal cache for an appropriate entry, and if
+ * none is found, reads the terms in <code>field</code> as floats and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the floats.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public float[] getFloats(String field) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if
+ * none is found, reads the terms in <code>field</code> as floats and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the floats.
+ * @param parser Computes float for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public float[] getFloats(String field, FloatParser parser) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if
+ * none is found, reads the terms in <code>field</code> as floats and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ * @param field Which field contains the floats.
+ * @param creator Computes float for string values.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.FloatValues getFloats(String field, EntryCreator<CachedArray.FloatValues> creator) throws IOException;
+
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is
+ * found, reads the terms in <code>field</code> as longs and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ *
+ * @param field Which field contains the longs.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public long[] getLongs(String field) throws IOException;
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as longs and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ *
+ * @param field Which field contains the longs.
+ * @param parser Computes a long from each term's value.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public long[] getLongs(String field, LongParser parser) throws IOException;
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as longs and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ *
+ * @param field Which field contains the longs.
+ * @param creator Creates the cached long values for the field.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.LongValues getLongs(String field, EntryCreator<CachedArray.LongValues> creator) throws IOException;
+
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is
+ * found, reads the terms in <code>field</code> as doubles and returns an array
+ * of size <code>reader.maxDoc()</code> of the value each document
+ * has in the given field.
+ *
+ * @param field Which field contains the doubles.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public double[] getDoubles(String field) throws IOException;
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as doubles and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ *
+ * @param field Which field contains the doubles.
+ * @param parser Computes a double from each term's value.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public double[] getDoubles(String field, DoubleParser parser) throws IOException;
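+
+ // An illustrative sketch using the NumericUtils-based parser constant that
+ // this patch defines on DoubleParser, for fields indexed via NumericField
+ // ("price" is a hypothetical field name):
+ //
+ //   double[] prices = cache.getDoubles("price",
+ //       DoubleParser.NUMERIC_UTILS_DOUBLE_PARSER);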
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is found,
+ * reads the terms in <code>field</code> as doubles and returns an array of
+ * size <code>reader.maxDoc()</code> of the value each document has in the
+ * given field.
+ *
+ * @param field Which field contains the doubles.
+ * @param creator Creates the cached double values for the field.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public CachedArray.DoubleValues getDoubles(String field, EntryCreator<CachedArray.DoubleValues> creator) throws IOException;
+
+ /** Checks the internal cache for an appropriate entry, and if none
+ * is found, reads the term values in <code>field</code>
+ * and returns a {@link DocTerms} instance, providing a
+ * method to retrieve the term (as a BytesRef) per document.
+ * @param field Which field contains the strings.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public DocTerms getTerms(String field) throws IOException;
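+
+ // An illustrative sketch; a getTerm(docID, reuse) accessor on DocTerms is
+ // assumed here, by analogy with the FieldCache API this patch refactors:
+ //
+ //   DocTerms terms = cache.getTerms("title");
+ //   BytesRef title = terms.getTerm(docID, new BytesRef());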
+
+ /** Expert: just like {@link #getTerms(String)},
+ * but you can specify whether more RAM should be consumed in exchange for
+ * faster lookups (default is "true"). Note that the
+ * first call for a given reader and field "wins";
+ * subsequent calls will share the same cache entry. */
+ public DocTerms getTerms(String field, boolean fasterButMoreRAM) throws IOException;
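+
+ // An illustrative sketch: passing false favors a smaller in-memory form over
+ // lookup speed, and since the first call per reader and field wins, it must
+ // run before anything else caches the same field:
+ //
+ //   DocTerms compact = cache.getTerms("title", false);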
+
+ /** Checks the internal cache for an appropriate entry, and if none
+ * is found, reads the term values in <code>field</code>
+ * and returns a {@link DocTermsIndex} instance, providing a
+ * method to retrieve the term (as a BytesRef) per document.
+ * @param field Which field contains the strings.
+ * @return The values in the given field for each document.
+ * @throws IOException If any error occurs.
+ */
+ public DocTermsIndex getTermsIndex(String field) throws IOException;
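+
+ // An illustrative sketch; getOrd(docID) and lookup(ord, reuse) are assumed
+ // accessors, by analogy with the FieldCache DocTermsIndex being refactored:
+ //
+ //   DocTermsIndex idx = cache.getTermsIndex("category");
+ //   int ord = idx.getOrd(docID);  // ordinal of this doc's term
+ //   BytesRef term = idx.lookup(ord, new BytesRef());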
+
+
+ /** Expert: just like {@link
+ * #getTermsIndex(String)}, but you can specify
+ * whether more RAM should be consumed in exchange for
+ * faster lookups (default is "true"). Note that the
+ * first call for a given reader and field "wins";
+ * subsequent calls will share the same cache entry. */
+ public DocTermsIndex getTermsIndex(String field, boolean fasterButMoreRAM) throws IOException;
+
+ /**
+ * Checks the internal cache for an appropriate entry, and if none is found, reads the term values
+ * in <code>field</code> and returns a {@link DocTermOrds} instance, providing a method to retrieve
+ * the terms (as ords) per document.
+ *
+ * @param field Which field contains the strings.
+ * @return a {@link DocTermOrds} instance
+ * @throws IOException If any error occurs.
+ */
+ public DocTermOrds getDocTermOrds(String field) throws IOException;
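+
+ // An illustrative sketch for multi-valued fields; the TermOrdsIterator
+ // lookup/read loop is an assumption modeled on the existing DocTermOrds
+ // class rather than anything this patch defines:
+ //
+ //   DocTermOrds dto = cache.getDocTermOrds("tags");
+ //   DocTermOrds.TermOrdsIterator iter = dto.lookup(docID, null);
+ //   int[] buffer = new int[8];
+ //   int read;
+ //   while ((read = iter.read(buffer)) != 0) {
+ //     // buffer[0..read) holds this document's term ordinals
+ //   }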
+
+ /**
+ * EXPERT: Generates an array of CacheEntry objects representing all items
+ * currently in this field cache.
+ * <p>
+ * NOTE: These CacheEntry objects maintain a strong reference to the
+ * Cached Values. Maintaining references to a CacheEntry after the
+ * IndexReader associated with it has been garbage collected will prevent
+ * the Value itself from being garbage collected when the Cache drops its
+ * WeakReference.
+ * </p>
+ * @lucene.experimental
+ */
+ public CacheEntry[] getCacheEntries();
+
+ /**
+ * Expert: drops all cache entries associated with this
+ * field cache.
+ */
+ public void purgeCache();
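+
+ // An illustrative diagnostic sketch combining the two expert hooks above;
+ // CacheEntry.toString() is assumed to describe each entry:
+ //
+ //   for (CacheEntry e : cache.getCacheEntries()) {
+ //     System.out.println(e);  // what is cached, and for which field
+ //   }
+ //   cache.purgeCache();       // then drop every entry held by this cache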
+
+ /**
+ * If non-null, the cache implementation will warn whenever
+ * entries are created that are not sane according to
+ * {@link org.apache.lucene.util.FieldCacheSanityChecker}.
+ */
+ public void setInfoStream(PrintStream stream);
+
+ /** Counterpart of {@link #setInfoStream(PrintStream)}. */
+ public PrintStream getInfoStream();
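+
+ // An illustrative sketch: route sanity-check warnings to stderr.
+ //
+ //   cache.setInfoStream(System.err);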
+
+ final class CreationPlaceholder {
+ public Object value;
+ }
+
+ /**
+ * Hack: When thrown from a Parser (e.g. the NUMERIC_UTILS_* ones), this
+ * stops processing terms and returns the cache array filled so far.
+ */
+ final class StopFillCacheException extends RuntimeException {
+ }
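+
+ // An illustrative, hypothetical parser: throwing StopFillCacheException ends
+ // the fill loop early, so all terms sorting after the throw are skipped. A
+ // single parseInt(BytesRef) method on IntParser is assumed, by analogy with
+ // the other parser interfaces in this patch:
+ //
+ //   IntParser stopOnEmpty = new IntParser() {
+ //     public int parseInt(BytesRef term) {
+ //       if (term.length == 0) throw new StopFillCacheException();
+ //       return Integer.parseInt(term.utf8ToString());
+ //     }
+ //   };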
+}
Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java
===================================================================
--- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java (revision 1175430)
+++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java (revision )
@@ -33,7 +33,7 @@
import java.util.Map;
/**
- * Obtains float field values from the {@link org.apache.lucene.search.FieldCache}
+ * Obtains long field values from the {@link org.apache.lucene.search.cache.AtomicFieldCache}
- * using <code>getFloats()</code>
+ * using <code>getLongs()</code>
* and makes those values available as other numeric types, casting as needed.
*
@@ -61,7 +61,7 @@
@Override
public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
- final LongValues vals = cache.getLongs(readerContext.reader, field, creator);
+ final LongValues vals = readerContext.reader.getFieldCache().getLongs(field, creator);
final long[] arr = vals.values;
final Bits valid = vals.valid;