LUCENE-5666: merge trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene5666@1595228 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 702ddbe..052e890 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -65,6 +65,18 @@
   as tokens anymore, and now iterates cells on-demand during indexing instead of
   building a collection.  RPT now has more setters. (David Smiley)
 
+* LUCENE-5666: Change uninverted access (sorting, faceting, grouping, etc)
+  to use the DocValues API instead of FieldCache. For FieldCache functionality,
+  use UninvertingReader in lucene/misc (or implement your own FilterReader).
+  UninvertingReader is more efficient: it supports multi-valued numeric fields,
+  detects when a multi-valued field is actually single-valued, and reuses caches
+  across compatible types (e.g. a SORTED cache also serves BINARY and SORTED_SET
+  access without insanity). "Insanity" is no longer possible unless you ask for it.
+  Rename FieldCache* and DocTermOrds* classes in the search package to DocValues*.
+  Move SortedSetSortField to core and add SortedSetFieldSource to queries/, which
+  takes the same selectors. Add helper methods to DocValues.java that are better
+  suited for search code (never return null, etc).  (Mike McCandless, Robert Muir)
+
 Documentation
 
 * LUCENE-5392: Add/improve analysis package documentation to reflect
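For callers migrating off FieldCache, the entry above boils down to a read-time wrap. A minimal sketch, assuming the new org.apache.lucene.uninverting.UninvertingReader in lucene/misc and a hypothetical index whose "id" field is inverted but carries no doc values:

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader;

class UninvertingSketch {
  // Expose the inverted-only "id" field as SORTED doc values at read time,
  // then sort on it as if it had been indexed with doc values all along.
  static TopDocs sortByLegacyField(Directory dir) throws IOException {
    Map<String,UninvertingReader.Type> mapping =
        Collections.singletonMap("id", UninvertingReader.Type.SORTED);
    DirectoryReader reader =
        UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
    IndexSearcher searcher = new IndexSearcher(reader);
    return searcher.search(new MatchAllDocsQuery(), 10,
        new Sort(new SortField("id", SortField.Type.STRING)));
  }
}

An index that writes SortedDocValuesField for "id" up front needs no wrapping at all.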
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java b/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java
new file mode 100644
index 0000000..3910e74
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java
@@ -0,0 +1,70 @@
+package org.apache.lucene.collation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.Collator;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.search.DocValuesRangeFilter;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Indexes collation keys as a single-valued {@link SortedDocValuesField}.
+ * <p>
+ * This is more efficient than {@link CollationKeyAnalyzer} if the field 
+ * only has one value: no uninversion is necessary to sort on the field, 
+ * locale-sensitive range queries can still work via {@link DocValuesRangeFilter}, 
+ * and the underlying data structures built at index-time are likely more efficient 
+ * and use less memory than FieldCache.
+ */
+public final class CollationDocValuesField extends Field {
+  private final String name;
+  private final Collator collator;
+  private final BytesRef bytes = new BytesRef();
+  
+  /**
+   * Create a new CollationDocValuesField.
+   * <p>
+   * NOTE: you should not create a new one for each document, instead
+   * just make one and reuse it during your indexing process, setting
+   * the value via {@link #setStringValue(String)}.
+   * @param name field name
+   * @param collator Collator for generating collation keys.
+   */
+  // TODO: can we make this trap-free? maybe just synchronize on the collator
+  // instead? 
+  public CollationDocValuesField(String name, Collator collator) {
+    super(name, SortedDocValuesField.TYPE);
+    this.name = name;
+    this.collator = (Collator) collator.clone();
+    fieldsData = bytes; // so wrong setters cannot be called
+  }
+
+  @Override
+  public String name() {
+    return name;
+  }
+  
+  @Override
+  public void setStringValue(String value) {
+    bytes.bytes = collator.getCollationKey(value).toByteArray();
+    bytes.offset = 0;
+    bytes.length = bytes.bytes.length;
+  }
+}
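A brief usage sketch for the new class above (field names are illustrative), following the pattern its javadoc recommends: one reused field instance per indexing run. The test below exercises the same pattern against a random index.

import java.io.IOException;
import java.text.Collator;
import java.util.Locale;

import org.apache.lucene.collation.CollationDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;

class CollationIndexingSketch {
  // One reused CollationDocValuesField; only setStringValue(...) is called
  // per document. Searches then sort with SortField.Type.STRING on "collated".
  static void index(IndexWriter writer, Iterable<String> values) throws IOException {
    Document doc = new Document();
    Field stored = new StringField("field", "", Field.Store.YES);
    CollationDocValuesField collated =
        new CollationDocValuesField("collated", Collator.getInstance(Locale.FRENCH));
    doc.add(stored);
    doc.add(collated);
    for (String value : values) {
      stored.setStringValue(value);
      collated.setStringValue(value);
      writer.addDocument(doc);
    }
  }
}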
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java
new file mode 100644
index 0000000..054f64b
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java
@@ -0,0 +1,143 @@
+package org.apache.lucene.collation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.Collator;
+import java.util.Locale;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.DocValuesRangeFilter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+
+/**
+ * Trivial test of CollationDocValuesField.
+ */
+@SuppressCodecs("Lucene3x")
+public class TestCollationDocValuesField extends LuceneTestCase {
+  
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    Field field = newField("field", "", StringField.TYPE_STORED);
+    CollationDocValuesField collationField = new CollationDocValuesField("collated", Collator.getInstance(Locale.ENGLISH));
+    doc.add(field);
+    doc.add(collationField);
+
+    field.setStringValue("ABC");
+    collationField.setStringValue("ABC");
+    iw.addDocument(doc);
+    
+    field.setStringValue("abc");
+    collationField.setStringValue("abc");
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.shutdown();
+    
+    IndexSearcher is = newSearcher(ir);
+    
+    SortField sortField = new SortField("collated", SortField.Type.STRING);
+    
+    TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField));
+    assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field"));
+    assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field"));
+    ir.close();
+    dir.close();
+  }
+  
+  public void testRanges() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    Field field = newField("field", "", StringField.TYPE_STORED);
+    Collator collator = Collator.getInstance(Locale.getDefault()); // uses -Dtests.locale
+    if (random().nextBoolean()) {
+      collator.setStrength(Collator.PRIMARY);
+    }
+    CollationDocValuesField collationField = new CollationDocValuesField("collated", collator);
+    doc.add(field);
+    doc.add(collationField);
+    
+    int numDocs = atLeast(500);
+    for (int i = 0; i < numDocs; i++) {
+      String value = TestUtil.randomSimpleString(random());
+      field.setStringValue(value);
+      collationField.setStringValue(value);
+      iw.addDocument(doc);
+    }
+    
+    IndexReader ir = iw.getReader();
+    iw.shutdown();
+    IndexSearcher is = newSearcher(ir);
+    
+    int numChecks = atLeast(100);
+    for (int i = 0; i < numChecks; i++) {
+      String start = TestUtil.randomSimpleString(random());
+      String end = TestUtil.randomSimpleString(random());
+      BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray());
+      BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray());
+      Query query = new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));
+      doTestRanges(is, start, end, query, collator);
+    }
+    
+    ir.close();
+    dir.close();
+  }
+  
+  private void doTestRanges(IndexSearcher is, String startPoint, String endPoint, Query query, Collator collator) throws Exception { 
+    QueryUtils.check(query);
+    
+    // positive test
+    TopDocs docs = is.search(query, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).get("field");
+      assertTrue(collator.compare(value, startPoint) >= 0);
+      assertTrue(collator.compare(value, endPoint) <= 0);
+    }
+    
+    // negative test
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    bq.add(query, Occur.MUST_NOT);
+    docs = is.search(bq, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).get("field");
+      assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0);
+    }
+  }
+}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
index 20768f2..a3547f2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
@@ -60,23 +60,6 @@
        secondRangeBeginning, secondRangeEnd);
   }
   
-  public void testCollationKeySort() throws Exception {
-    Analyzer usAnalyzer 
-      = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.getInstance(Locale.US));
-    Analyzer franceAnalyzer 
-      = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.getInstance(Locale.FRANCE));
-    Analyzer swedenAnalyzer 
-      = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.getInstance(new Locale("sv", "se")));
-    Analyzer denmarkAnalyzer 
-      = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, Collator.getInstance(new Locale("da", "dk")));
-    
-    // The ICU Collator and Sun java.text.Collator implementations differ in their
-    // orderings - "BFJDH" is the ordering for java.text.Collator for Locale.US.
-    testCollationKeySort
-    (usAnalyzer, franceAnalyzer, swedenAnalyzer, denmarkAnalyzer, 
-     oStrokeFirst ? "BFJHD" : "BFJDH", "EACGI", "BJDFH", "BJDHF");
-  }
-  
   public void testThreadSafe() throws Exception {
     int iters = 20 * RANDOM_MULTIPLIER;
     for (int i = 0; i < iters; i++) {
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java b/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
index 693a5aa..b70b0ae 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
@@ -19,7 +19,7 @@
 
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.search.FieldCacheRangeFilter;
+import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.util.BytesRef;
 
 import com.ibm.icu.text.Collator;
@@ -30,7 +30,7 @@
  * <p>
  * This is more efficient than {@link ICUCollationKeyAnalyzer} if the field 
  * only has one value: no uninversion is necessary to sort on the field, 
- * locale-sensitive range queries can still work via {@link FieldCacheRangeFilter}, 
+ * locale-sensitive range queries can still work via {@link DocValuesRangeFilter}, 
  * and the underlying data structures built at index-time are likely more efficient 
  * and use less memory than FieldCache.
  */
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
index 88d93a6..34114b0 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
@@ -24,7 +24,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.FieldCacheRangeFilter;
+import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
@@ -111,7 +111,7 @@
       String end = TestUtil.randomSimpleString(random());
       BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray());
       BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray());
-      Query query = new ConstantScoreQuery(FieldCacheRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));
+      Query query = new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));
       doTestRanges(is, start, end, query, collator);
     }
     
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java
index 0c3a992..57b769c 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyAnalyzer.java
@@ -56,29 +56,6 @@
        secondRangeBeginning, secondRangeEnd);
   }
 
-  // Test using various international locales with accented characters (which
-  // sort differently depending on locale)
-  //
-  // Copied (and slightly modified) from 
-  // org.apache.lucene.search.TestSort.testInternationalSort()
-  //  
-  public void testCollationKeySort() throws Exception {
-    Analyzer usAnalyzer = new ICUCollationKeyAnalyzer
-      (TEST_VERSION_CURRENT, Collator.getInstance(Locale.ROOT));
-    Analyzer franceAnalyzer = new ICUCollationKeyAnalyzer
-      (TEST_VERSION_CURRENT, Collator.getInstance(Locale.FRANCE));
-    Analyzer swedenAnalyzer = new ICUCollationKeyAnalyzer
-      (TEST_VERSION_CURRENT, Collator.getInstance(new Locale("sv", "se")));
-    Analyzer denmarkAnalyzer = new ICUCollationKeyAnalyzer
-      (TEST_VERSION_CURRENT, Collator.getInstance(new Locale("da", "dk")));
-
-    // The ICU Collator and java.text.Collator implementations differ in their
-    // orderings - "BFJHD" is the ordering for the ICU Collator for Locale.ROOT.
-    testCollationKeySort
-    (usAnalyzer, franceAnalyzer, swedenAnalyzer, denmarkAnalyzer, 
-     "BFJHD", "ECAGI", "BJDFH", "BJDHF");
-  }
-  
   public void testThreadSafe() throws Exception {
     int iters = 20 * RANDOM_MULTIPLIER;
     for (int i = 0; i < iters; i++) {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java
index ae0ae9c..f735c12 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/BenchmarkTestCase.java
@@ -31,6 +31,7 @@
 import org.junit.BeforeClass;
 
 /** Base class for all Benchmark unit tests. */
+@SuppressSysoutChecks(bugUrl = "very noisy")
 public abstract class BenchmarkTestCase extends LuceneTestCase {
   private static File WORKDIR;
   
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index 5fc7f82..6d284e6 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -52,17 +52,12 @@
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SerialMergeScheduler;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 
 /**
  * Test very simply that perf tasks - simple algorithms - are doing what they should.
@@ -328,7 +323,7 @@
         "content.source.forever=true",
         "directory=RAMDirectory",
         "doc.reuse.fields=false",
-        "doc.stored=false",
+        "doc.stored=true",
         "doc.tokenized=false",
         "doc.index.props=true",
         "# ----- alg ",
@@ -344,11 +339,11 @@
     Benchmark benchmark = execBenchmark(algLines);
 
     DirectoryReader r = DirectoryReader.open(benchmark.getRunData().getDirectory());
-    SortedDocValues idx = FieldCache.DEFAULT.getTermsIndex(SlowCompositeReaderWrapper.wrap(r), "country");
+    
     final int maxDoc = r.maxDoc();
     assertEquals(1000, maxDoc);
     for(int i=0;i<1000;i++) {
-      assertTrue("doc " + i + " has null country", idx.getOrd(i) != -1);
+      assertNotNull("doc " + i + " has null country", r.document(i).getField("country"));
     }
     r.close();
   }
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
index 6754522..dfca802 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
@@ -42,6 +42,7 @@
 import conf.ConfLoader;
 
 /** Test very simply that perf tasks are parsed as expected. */
+@SuppressSysoutChecks(bugUrl = "very noisy")
 public class TestPerfTasksParse extends LuceneTestCase {
 
   static final String NEW_LINE = System.getProperty("line.separator");
diff --git a/lucene/core/build.xml b/lucene/core/build.xml
index 8b7d018..7d80c70 100644
--- a/lucene/core/build.xml
+++ b/lucene/core/build.xml
@@ -31,7 +31,6 @@
   "/>
 
   <property name="forbidden-rue-excludes" value="
-    org/apache/lucene/search/FieldCache$CacheEntry.class
     org/apache/lucene/util/RamUsageEstimator.class
     org/apache/lucene/search/CachingWrapperFilter.class
   "/>
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java
index dca1043..a9ff000 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java
@@ -18,14 +18,13 @@
  */
 
 import org.apache.lucene.index.AtomicReader; // javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
 
 /**
  * Syntactic sugar for encoding doubles as NumericDocValues
  * via {@link Double#doubleToRawLongBits(double)}.
  * <p>
  * Per-document double values can be retrieved via
- * {@link FieldCache#getDoubles(AtomicReader, String, boolean)}.
+ * {@link AtomicReader#getNumericDocValues(String)}.
  * <p>
  * <b>NOTE</b>: In most all cases this will be rather inefficient,
  * requiring eight bytes per document. Consider encoding double
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoubleField.java b/lucene/core/src/java/org/apache/lucene/document/DoubleField.java
index 1a56c7c..d58d4bb 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DoubleField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DoubleField.java
@@ -18,8 +18,8 @@
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;
@@ -57,7 +57,7 @@
  * NumericRangeFilter}.  To sort according to a
  * <code>DoubleField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>DoubleField</code> 
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>DoubleField</code> to
  * the same document more than once.  Range querying and
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java
index c4635d8..5260c97 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java
@@ -18,14 +18,13 @@
  */
 
 import org.apache.lucene.index.AtomicReader; // javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
 
 /**
  * Syntactic sugar for encoding floats as NumericDocValues
  * via {@link Float#floatToRawIntBits(float)}.
  * <p>
  * Per-document floating point values can be retrieved via
- * {@link FieldCache#getFloats(AtomicReader, String, boolean)}.
+ * {@link AtomicReader#getNumericDocValues(String)}.
  * <p>
  * <b>NOTE</b>: In most all cases this will be rather inefficient,
  * requiring four bytes per document. Consider encoding floating
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatField.java b/lucene/core/src/java/org/apache/lucene/document/FloatField.java
index 5b326cf..f44a355 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FloatField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FloatField.java
@@ -18,8 +18,8 @@
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;
@@ -57,7 +57,7 @@
  * NumericRangeFilter}.  To sort according to a
  * <code>FloatField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>FloatField</code> 
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>FloatField</code> to
  * the same document more than once.  Range querying and
diff --git a/lucene/core/src/java/org/apache/lucene/document/IntField.java b/lucene/core/src/java/org/apache/lucene/document/IntField.java
index 9188f3c..97bd9f1 100644
--- a/lucene/core/src/java/org/apache/lucene/document/IntField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/IntField.java
@@ -18,8 +18,8 @@
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;
@@ -57,7 +57,7 @@
  * NumericRangeFilter}.  To sort according to a
  * <code>IntField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#INT}. <code>IntField</code> 
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>IntField</code> to
  * the same document more than once.  Range querying and
diff --git a/lucene/core/src/java/org/apache/lucene/document/LongField.java b/lucene/core/src/java/org/apache/lucene/document/LongField.java
index 6d9dc3c..3e4dfb8 100644
--- a/lucene/core/src/java/org/apache/lucene/document/LongField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/LongField.java
@@ -18,8 +18,8 @@
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;
@@ -67,7 +67,7 @@
  * NumericRangeFilter}.  To sort according to a
  * <code>LongField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LongField</code> 
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>LongField</code> to
  * the same document more than once.  Range querying and
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocValues.java b/lucene/core/src/java/org/apache/lucene/index/DocValues.java
index e894a51..0f6e127 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocValues.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
@@ -159,4 +161,72 @@
       }
     };
   }
+  
+  // Some helper methods for the transition from the FieldCache APIs.
+  // Unlike the AtomicReader APIs (which must be strict for consistency), these are lenient.
+  
+  /**
+   * Returns NumericDocValues for the reader, or {@link #EMPTY_NUMERIC} if it has none. 
+   */
+  public static NumericDocValues getNumeric(AtomicReader in, String field) throws IOException {
+    NumericDocValues dv = in.getNumericDocValues(field);
+    if (dv == null) {
+      return EMPTY_NUMERIC;
+    } else {
+      return dv;
+    }
+  }
+  
+  /**
+   * Returns BinaryDocValues for the reader, or {@link #EMPTY_BINARY} if it has none. 
+   */
+  public static BinaryDocValues getBinary(AtomicReader in, String field) throws IOException {
+    BinaryDocValues dv = in.getBinaryDocValues(field);
+    if (dv == null) {
+      dv = in.getSortedDocValues(field);
+      if (dv == null) {
+        return EMPTY_BINARY;
+      }
+    }
+    return dv;
+  }
+  
+  /**
+   * Returns SortedDocValues for the reader, or {@link #EMPTY_SORTED} if it has none. 
+   */
+  public static SortedDocValues getSorted(AtomicReader in, String field) throws IOException {
+    SortedDocValues dv = in.getSortedDocValues(field);
+    if (dv == null) {
+      return EMPTY_SORTED;
+    } else {
+      return dv;
+    }
+  }
+  
+  /**
+   * Returns SortedSetDocValues for the reader, or {@link #EMPTY_SORTED_SET} if it has none. 
+   */
+  public static SortedSetDocValues getSortedSet(AtomicReader in, String field) throws IOException {
+    SortedSetDocValues dv = in.getSortedSetDocValues(field);
+    if (dv == null) {
+      SortedDocValues sorted = in.getSortedDocValues(field);
+      if (sorted == null) {
+        return EMPTY_SORTED_SET;
+      }
+      return singleton(sorted);
+    }
+    return dv;
+  }
+  
+  /**
+   * Returns the docs-with-field Bits for the reader, or a {@link Bits} matching no documents if it has none.
+   */
+  public static Bits getDocsWithField(AtomicReader in, String field) throws IOException {
+    Bits dv = in.getDocsWithField(field);
+    if (dv == null) {
+      return new Bits.MatchNoBits(in.maxDoc());
+    } else {
+      return dv;
+    }
+  }
 }
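A sketch of the intended calling pattern for these helpers, using a hypothetical multi-valued "tags" field: per-segment search code can consume doc values without null checks, because a missing field behaves like an empty one and a single-valued SORTED field is transparently viewed as SORTED_SET.

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedSetDocValues;

class LenientAccessSketch {
  // Count tag occurrences in one segment. No null checks: if "tags" has no
  // doc values at all, getSortedSet returns EMPTY_SORTED_SET and the loop
  // simply matches nothing.
  static long countTagOccurrences(AtomicReaderContext context) throws IOException {
    SortedSetDocValues values = DocValues.getSortedSet(context.reader(), "tags");
    long count = 0;
    for (int doc = 0; doc < context.reader().maxDoc(); doc++) {
      values.setDocument(doc);
      while (values.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) {
        count++;
      }
    }
    return count;
  }
}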
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
index 36d2251..f2f9f82 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java
@@ -21,7 +21,6 @@
 import java.util.Iterator;
 
 import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -38,8 +37,8 @@
  * to override {@link #numDocs()} as well and vice-versa.
  * <p><b>NOTE</b>: If this {@link FilterAtomicReader} does not change the
  * content the contained reader, you could consider overriding
- * {@link #getCoreCacheKey()} so that {@link FieldCache} and
- * {@link CachingWrapperFilter} share the same entries for this atomic reader
+ * {@link #getCoreCacheKey()} so that
+ * {@link CachingWrapperFilter} shares the same entries for this atomic reader
  * and the wrapped one. {@link #getCombinedCoreAndDeletesKey()} could be
  * overridden as well if the {@link #getLiveDocs() live docs} are not changed
  * either.
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
index cd05335..a2b1a4f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
@@ -426,7 +426,7 @@
     return getContext().leaves();
   }
 
-  /** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find
+  /** Expert: Returns a key for this IndexReader, so CachingWrapperFilter can find
    * it again.
    * This key must not have equals()/hashCode() methods, so &quot;equals&quot; means &quot;identical&quot;. */
   public Object getCoreCacheKey() {
@@ -436,7 +436,7 @@
   }
 
   /** Expert: Returns a key for this IndexReader that also includes deletions,
-   * so FieldCache/CachingWrapperFilter can find it again.
+   * so CachingWrapperFilter can find it again.
    * This key must not have equals()/hashCode() methods, so &quot;equals&quot; means &quot;identical&quot;. */
   public Object getCombinedCoreAndDeletesKey() {
     // Don't call ensureOpen since FC calls this (to evict)
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
index dda589a..03f4043 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
@@ -34,7 +34,7 @@
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.CachingWrapperFilter;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -361,7 +361,7 @@
 
   // This is necessary so that cloned SegmentReaders (which
   // share the underlying postings data) will map to the
-  // same entry in the FieldCache.  See LUCENE-1579.
+  // same entry for CachingWrapperFilter.  See LUCENE-1579.
   @Override
   public Object getCoreCacheKey() {
     // NOTE: if this ever changes, be sure to fix
@@ -525,7 +525,7 @@
    * sharing the same core are closed.  At this point it 
    * is safe for apps to evict this reader from any caches 
    * keyed on {@link #getCoreCacheKey}.  This is the same 
-   * interface that {@link FieldCache} uses, internally, 
+   * interface that {@link CachingWrapperFilter} uses, internally, 
    * to evict entries.</p>
    * 
    * @lucene.experimental
diff --git a/lucene/core/src/java/org/apache/lucene/index/SingletonSortedSetDocValues.java b/lucene/core/src/java/org/apache/lucene/index/SingletonSortedSetDocValues.java
index 3454b09..c170d74 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SingletonSortedSetDocValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SingletonSortedSetDocValues.java
@@ -23,8 +23,7 @@
  * Exposes a multi-valued view over a single-valued instance.
  * <p>
  * This can be used if you want to have one multi-valued implementation
- * against e.g. FieldCache.getDocTermOrds that also works for single-valued 
- * fields.
+ * that works for single or multi-valued types.
  */
 final class SingletonSortedSetDocValues extends SortedSetDocValues {
   private final SortedDocValues in;
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRangeFilter.java
index d296b64..ff6ca17 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRangeFilter.java
@@ -18,15 +18,17 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * A range filter built on top of a cached multi-valued term field (in {@link FieldCache}).
+ * A range filter built on top of a multi-valued doc values field (from {@link AtomicReader#getSortedSetDocValues}).
  * 
- * <p>Like {@link FieldCacheRangeFilter}, this is just a specialized range query versus
+ * <p>Like {@link DocValuesRangeFilter}, this is just a specialized range query versus
  *    using a TermRangeQuery with {@link DocTermOrdsRewriteMethod}: it will only do
  *    two ordinal to term lookups.</p>
  */
@@ -51,7 +53,7 @@
   public abstract DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException;
   
   /**
-   * Creates a BytesRef range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a BytesRef range filter using {@link AtomicReader#getSortedSetDocValues}. This works with all
    * fields containing zero or more terms in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
@@ -59,7 +61,7 @@
     return new DocTermOrdsRangeFilter(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedSetDocValues docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
+        final SortedSetDocValues docTermOrds = DocValues.getSortedSet(context.reader(), field);
         final long lowerPoint = lowerVal == null ? -1 : docTermOrds.lookupTerm(lowerVal);
         final long upperPoint = upperVal == null ? -1 : docTermOrds.lookupTerm(upperVal);
 
@@ -95,7 +97,7 @@
         
         assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
         
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected final boolean matchDoc(int doc) {
             docTermOrds.setDocument(doc);
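A usage sketch for the rewritten filter, with hypothetical field name and bounds. The field is now expected to expose SORTED_SET doc values (indexed as a SortedSetDocValuesField, or uninverted via UninvertingReader):

import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocTermOrdsRangeFilter;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

class TagRangeSketch {
  // Matches documents whose multi-valued "tags" field contains at least one
  // term in the inclusive range [apple, banana].
  static Query tagsBetween() {
    return new ConstantScoreQuery(DocTermOrdsRangeFilter.newBytesRefRange(
        "tags", new BytesRef("apple"), new BytesRef("banana"), true, true));
  }
}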
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRewriteMethod.java b/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRewriteMethod.java
index 01705a4..eef62fb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRewriteMethod.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocTermOrdsRewriteMethod.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
@@ -83,7 +84,7 @@
      */
     @Override
     public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-      final SortedSetDocValues docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), query.field);
+      final SortedSetDocValues docTermOrds = DocValues.getSortedSet(context.reader(), query.field);
       // Cannot use FixedBitSet because we require long index (ord):
       final LongBitSet termSet = new LongBitSet(docTermOrds.getValueCount());
       TermsEnum termsEnum = query.getTermsEnum(new Terms() {
@@ -144,7 +145,7 @@
         return null;
       }
       
-      return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+      return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
         @Override
         protected final boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException {
           docTermOrds.setDocument(doc);
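For completeness, a sketch (field name hypothetical) of pointing a MultiTermQuery at this rewrite method, so matching terms are collected from SORTED_SET doc values rather than by visiting postings:

import org.apache.lucene.search.DocTermOrdsRewriteMethod;
import org.apache.lucene.search.TermRangeQuery;

class RewriteMethodSketch {
  static TermRangeQuery tagsPrefixRange() {
    // An ordinary multi-term query...
    TermRangeQuery query = TermRangeQuery.newStringRange("tags", "a", "m", true, false);
    // ...rewritten against SORTED_SET doc values instead of the terms index.
    query.setRewriteMethod(new DocTermOrdsRewriteMethod());
    return query;
  }
}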
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheDocIdSet.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesDocIdSet.java
similarity index 94%
rename from lucene/core/src/java/org/apache/lucene/search/FieldCacheDocIdSet.java
rename to lucene/core/src/java/org/apache/lucene/search/DocValuesDocIdSet.java
index 1a4d8e8..3ff533e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheDocIdSet.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesDocIdSet.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.util.OpenBitSet;
 
 /**
- * Base class for DocIdSet to be used with FieldCache. The implementation
+ * Base class for DocIdSet to be used with DocValues. The implementation
  * of its iterator is very stupid and slow if the implementation of the
  * {@link #matchDoc} method is not optimized, as iterators simply increment
  * the document id until {@code matchDoc(int)} returns true. Because of this
@@ -30,12 +30,12 @@
  * I/O.
  * @lucene.internal
  */
-public abstract class FieldCacheDocIdSet extends DocIdSet {
+public abstract class DocValuesDocIdSet extends DocIdSet {
 
   protected final int maxDoc;
   protected final Bits acceptDocs;
 
-  public FieldCacheDocIdSet(int maxDoc, Bits acceptDocs) {
+  public DocValuesDocIdSet(int maxDoc, Bits acceptDocs) {
     this.maxDoc = maxDoc;
     this.acceptDocs = acceptDocs;
   }
@@ -123,7 +123,7 @@
       return new FilteredDocIdSetIterator(((DocIdSet) acceptDocs).iterator()) {
         @Override
         protected boolean match(int doc) {
-          return FieldCacheDocIdSet.this.matchDoc(doc);
+          return DocValuesDocIdSet.this.matchDoc(doc);
         }
       };
     } else {
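As the javadoc above warns, matchDoc should stay cheap because the fallback iterator probes every doc id. A minimal subclass sketch (names hypothetical) whose per-doc check is a single NumericDocValues lookup:

import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocValuesDocIdSet;
import org.apache.lucene.util.Bits;

class NonZeroDocIdSet extends DocValuesDocIdSet {
  private final NumericDocValues values;

  NonZeroDocIdSet(int maxDoc, Bits acceptDocs, NumericDocValues values) {
    super(maxDoc, acceptDocs);
    this.values = values;
  }

  @Override
  protected boolean matchDoc(int doc) {
    // Cheap per-document test: one doc values lookup, no I/O.
    return values.get(doc) != 0;
  }
}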
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
similarity index 66%
rename from lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
rename to lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
index c262496..336bda2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
@@ -24,19 +24,22 @@
 import org.apache.lucene.document.LongField; // for javadocs
 import org.apache.lucene.index.AtomicReader; // for javadocs
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 
 /**
- * A range filter built on top of a cached single term field (in {@link FieldCache}).
+ * A range filter built on top of a single-valued doc values field (from
+ * {@link AtomicReader#getNumericDocValues(String)} or {@link AtomicReader#getSortedDocValues(String)}).
  * 
- * <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
- * Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,
+ * <p>{@code DocValuesRangeFilter} builds a single cache for the field the first time it is used.
+ * Each subsequent {@code DocValuesRangeFilter} on the same field then reuses this cache,
  * even if the range itself changes. 
  * 
- * <p>This means that {@code FieldCacheRangeFilter} is much faster (sometimes more than 100x as fast) 
+ * <p>This means that {@code DocValuesRangeFilter} is much faster (sometimes more than 100x as fast) 
  * as building a {@link TermRangeFilter}, if using a {@link #newStringRange}.
  * However, if the range never changes it is slower (around 2x as slow) than building
  * a CachingWrapperFilter on top of a single {@link TermRangeFilter}.
@@ -47,9 +50,10 @@
  * LongField} or {@link DoubleField}. But
  * it has the problem that it only works with exactly one value per document (see below).
  *
- * <p>As with all {@link FieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for 
+ * <p>As with all {@link AtomicReader#getNumericDocValues} based functionality, 
+ * {@code DocValuesRangeFilter} is only valid for 
  * fields with exactly one term per document (except for {@link #newStringRange}
- * where 0 terms are also allowed). Due to a restriction of {@link FieldCache}, for numeric ranges
+ * where 0 terms are also allowed). For historical reasons, numeric ranges
  * treat documents that have no numeric value as 0.
  *
  * <p>Thus it works on dates, prices and other single value fields but will not work on
@@ -57,20 +61,18 @@
  * there is only a single term. 
  *
  * <p>This class does not have a constructor; use one of the static factory methods,
- * that create a correct instance for different data types supported by {@link FieldCache}.
+ * which create a correct instance for the supported data types.
  */
-
-public abstract class FieldCacheRangeFilter<T> extends Filter {
+// TODO: use docsWithField to handle empty properly
+public abstract class DocValuesRangeFilter<T> extends Filter {
   final String field;
-  final FieldCache.Parser parser;
   final T lowerVal;
   final T upperVal;
   final boolean includeLower;
   final boolean includeUpper;
   
-  private FieldCacheRangeFilter(String field, FieldCache.Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
+  private DocValuesRangeFilter(String field, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
     this.field = field;
-    this.parser = parser;
     this.lowerVal = lowerVal;
     this.upperVal = upperVal;
     this.includeLower = includeLower;
@@ -82,15 +84,15 @@
   public abstract DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException;
 
   /**
-   * Creates a string range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a string range filter using {@link AtomicReader#getSortedDocValues(String)}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<String>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+        final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
         final int lowerPoint = lowerVal == null ? -1 : fcsi.lookupTerm(new BytesRef(lowerVal));
         final int upperPoint = upperVal == null ? -1 : fcsi.lookupTerm(new BytesRef(upperVal));
 
@@ -126,7 +128,7 @@
         
         assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
         
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected final boolean matchDoc(int doc) {
             final int docOrd = fcsi.getOrd(doc);
@@ -138,16 +140,16 @@
   }
   
   /**
-   * Creates a BytesRef range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a BytesRef range filter using {@link AtomicReader#getSortedDocValues(String)}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   // TODO: bogus that newStringRange doesnt share this code... generics hell
-  public static FieldCacheRangeFilter<BytesRef> newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<BytesRef>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<BytesRef> newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<BytesRef>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+        final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
         final int lowerPoint = lowerVal == null ? -1 : fcsi.lookupTerm(lowerVal);
         final int upperPoint = upperVal == null ? -1 : fcsi.lookupTerm(upperVal);
 
@@ -183,7 +185,7 @@
         
         assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
         
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected final boolean matchDoc(int doc) {
             final int docOrd = fcsi.getOrd(doc);
@@ -195,21 +197,12 @@
   }
 
   /**
-   * Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return newIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)}. This works with all
-   * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<Integer>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         final int inclusiveLowerPoint, inclusiveUpperPoint;
@@ -233,11 +226,11 @@
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
         
-        final FieldCache.Ints values = FieldCache.DEFAULT.getInts(context.reader(), field, (FieldCache.IntParser) parser, false);
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final int value = values.get(doc);
+            final int value = (int) values.get(doc);
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };
@@ -246,21 +239,12 @@
   }
   
   /**
-   * Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return newLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)}. This works with all
-   * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<Long>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         final long inclusiveLowerPoint, inclusiveUpperPoint;
@@ -284,8 +268,8 @@
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
         
-        final FieldCache.Longs values = FieldCache.DEFAULT.getLongs(context.reader(), field, (FieldCache.LongParser) parser, false);
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
             final long value = values.get(doc);
@@ -297,21 +281,12 @@
   }
   
   /**
-   * Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return newFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)}. This works with all
-   * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<Float>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers
@@ -339,11 +314,11 @@
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
         
-        final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), field, (FieldCache.FloatParser) parser, false);
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final float value = values.get(doc);
+            final float value = Float.intBitsToFloat((int)values.get(doc));
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };
@@ -352,21 +327,12 @@
   }
   
   /**
-   * Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return newDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)}. This works with all
-   * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
+    return new DocValuesRangeFilter<Double>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers
@@ -394,12 +360,12 @@
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
         
-        final FieldCache.Doubles values = FieldCache.DEFAULT.getDoubles(context.reader(), field, (FieldCache.DoubleParser) parser, false);
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
         // ignore deleted docs if range doesn't contain 0
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final double value = values.get(doc);
+            final double value = Double.longBitsToDouble(values.get(doc));
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };
@@ -422,8 +388,8 @@
   @SuppressWarnings({"rawtypes"})
   public final boolean equals(Object o) {
     if (this == o) return true;
-    if (!(o instanceof FieldCacheRangeFilter)) return false;
-    FieldCacheRangeFilter other = (FieldCacheRangeFilter) o;
+    if (!(o instanceof DocValuesRangeFilter)) return false;
+    DocValuesRangeFilter other = (DocValuesRangeFilter) o;
 
     if (!this.field.equals(other.field)
         || this.includeLower != other.includeLower
@@ -431,7 +397,6 @@
     ) { return false; }
     if (this.lowerVal != null ? !this.lowerVal.equals(other.lowerVal) : other.lowerVal != null) return false;
     if (this.upperVal != null ? !this.upperVal.equals(other.upperVal) : other.upperVal != null) return false;
-    if (this.parser != null ? !this.parser.equals(other.parser) : other.parser != null) return false;
     return true;
   }
   
@@ -441,7 +406,6 @@
     h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204;
     h = (h << 1) | (h >>> 31);  // rotate to distinguish lower from upper
     h ^= (upperVal != null) ? upperVal.hashCode() : -1674416163;
-    h ^= (parser != null) ? parser.hashCode() : -1572457324;
     h ^= (includeLower ? 1549299360 : -365038026) ^ (includeUpper ? 1721088258 : 1948649653);
     return h;
   }
@@ -460,7 +424,4 @@
 
   /** Returns the upper value of this range filter */
   public T getUpperVal() { return upperVal; }
-  
-  /** Returns the current numeric parser ({@code null} for {@code T} is {@code String}} */
-  public FieldCache.Parser getParser() { return parser; }
 }
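
A minimal usage sketch of the new parser-less API, assuming an IndexSearcher
named searcher and a hypothetical "price" field indexed with
FloatDocValuesField:

    // Floats are decoded from the raw NumericDocValues bits at match time;
    // passing null for a bound makes the range half-open.
    Filter prices = DocValuesRangeFilter.newFloatRange(
        "price", 10.0f, 20.0f, true, true);  // both endpoints inclusive
    TopDocs hits = searcher.search(new MatchAllDocsQuery(), prices, 10);
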
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRewriteMethod.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
similarity index 86%
rename from lucene/core/src/java/org/apache/lucene/search/FieldCacheRewriteMethod.java
rename to lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
index fb5f5d7..fffcfec 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRewriteMethod.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Terms;
@@ -28,28 +29,28 @@
 import org.apache.lucene.util.LongBitSet;
 
 /**
- * Rewrites MultiTermQueries into a filter, using the FieldCache for term enumeration.
+ * Rewrites MultiTermQueries into a filter, using DocValues for term enumeration.
  * <p>
  * This can be used to perform these queries against an unindexed docvalues field.
  * @lucene.experimental
  */
-public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod {
+public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
   
   @Override
   public Query rewrite(IndexReader reader, MultiTermQuery query) {
-    Query result = new ConstantScoreQuery(new MultiTermQueryFieldCacheWrapperFilter(query));
+    Query result = new ConstantScoreQuery(new MultiTermQueryDocValuesWrapperFilter(query));
     result.setBoost(query.getBoost());
     return result;
   }
   
-  static class MultiTermQueryFieldCacheWrapperFilter extends Filter {
+  static class MultiTermQueryDocValuesWrapperFilter extends Filter {
     
     protected final MultiTermQuery query;
     
     /**
      * Wrap a {@link MultiTermQuery} as a Filter.
      */
-    protected MultiTermQueryFieldCacheWrapperFilter(MultiTermQuery query) {
+    protected MultiTermQueryDocValuesWrapperFilter(MultiTermQuery query) {
       this.query = query;
     }
     
@@ -64,7 +65,7 @@
       if (o==this) return true;
       if (o==null) return false;
       if (this.getClass().equals(o.getClass())) {
-        return this.query.equals( ((MultiTermQueryFieldCacheWrapperFilter)o).query );
+        return this.query.equals( ((MultiTermQueryDocValuesWrapperFilter)o).query );
       }
       return false;
     }
@@ -83,7 +84,7 @@
      */
     @Override
     public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-      final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), query.field);
+      final SortedDocValues fcsi = DocValues.getSorted(context.reader(), query.field);
       // Cannot use FixedBitSet because we require long index (ord):
       final LongBitSet termSet = new LongBitSet(fcsi.getValueCount());
       TermsEnum termsEnum = query.getTermsEnum(new Terms() {
@@ -147,7 +148,7 @@
         return null;
       }
       
-      return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+      return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
         @Override
         protected final boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException {
           int ord = fcsi.getOrd(doc);
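
A sketch of rewriting a MultiTermQuery through docvalues, assuming a
hypothetical "id" field indexed with SortedDocValuesField (the field needs no
postings for this to work):

    PrefixQuery query = new PrefixQuery(new Term("id", "lucene-"));
    // Enumerate matching terms via SortedDocValues instead of the terms index:
    query.setRewriteMethod(new DocValuesRewriteMethod());
    TopDocs hits = searcher.search(query, 10);
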
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
similarity index 83%
rename from lucene/core/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java
rename to lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
index dbdd181..86d7011 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum; // javadoc @link
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedDocValues;
@@ -41,17 +42,6 @@
  * also have different performance characteristics, as
  * described below.
  * 
- * <p/>
- * 
- * The first invocation of this filter on a given field will
- * be slower, since a {@link SortedDocValues} must be
- * created.  Subsequent invocations using the same field
- * will re-use this cache.  However, as with all
- * functionality based on {@link FieldCache}, persistent RAM
- * is consumed to hold the cache, and is not freed until the
- * {@link IndexReader} is closed.  In contrast, TermsFilter
- * has no persistent RAM consumption.
- * 
  * 
  * <p/>
  * 
@@ -97,29 +87,25 @@
  * Which filter is best is very application dependent.
  */
 
-public class FieldCacheTermsFilter extends Filter {
+public class DocValuesTermsFilter extends Filter {
   private String field;
   private BytesRef[] terms;
 
-  public FieldCacheTermsFilter(String field, BytesRef... terms) {
+  public DocValuesTermsFilter(String field, BytesRef... terms) {
     this.field = field;
     this.terms = terms;
   }
 
-  public FieldCacheTermsFilter(String field, String... terms) {
+  public DocValuesTermsFilter(String field, String... terms) {
     this.field = field;
     this.terms = new BytesRef[terms.length];
     for (int i = 0; i < terms.length; i++)
       this.terms[i] = new BytesRef(terms[i]);
   }
 
-  public FieldCache getFieldCache() {
-    return FieldCache.DEFAULT;
-  }
-
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-    final SortedDocValues fcsi = getFieldCache().getTermsIndex(context.reader(), field);
+    final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
     final FixedBitSet bits = new FixedBitSet(fcsi.getValueCount());
     for (int i=0;i<terms.length;i++) {
       int ord = fcsi.lookupTerm(terms[i]);
@@ -127,7 +113,7 @@
         bits.set(ord);
       }
     }
-    return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+    return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
       @Override
       protected final boolean matchDoc(int doc) {
         int ord = fcsi.getOrd(doc);
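
A sketch, assuming a hypothetical single-valued "country" field indexed with
SortedDocValuesField; each term is resolved to an ordinal once per segment, so
per-document matching is a single bit test:

    Filter countries = new DocValuesTermsFilter("country", "DE", "FR", "NL");
    TopDocs hits = searcher.search(new MatchAllDocsQuery(), countries, 10);
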
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCache.java b/lucene/core/src/java/org/apache/lucene/search/FieldCache.java
deleted file mode 100644
index f34b98c..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCache.java
+++ /dev/null
@@ -1,571 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.io.PrintStream;
-
-import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.AtomicReader;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.IndexReader; // javadocs
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.RamUsageEstimator;
-
-/**
- * Expert: Maintains caches of term values.
- *
- * <p>Created: May 19, 2004 11:13:14 AM
- *
- * @since   lucene 1.4
- * @see org.apache.lucene.util.FieldCacheSanityChecker
- *
- * @lucene.internal
- */
-public interface FieldCache {
-
-  /** Field values as 32-bit signed integers */
-  public static abstract class Ints {
-    /** Return an integer representation of this field's value. */
-    public abstract int get(int docID);
-    
-    /** Zero value for every document */
-    public static final Ints EMPTY = new Ints() {
-      @Override
-      public int get(int docID) {
-        return 0;
-      }
-    };
-  }
-
-  /** Field values as 64-bit signed long integers */
-  public static abstract class Longs {
-    /** Return an long representation of this field's value. */
-    public abstract long get(int docID);
-    
-    /** Zero value for every document */
-    public static final Longs EMPTY = new Longs() {
-      @Override
-      public long get(int docID) {
-        return 0;
-      }
-    };
-  }
-
-  /** Field values as 32-bit floats */
-  public static abstract class Floats {
-    /** Return an float representation of this field's value. */
-    public abstract float get(int docID);
-    
-    /** Zero value for every document */
-    public static final Floats EMPTY = new Floats() {
-      @Override
-      public float get(int docID) {
-        return 0;
-      }
-    };
-  }
-
-  /** Field values as 64-bit doubles */
-  public static abstract class Doubles {
-    /** Return an double representation of this field's value. */
-    public abstract double get(int docID);
-    
-    /** Zero value for every document */
-    public static final Doubles EMPTY = new Doubles() {
-      @Override
-      public double get(int docID) {
-        return 0;
-      }
-    };
-  }
-
-  /**
-   * Placeholder indicating creation of this cache is currently in-progress.
-   */
-  public static final class CreationPlaceholder {
-    Object value;
-  }
-
-  /**
-   * Marker interface as super-interface to all parsers. It
-   * is used to specify a custom parser to {@link
-   * SortField#SortField(String, FieldCache.Parser)}.
-   */
-  public interface Parser {
-    
-    /**
-     * Pulls a {@link TermsEnum} from the given {@link Terms}. This method allows certain parsers
-     * to filter the actual TermsEnum before the field cache is filled.
-     * 
-     * @param terms the {@link Terms} instance to create the {@link TermsEnum} from.
-     * @return a possibly filtered {@link TermsEnum} instance, this method must not return <code>null</code>.
-     * @throws IOException if an {@link IOException} occurs
-     */
-    public TermsEnum termsEnum(Terms terms) throws IOException;
-  }
-
-  /** Interface to parse ints from document fields.
-   * @see FieldCache#getInts(AtomicReader, String, FieldCache.IntParser, boolean)
-   */
-  public interface IntParser extends Parser {
-    /** Return an integer representation of this field's value. */
-    public int parseInt(BytesRef term);
-  }
-
-  /** Interface to parse floats from document fields.
-   * @see FieldCache#getFloats(AtomicReader, String, FieldCache.FloatParser, boolean)
-   */
-  public interface FloatParser extends Parser {
-    /** Return an float representation of this field's value. */
-    public float parseFloat(BytesRef term);
-  }
-
-  /** Interface to parse long from document fields.
-   * @see FieldCache#getLongs(AtomicReader, String, FieldCache.LongParser, boolean)
-   */
-  public interface LongParser extends Parser {
-    /** Return an long representation of this field's value. */
-    public long parseLong(BytesRef term);
-  }
-
-  /** Interface to parse doubles from document fields.
-   * @see FieldCache#getDoubles(AtomicReader, String, FieldCache.DoubleParser, boolean)
-   */
-  public interface DoubleParser extends Parser {
-    /** Return an double representation of this field's value. */
-    public double parseDouble(BytesRef term);
-  }
-
-  /** Expert: The cache used internally by sorting and range query classes. */
-  public static FieldCache DEFAULT = new FieldCacheImpl();
-
-  /**
-   * A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
-   * via {@link IntField}/{@link NumericTokenStream}.
-   */
-  public static final IntParser NUMERIC_UTILS_INT_PARSER=new IntParser(){
-    @Override
-    public int parseInt(BytesRef term) {
-      return NumericUtils.prefixCodedToInt(term);
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER"; 
-    }
-  };
-
-  /**
-   * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
-   * via {@link FloatField}/{@link NumericTokenStream}.
-   */
-  public static final FloatParser NUMERIC_UTILS_FLOAT_PARSER=new FloatParser(){
-    @Override
-    public float parseFloat(BytesRef term) {
-      return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term));
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER"; 
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
-    }
-  };
-
-  /**
-   * A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
-   * via {@link LongField}/{@link NumericTokenStream}.
-   */
-  public static final LongParser NUMERIC_UTILS_LONG_PARSER = new LongParser(){
-    @Override
-    public long parseLong(BytesRef term) {
-      return NumericUtils.prefixCodedToLong(term);
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER"; 
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
-    }
-  };
-
-  /**
-   * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
-   * via {@link DoubleField}/{@link NumericTokenStream}.
-   */
-  public static final DoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new DoubleParser(){
-    @Override
-    public double parseDouble(BytesRef term) {
-      return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term));
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER"; 
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
-    }
-  };
-  
-  /** Checks the internal cache for an appropriate entry, and if none is found,
-   *  reads the terms in <code>field</code> and returns a bit set at the size of
-   *  <code>reader.maxDoc()</code>, with turned on bits for each docid that 
-   *  does have a value for this field.
-   */
-  public Bits getDocsWithField(AtomicReader reader, String field) throws IOException;
-
-  /**
-   * Returns an {@link Ints} over the values found in documents in the given
-   * field.
-   *
-   * @see #getInts(AtomicReader, String, IntParser, boolean)
-   */
-  public Ints getInts(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns an {@link Ints} over the values found in documents in the given
-   * field. If the field was indexed as {@link NumericDocValuesField}, it simply
-   * uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
-   * Otherwise, it checks the internal cache for an appropriate entry, and if
-   * none is found, reads the terms in <code>field</code> as ints and returns
-   * an array of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * 
-   * @param reader
-   *          Used to get field values.
-   * @param field
-   *          Which field contains the longs.
-   * @param parser
-   *          Computes int for string values. May be {@code null} if the
-   *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link IntField}.
-   * @param setDocsWithField
-   *          If true then {@link #getDocsWithField} will also be computed and
-   *          stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException
-   *           If any error occurs.
-   */
-  public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Floats} over the values found in documents in the given
-   * field.
-   *
-   * @see #getFloats(AtomicReader, String, FloatParser, boolean)
-   */
-  public Floats getFloats(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Floats} over the values found in documents in the given
-   * field. If the field was indexed as {@link NumericDocValuesField}, it simply
-   * uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
-   * Otherwise, it checks the internal cache for an appropriate entry, and if
-   * none is found, reads the terms in <code>field</code> as floats and returns
-   * an array of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * 
-   * @param reader
-   *          Used to get field values.
-   * @param field
-   *          Which field contains the floats.
-   * @param parser
-   *          Computes float for string values. May be {@code null} if the
-   *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link FloatField}.
-   * @param setDocsWithField
-   *          If true then {@link #getDocsWithField} will also be computed and
-   *          stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException
-   *           If any error occurs.
-   */
-  public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Longs} over the values found in documents in the given
-   * field.
-   *
-   * @see #getLongs(AtomicReader, String, LongParser, boolean)
-   */
-  public Longs getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Longs} over the values found in documents in the given
-   * field. If the field was indexed as {@link NumericDocValuesField}, it simply
-   * uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
-   * Otherwise, it checks the internal cache for an appropriate entry, and if
-   * none is found, reads the terms in <code>field</code> as longs and returns
-   * an array of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * 
-   * @param reader
-   *          Used to get field values.
-   * @param field
-   *          Which field contains the longs.
-   * @param parser
-   *          Computes long for string values. May be {@code null} if the
-   *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link LongField}.
-   * @param setDocsWithField
-   *          If true then {@link #getDocsWithField} will also be computed and
-   *          stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException
-   *           If any error occurs.
-   */
-  public Longs getLongs(AtomicReader reader, String field, LongParser parser, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Doubles} over the values found in documents in the given
-   * field.
-   *
-   * @see #getDoubles(AtomicReader, String, DoubleParser, boolean)
-   */
-  public Doubles getDoubles(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /**
-   * Returns a {@link Doubles} over the values found in documents in the given
-   * field. If the field was indexed as {@link NumericDocValuesField}, it simply
-   * uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
-   * Otherwise, it checks the internal cache for an appropriate entry, and if
-   * none is found, reads the terms in <code>field</code> as doubles and returns
-   * an array of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * 
-   * @param reader
-   *          Used to get field values.
-   * @param field
-   *          Which field contains the longs.
-   * @param parser
-   *          Computes double for string values. May be {@code null} if the
-   *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link DoubleField}.
-   * @param setDocsWithField
-   *          If true then {@link #getDocsWithField} will also be computed and
-   *          stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException
-   *           If any error occurs.
-   */
-  public Doubles getDoubles(AtomicReader reader, String field, DoubleParser parser, boolean setDocsWithField) throws IOException;
-
-  /** Checks the internal cache for an appropriate entry, and if none
-   * is found, reads the term values in <code>field</code>
-   * and returns a {@link BinaryDocValues} instance, providing a
-   * method to retrieve the term (as a BytesRef) per document.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the strings.
-   * @param setDocsWithField  If true then {@link #getDocsWithField} will
-   *        also be computed and stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public BinaryDocValues getTerms(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /** Expert: just like {@link #getTerms(AtomicReader,String,boolean)},
-   *  but you can specify whether more RAM should be consumed in exchange for
-   *  faster lookups (default is "true").  Note that the
-   *  first call for a given reader and field "wins",
-   *  subsequent calls will share the same cache entry. */
-  public BinaryDocValues getTerms(AtomicReader reader, String field, boolean setDocsWithField, float acceptableOverheadRatio) throws IOException;
-
-  /** Checks the internal cache for an appropriate entry, and if none
-   * is found, reads the term values in <code>field</code>
-   * and returns a {@link SortedDocValues} instance,
-   * providing methods to retrieve sort ordinals and terms
-   * (as a ByteRef) per document.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the strings.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public SortedDocValues getTermsIndex(AtomicReader reader, String field) throws IOException;
-
-  /** Expert: just like {@link
-   *  #getTermsIndex(AtomicReader,String)}, but you can specify
-   *  whether more RAM should be consumed in exchange for
-   *  faster lookups (default is "true").  Note that the
-   *  first call for a given reader and field "wins",
-   *  subsequent calls will share the same cache entry. */
-  public SortedDocValues getTermsIndex(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException;
-
-  /**
-   * Checks the internal cache for an appropriate entry, and if none is found, reads the term values
-   * in <code>field</code> and returns a {@link DocTermOrds} instance, providing a method to retrieve
-   * the terms (as ords) per document.
-   *
-   * @param reader  Used to build a {@link DocTermOrds} instance
-   * @param field   Which field contains the strings.
-   * @return a {@link DocTermOrds} instance
-   * @throws IOException  If any error occurs.
-   */
-  public SortedSetDocValues getDocTermOrds(AtomicReader reader, String field) throws IOException;
-
-  /**
-   * EXPERT: A unique Identifier/Description for each item in the FieldCache. 
-   * Can be useful for logging/debugging.
-   * @lucene.experimental
-   */
-  public final class CacheEntry {
-
-    private final Object readerKey;
-    private final String fieldName;
-    private final Class<?> cacheType;
-    private final Object custom;
-    private final Object value;
-    private String size;
-
-    public CacheEntry(Object readerKey, String fieldName,
-                      Class<?> cacheType,
-                      Object custom,
-                      Object value) {
-      this.readerKey = readerKey;
-      this.fieldName = fieldName;
-      this.cacheType = cacheType;
-      this.custom = custom;
-      this.value = value;
-    }
-
-    public Object getReaderKey() {
-      return readerKey;
-    }
-
-    public String getFieldName() {
-      return fieldName;
-    }
-
-    public Class<?> getCacheType() {
-      return cacheType;
-    }
-
-    public Object getCustom() {
-      return custom;
-    }
-
-    public Object getValue() {
-      return value;
-    }
-
-    /** 
-     * Computes (and stores) the estimated size of the cache Value 
-     * @see #getEstimatedSize
-     */
-    public void estimateSize() {
-      long bytesUsed = RamUsageEstimator.sizeOf(getValue());
-      size = RamUsageEstimator.humanReadableUnits(bytesUsed);
-    }
-
-    /**
-     * The most recently estimated size of the value, null unless 
-     * estimateSize has been called.
-     */
-    public String getEstimatedSize() {
-      return size;
-    }
-    
-    @Override
-    public String toString() {
-      StringBuilder b = new StringBuilder();
-      b.append("'").append(getReaderKey()).append("'=>");
-      b.append("'").append(getFieldName()).append("',");
-      b.append(getCacheType()).append(",").append(getCustom());
-      b.append("=>").append(getValue().getClass().getName()).append("#");
-      b.append(System.identityHashCode(getValue()));
-      
-      String s = getEstimatedSize();
-      if(null != s) {
-        b.append(" (size =~ ").append(s).append(')');
-      }
-
-      return b.toString();
-    }
-  }
-  
-  /**
-   * EXPERT: Generates an array of CacheEntry objects representing all items 
-   * currently in the FieldCache.
-   * <p>
-   * NOTE: These CacheEntry objects maintain a strong reference to the 
-   * Cached Values.  Maintaining references to a CacheEntry the AtomicIndexReader 
-   * associated with it has garbage collected will prevent the Value itself
-   * from being garbage collected when the Cache drops the WeakReference.
-   * </p>
-   * @lucene.experimental
-   */
-  public CacheEntry[] getCacheEntries();
-
-  /**
-   * <p>
-   * EXPERT: Instructs the FieldCache to forcibly expunge all entries 
-   * from the underlying caches.  This is intended only to be used for 
-   * test methods as a way to ensure a known base state of the Cache 
-   * (with out needing to rely on GC to free WeakReferences).  
-   * It should not be relied on for "Cache maintenance" in general 
-   * application code.
-   * </p>
-   * @lucene.experimental
-   */
-  public void purgeAllCaches();
-
-  /**
-   * Expert: drops all cache entries associated with this
-   * reader {@link IndexReader#getCoreCacheKey}.  NOTE: this cache key must
-   * precisely match the reader that the cache entry is
-   * keyed on. If you pass a top-level reader, it usually
-   * will have no effect as Lucene now caches at the segment
-   * reader level.
-   */
-  public void purgeByCacheKey(Object coreCacheKey);
-
-  /**
-   * If non-null, FieldCacheImpl will warn whenever
-   * entries are created that are not sane according to
-   * {@link org.apache.lucene.util.FieldCacheSanityChecker}.
-   */
-  public void setInfoStream(PrintStream stream);
-
-  /** counterpart of {@link #setInfoStream(PrintStream)} */
-  public PrintStream getInfoStream();
-}
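
A migration sketch for code that used the removed interface, assuming a
hypothetical numeric "count" field and the java.util Map/Collections imports;
UninvertingReader lives in lucene/misc:

    // When "count" was indexed with NumericDocValuesField:
    NumericDocValues values = DocValues.getNumeric(context.reader(), "count");
    long v = values.get(docID);

    // For legacy indexes without docvalues, uninvert on the fly; the mapping
    // names which fields to uninvert and as what type:
    Map<String,UninvertingReader.Type> mapping =
        Collections.singletonMap("count", UninvertingReader.Type.INTEGER);
    DirectoryReader wrapped = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
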
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
index 5c81ab5..64fe078 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
@@ -19,13 +19,12 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.FieldCache.DoubleParser;
-import org.apache.lucene.search.FieldCache.FloatParser;
-import org.apache.lucene.search.FieldCache.IntParser;
-import org.apache.lucene.search.FieldCache.LongParser;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
@@ -82,7 +81,7 @@
  *       when the search is switching to the next segment.
  *       You may need to update internal state of the
  *       comparator, for example retrieving new values from
- *       the {@link FieldCache}.
+ *       DocValues.
  *
  *  <li> {@link #value} Return the sort value stored in
  *       the specified slot.  This is only called at the end
@@ -236,7 +235,7 @@
     @Override
     public FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
       if (missingValue != null) {
-        docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+        docsWithField = DocValues.getDocsWithField(context.reader(), field);
         // optimization to remove unneeded checks on the bit interface:
         if (docsWithField instanceof Bits.MatchAllBits) {
           docsWithField = null;
@@ -249,18 +248,16 @@
   }
 
   /** Parses field's values as double (using {@link
-   *  FieldCache#getDoubles} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues} and sorts by ascending value */
   public static final class DoubleComparator extends NumericComparator<Double> {
     private final double[] values;
-    private final DoubleParser parser;
-    private FieldCache.Doubles currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private double bottom;
     private double topValue;
 
-    DoubleComparator(int numHits, String field, FieldCache.Parser parser, Double missingValue) {
+    DoubleComparator(int numHits, String field, Double missingValue) {
       super(field, missingValue);
       values = new double[numHits];
-      this.parser = (DoubleParser) parser;
     }
 
     @Override
@@ -270,7 +267,7 @@
 
     @Override
     public int compareBottom(int doc) {
-      double v2 = currentReaderValues.get(doc);
+      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -282,7 +279,7 @@
 
     @Override
     public void copy(int slot, int doc) {
-      double v2 = currentReaderValues.get(doc);
+      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -294,9 +291,7 @@
 
     @Override
     public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
     
@@ -317,7 +312,7 @@
 
     @Override
     public int compareTop(int doc) {
-      double docValue = currentReaderValues.get(doc);
+      double docValue = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
@@ -328,18 +323,16 @@
   }
 
   /** Parses field's values as float (using {@link
-   *  FieldCache#getFloats} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class FloatComparator extends NumericComparator<Float> {
     private final float[] values;
-    private final FloatParser parser;
-    private FieldCache.Floats currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private float bottom;
     private float topValue;
 
-    FloatComparator(int numHits, String field, FieldCache.Parser parser, Float missingValue) {
+    FloatComparator(int numHits, String field, Float missingValue) {
       super(field, missingValue);
       values = new float[numHits];
-      this.parser = (FloatParser) parser;
     }
     
     @Override
@@ -350,7 +343,7 @@
     @Override
     public int compareBottom(int doc) {
       // TODO: are there sneaky non-branch ways to compute sign of float?
-      float v2 = currentReaderValues.get(doc);
+      float v2 = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -362,7 +355,7 @@
 
     @Override
     public void copy(int slot, int doc) {
-      float v2 = currentReaderValues.get(doc);
+      float v2 = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -374,9 +367,7 @@
 
     @Override
     public FieldComparator<Float> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
     
@@ -397,7 +388,7 @@
 
     @Override
     public int compareTop(int doc) {
-      float docValue = currentReaderValues.get(doc);
+      float docValue = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
@@ -408,18 +399,16 @@
   }
 
   /** Parses field's values as int (using {@link
-   *  FieldCache#getInts} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class IntComparator extends NumericComparator<Integer> {
     private final int[] values;
-    private final IntParser parser;
-    private FieldCache.Ints currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private int bottom;                           // Value of bottom of queue
     private int topValue;
 
-    IntComparator(int numHits, String field, FieldCache.Parser parser, Integer missingValue) {
+    IntComparator(int numHits, String field, Integer missingValue) {
       super(field, missingValue);
       values = new int[numHits];
-      this.parser = (IntParser) parser;
     }
         
     @Override
@@ -429,7 +418,7 @@
 
     @Override
     public int compareBottom(int doc) {
-      int v2 = currentReaderValues.get(doc);
+      int v2 = (int) currentReaderValues.get(doc);
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -441,7 +430,7 @@
 
     @Override
     public void copy(int slot, int doc) {
-      int v2 = currentReaderValues.get(doc);
+      int v2 = (int) currentReaderValues.get(doc);
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
@@ -453,9 +442,7 @@
 
     @Override
     public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getInts(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
     
@@ -476,7 +463,7 @@
 
     @Override
     public int compareTop(int doc) {
-      int docValue = currentReaderValues.get(doc);
+      int docValue = (int) currentReaderValues.get(doc);
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
@@ -487,18 +474,16 @@
   }
 
   /** Parses field's values as long (using {@link
-   *  FieldCache#getLongs} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class LongComparator extends NumericComparator<Long> {
     private final long[] values;
-    private final LongParser parser;
-    private FieldCache.Longs currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private long bottom;
     private long topValue;
 
-    LongComparator(int numHits, String field, FieldCache.Parser parser, Long missingValue) {
+    LongComparator(int numHits, String field, Long missingValue) {
       super(field, missingValue);
       values = new long[numHits];
-      this.parser = (LongParser) parser;
     }
 
     @Override
@@ -534,9 +519,7 @@
 
     @Override
     public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
     
@@ -712,7 +695,7 @@
    *  ordinals.  This is functionally equivalent to {@link
    *  org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the string
    *  to their relative ordinal positions (using the index
-   *  returned by {@link FieldCache#getTermsIndex}), and
+   *  returned by {@link AtomicReader#getSortedDocValues(String)}), and
    *  does most comparisons using the ordinals.  For medium
    *  to large results, this comparator will be much faster
    *  than {@link org.apache.lucene.search.FieldComparator.TermValComparator}.  For very small
@@ -856,7 +839,7 @@
     
     /** Retrieves the SortedDocValues for the field in this segment */
     protected SortedDocValues getSortedDocValues(AtomicReaderContext context, String field) throws IOException {
-      return FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+      return DocValues.getSorted(context.reader(), field);
     }
     
     @Override
@@ -1029,8 +1012,8 @@
 
     @Override
     public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
-      docTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, true);
-      docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+      docTerms = DocValues.getBinary(context.reader(), field);
+      docsWithField = DocValues.getDocsWithField(context.reader(), field);
       return this;
     }
     
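
The numeric comparators above now read NumericDocValues, which return raw long
bits; a sketch of the decoding step, assuming a hypothetical "weight" double
field:

    // Double/float fields store Double.doubleToRawLongBits /
    // Float.floatToRawIntBits at index time, so decode on read:
    NumericDocValues raw = DocValues.getNumeric(context.reader(), "weight");
    double value = Double.longBitsToDouble(raw.get(docID));
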
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueFilter.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueFilter.java
index 0770da1..045b0b2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldValueFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueFilter.java
@@ -18,15 +18,17 @@
  */
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.Bits.MatchAllBits;
 import org.apache.lucene.util.Bits.MatchNoBits;
 
 /**
  * A {@link Filter} that accepts all documents that have one or more values in a
- * given field. This {@link Filter} request {@link Bits} from the
- * {@link FieldCache} and build the bits if not present.
+ * given field. This {@link Filter} requests {@link Bits} from
+ * {@link AtomicReader#getDocsWithField}.
  */
 public class FieldValueFilter extends Filter {
   private final String field;
@@ -76,13 +78,13 @@
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
       throws IOException {
-    final Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(
+    final Bits docsWithField = DocValues.getDocsWithField(
         context.reader(), field);
     if (negate) {
       if (docsWithField instanceof MatchAllBits) {
         return null;
       }
-      return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+      return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
         @Override
         protected final boolean matchDoc(int doc) {
           return !docsWithField.get(doc);
@@ -97,7 +99,7 @@
         // :-)
         return BitsFilteredDocIdSet.wrap((DocIdSet) docsWithField, acceptDocs);
       }
-      return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
+      return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
         @Override
         protected final boolean matchDoc(int doc) {
           return docsWithField.get(doc);
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index a4a73de..6dae17b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -23,13 +23,10 @@
 
 /**
  * Expert: A hit queue for sorting by hits by terms in more than one field.
- * Uses <code>FieldCache.DEFAULT</code> for maintaining
- * internal term lookup tables.
  * 
  * @lucene.experimental
  * @since 2.9
  * @see IndexSearcher#search(Query,Filter,int,Sort)
- * @see FieldCache
  */
 public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends PriorityQueue<T> {
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortField.java b/lucene/core/src/java/org/apache/lucene/search/SortField.java
index 2987f95..82cb414 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortField.java
@@ -94,7 +94,6 @@
   private String field;
   private Type type;  // defaults to determining type dynamically
   boolean reverse = false;  // defaults to natural order
-  private FieldCache.Parser parser;
 
   // Used for CUSTOM sort
   private FieldComparatorSource comparatorSource;
@@ -124,44 +123,6 @@
     this.reverse = reverse;
   }
 
-  /** Creates a sort by terms in the given field, parsed
-   * to numeric values using a custom {@link FieldCache.Parser}.
-   * @param field  Name of field to sort by.  Must not be null.
-   * @param parser Instance of a {@link FieldCache.Parser},
-   *  which must subclass one of the existing numeric
-   *  parsers from {@link FieldCache}. Sort type is inferred
-   *  by testing which numeric parser the parser subclasses.
-   * @throws IllegalArgumentException if the parser fails to
-   *  subclass an existing numeric parser, or field is null
-   */
-  public SortField(String field, FieldCache.Parser parser) {
-    this(field, parser, false);
-  }
-
-  /** Creates a sort, possibly in reverse, by terms in the given field, parsed
-   * to numeric values using a custom {@link FieldCache.Parser}.
-   * @param field  Name of field to sort by.  Must not be null.
-   * @param parser Instance of a {@link FieldCache.Parser},
-   *  which must subclass one of the existing numeric
-   *  parsers from {@link FieldCache}. Sort type is inferred
-   *  by testing which numeric parser the parser subclasses.
-   * @param reverse True if natural order should be reversed.
-   * @throws IllegalArgumentException if the parser fails to
-   *  subclass an existing numeric parser, or field is null
-   */
-  public SortField(String field, FieldCache.Parser parser, boolean reverse) {
-    if (parser instanceof FieldCache.IntParser) initFieldType(field, Type.INT);
-    else if (parser instanceof FieldCache.FloatParser) initFieldType(field, Type.FLOAT);
-    else if (parser instanceof FieldCache.LongParser) initFieldType(field, Type.LONG);
-    else if (parser instanceof FieldCache.DoubleParser) initFieldType(field, Type.DOUBLE);
-    else {
-      throw new IllegalArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")");
-    }
-
-    this.reverse = reverse;
-    this.parser = parser;
-  }
-
   /** Pass this to {@link #setMissingValue} to have missing
    *  string values sort first. */
   public final static Object STRING_FIRST = new Object() {
@@ -239,14 +200,6 @@
     return type;
   }
 
-  /** Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
-   * May return <code>null</code> if no parser was specified. Sorting is using the default parser then.
-   * @return An instance of a {@link FieldCache} parser, or <code>null</code>.
-   */
-  public FieldCache.Parser getParser() {
-    return parser;
-  }
-
   /** Returns whether the sort should be reversed.
    * @return  True if natural order should be reversed.
    */
@@ -320,8 +273,7 @@
   }
 
   /** Returns true if <code>o</code> is equal to this.  If a
-   *  {@link FieldComparatorSource} or {@link
-   *  FieldCache.Parser} was provided, it must properly
+   *  {@link FieldComparatorSource} was provided, it must properly
    *  implement equals (unless a singleton is always used). */
   @Override
   public boolean equals(Object o) {
@@ -337,8 +289,7 @@
   }
 
   /** Returns true if <code>o</code> is equal to this.  If a
-   *  {@link FieldComparatorSource} or {@link
-   *  FieldCache.Parser} was provided, it must properly
+   *  {@link FieldComparatorSource} was provided, it must properly
    *  implement hashCode (unless a singleton is always
    *  used). */
   @Override
@@ -381,16 +332,16 @@
       return new FieldComparator.DocComparator(numHits);
 
     case INT:
-      return new FieldComparator.IntComparator(numHits, field, parser, (Integer) missingValue);
+      return new FieldComparator.IntComparator(numHits, field, (Integer) missingValue);
 
     case FLOAT:
-      return new FieldComparator.FloatComparator(numHits, field, parser, (Float) missingValue);
+      return new FieldComparator.FloatComparator(numHits, field, (Float) missingValue);
 
     case LONG:
-      return new FieldComparator.LongComparator(numHits, field, parser, (Long) missingValue);
+      return new FieldComparator.LongComparator(numHits, field, (Long) missingValue);
 
     case DOUBLE:
-      return new FieldComparator.DoubleComparator(numHits, field, parser, (Double) missingValue);
+      return new FieldComparator.DoubleComparator(numHits, field, (Double) missingValue);
 
     case CUSTOM:
       assert comparatorSource != null;
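
A migration sketch for the removed parser-based constructors, assuming a
hypothetical "count" field indexed with NumericDocValuesField:

    // Before (removed): new SortField("count", myIntParser, true)
    Sort sort = new Sort(new SortField("count", SortField.Type.INT, true));
    TopDocs hits = searcher.search(query, null, 10, sort);
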
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java b/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java
new file mode 100644
index 0000000..5ae4feb
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java
@@ -0,0 +1,228 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.RandomAccessOrds;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.util.BytesRef;
+
+/** Selects a value from the document's set to use as the representative value */
+public class SortedSetSelector {
+  
+  /** 
+   * Type of selection to perform.
+   * <p>
+   * Limitations:
+   * <ul>
+   *   <li>Fields containing {@link Integer#MAX_VALUE} or more unique values
+   *       are unsupported.
+   *   <li>Selectors other than {@link Type#MIN} require
+   *       optional codec support. However, several codecs provided by Lucene,
+   *       including the current default codec, support this.
+   * </ul>
+   */
+  public enum Type {
+    /** 
+     * Selects the minimum value in the set 
+     */
+    MIN,
+    /** 
+     * Selects the maximum value in the set 
+     */
+    MAX,
+    /** 
+     * Selects the middle value in the set.
+     * <p>
+     * If the set has an even number of values, the lower of the middle two is chosen.
+     */
+    MIDDLE_MIN,
+    /** 
+     * Selects the middle value in the set.
+     * <p>
+     * If the set has an even number of values, the higher of the middle two is chosen.
+     */
+    MIDDLE_MAX
+  }
+  
+  /** Wraps a multi-valued SortedSetDocValues as a single-valued view, using the specified selector */
+  public static SortedDocValues wrap(SortedSetDocValues sortedSet, Type selector) {
+    if (sortedSet.getValueCount() >= Integer.MAX_VALUE) {
+      throw new UnsupportedOperationException("fields containing more than " + (Integer.MAX_VALUE-1) + " unique terms are unsupported");
+    }
+    
+    SortedDocValues singleton = DocValues.unwrapSingleton(sortedSet);
+    if (singleton != null) {
+      // it's actually single-valued in practice, but indexed as multi-valued,
+      // so just sort on the underlying single-valued dv directly.
+      // regardless of selector type, this optimization is safe!
+      return singleton;
+    } else if (selector == Type.MIN) {
+      return new MinValue(sortedSet);
+    } else {
+      if (sortedSet instanceof RandomAccessOrds == false) {
+        throw new UnsupportedOperationException("codec does not support random access ordinals, cannot use selector: " + selector);
+      }
+      RandomAccessOrds randomOrds = (RandomAccessOrds) sortedSet;
+      switch(selector) {
+        case MAX: return new MaxValue(randomOrds);
+        case MIDDLE_MIN: return new MiddleMinValue(randomOrds);
+        case MIDDLE_MAX: return new MiddleMaxValue(randomOrds);
+        case MIN: 
+        default: 
+          throw new AssertionError();
+      }
+    }
+  }
+  
+  /** Wraps a SortedSetDocValues and returns the first ordinal (min) */
+  static class MinValue extends SortedDocValues {
+    final SortedSetDocValues in;
+    
+    MinValue(SortedSetDocValues in) {
+      this.in = in;
+    }
+
+    @Override
+    public int getOrd(int docID) {
+      in.setDocument(docID);
+      return (int) in.nextOrd();
+    }
+
+    @Override
+    public void lookupOrd(int ord, BytesRef result) {
+      in.lookupOrd(ord, result);
+    }
+
+    @Override
+    public int getValueCount() {
+      return (int) in.getValueCount();
+    }
+
+    @Override
+    public int lookupTerm(BytesRef key) {
+      return (int) in.lookupTerm(key);
+    }
+  }
+  
+  /** Wraps a SortedSetDocValues and returns the last ordinal (max) */
+  static class MaxValue extends SortedDocValues {
+    final RandomAccessOrds in;
+    
+    MaxValue(RandomAccessOrds in) {
+      this.in = in;
+    }
+
+    @Override
+    public int getOrd(int docID) {
+      in.setDocument(docID);
+      final int count = in.cardinality();
+      if (count == 0) {
+        return -1;
+      } else {
+        return (int) in.ordAt(count-1);
+      }
+    }
+
+    @Override
+    public void lookupOrd(int ord, BytesRef result) {
+      in.lookupOrd(ord, result);
+    }
+
+    @Override
+    public int getValueCount() {
+      return (int) in.getValueCount();
+    }
+    
+    @Override
+    public int lookupTerm(BytesRef key) {
+      return (int) in.lookupTerm(key);
+    }
+  }
+  
+  /** Wraps a SortedSetDocValues and returns the middle ordinal (or min of the two) */
+  static class MiddleMinValue extends SortedDocValues {
+    final RandomAccessOrds in;
+    
+    MiddleMinValue(RandomAccessOrds in) {
+      this.in = in;
+    }
+
+    @Override
+    public int getOrd(int docID) {
+      in.setDocument(docID);
+      final int count = in.cardinality();
+      if (count == 0) {
+        return -1;
+      } else {
+        return (int) in.ordAt((count-1) >>> 1);
+      }
+    }
+
+    @Override
+    public void lookupOrd(int ord, BytesRef result) {
+      in.lookupOrd(ord, result);
+    }
+
+    @Override
+    public int getValueCount() {
+      return (int) in.getValueCount();
+    }
+    
+    @Override
+    public int lookupTerm(BytesRef key) {
+      return (int) in.lookupTerm(key);
+    }
+  }
+  
+  /** Wraps a SortedSetDocValues and returns the middle ordinal (the higher of the middle two when the count is even) */
+  static class MiddleMaxValue extends SortedDocValues {
+    final RandomAccessOrds in;
+    
+    MiddleMaxValue(RandomAccessOrds in) {
+      this.in = in;
+    }
+
+    @Override
+    public int getOrd(int docID) {
+      in.setDocument(docID);
+      final int count = in.cardinality();
+      if (count == 0) {
+        return -1;
+      } else {
+        return (int) in.ordAt(count >>> 1);
+      }
+    }
+
+    @Override
+    public void lookupOrd(int ord, BytesRef result) {
+      in.lookupOrd(ord, result);
+    }
+
+    @Override
+    public int getValueCount() {
+      return (int) in.getValueCount();
+    }
+    
+    @Override
+    public int lookupTerm(BytesRef key) {
+      return (int) in.lookupTerm(key);
+    }
+  }
+}
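A minimal sketch of how the selector above can be applied at search time, assuming an open AtomicReader named reader and a SORTED_SET field named "labels" (both names are illustrative, not part of this patch):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.index.SortedSetDocValues;
    import org.apache.lucene.search.SortedSetSelector;
    import org.apache.lucene.util.BytesRef;

    public class SelectorSketch {
      /** Prints the MAX-selected "labels" value for every document. */
      static void printMaxLabels(AtomicReader reader) throws IOException {
        SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "labels");
        // MAX (like MIDDLE_MIN/MIDDLE_MAX) needs random-access ordinals;
        // the current default codec supports this.
        SortedDocValues view = SortedSetSelector.wrap(sortedSet, SortedSetSelector.Type.MAX);
        BytesRef scratch = new BytesRef();
        for (int doc = 0; doc < reader.maxDoc(); doc++) {
          int ord = view.getOrd(doc);
          if (ord == -1) {
            System.out.println(doc + ": <no value>");
          } else {
            view.lookupOrd(ord, scratch);
            System.out.println(doc + ": " + scratch.utf8ToString());
          }
        }
      }
    }

If the field was indexed multi-valued but is single-valued in practice, wrap() falls through to the underlying single-valued doc values directly, regardless of selector.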
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java b/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
new file mode 100644
index 0000000..157d4a3
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
@@ -0,0 +1,133 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+
+/** 
+ * SortField for {@link SortedSetDocValues}.
+ * <p>
+ * A SortedSetDocValues contains multiple values for a field, so sorting with
+ * this technique "selects" a value as the representative sort value for the document.
+ * <p>
+ * By default, the minimum value in the set is selected as the sort value, but
+ * this can be customized. Selectors other than the default have some limitations,
+ * to ensure that every selection happens in constant time for performance.
+ * <p>
+ * Like sorting by string, this also supports sorting missing values as first or last,
+ * via {@link #setMissingValue(Object)}.
+ *
+ * @see SortedSetSelector
+ */
+public class SortedSetSortField extends SortField {
+  
+  private final SortedSetSelector.Type selector;
+  
+  /**
+   * Creates a sort, possibly in reverse, by the minimum value in the set 
+   * for the document.
+   * @param field Name of field to sort by.  Must not be null.
+   * @param reverse True if natural order should be reversed.
+   */
+  public SortedSetSortField(String field, boolean reverse) {
+    this(field, reverse, SortedSetSelector.Type.MIN);
+  }
+
+  /**
+   * Creates a sort, possibly in reverse, specifying how the sort value from 
+   * the document's set is selected.
+   * @param field Name of field to sort by.  Must not be null.
+   * @param reverse True if natural order should be reversed.
+   * @param selector custom selector type for choosing the sort value from the set.
+   * <p>
+   * NOTE: selectors other than {@link SortedSetSelector.Type#MIN} require optional codec support.
+   */
+  public SortedSetSortField(String field, boolean reverse, SortedSetSelector.Type selector) {
+    super(field, SortField.Type.CUSTOM, reverse);
+    if (selector == null) {
+      throw new NullPointerException("selector must not be null");
+    }
+    this.selector = selector;
+  }
+  
+  /** Returns the selector in use for this sort */
+  public SortedSetSelector.Type getSelector() {
+    return selector;
+  }
+
+  @Override
+  public int hashCode() {
+    return 31 * super.hashCode() + selector.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!super.equals(obj)) return false;
+    if (getClass() != obj.getClass()) return false;
+    SortedSetSortField other = (SortedSetSortField) obj;
+    if (selector != other.selector) return false;
+    return true;
+  }
+  
+  @Override
+  public String toString() {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append("<sortedset" + ": \"").append(getField()).append("\">");
+    if (getReverse()) buffer.append('!');
+    if (missingValue != null) {
+      buffer.append(" missingValue=");
+      buffer.append(missingValue);
+    }
+    buffer.append(" selector=");
+    buffer.append(selector);
+
+    return buffer.toString();
+  }
+
+  /**
+   * Set how missing values (the empty set) are sorted.
+   * <p>
+   * Note that this must be {@link #STRING_FIRST} or {@link #STRING_LAST}.
+   */
+  @Override
+  public void setMissingValue(Object missingValue) {
+    if (missingValue != STRING_FIRST && missingValue != STRING_LAST) {
+      throw new IllegalArgumentException("For SORTED_SET type, missing value must be either STRING_FIRST or STRING_LAST");
+    }
+    this.missingValue = missingValue;
+  }
+  
+  @Override
+  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+    return new FieldComparator.TermOrdValComparator(numHits, getField(), missingValue == STRING_LAST) {
+      @Override
+      protected SortedDocValues getSortedDocValues(AtomicReaderContext context, String field) throws IOException {
+        SortedSetDocValues sortedSet = DocValues.getSortedSet(context.reader(), field);
+        return SortedSetSelector.wrap(sortedSet, selector);
+      }
+    };
+  }
+}
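A sketch of sorting with the new SortField, assuming an IndexSearcher named searcher over an index with a SORTED_SET field "tags" (illustrative names):

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedSetSortField;
    import org.apache.lucene.search.TopDocs;

    public class SortedSetSortSketch {
      static TopDocs topTenByTags(IndexSearcher searcher) throws IOException {
        // Ascending by the minimum "tags" value per document (the default selector).
        SortedSetSortField sortField = new SortedSetSortField("tags", false);
        // Documents with an empty set sort after all others.
        sortField.setMissingValue(SortField.STRING_LAST);
        return searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
      }
    }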
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
index c41e839..9789a32 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
@@ -29,7 +29,7 @@
  * for numerical ranges; use {@link NumericRangeFilter} instead.
  *
  * <p>If you construct a large number of range filters with different ranges but on the 
- * same field, {@link FieldCacheRangeFilter} may have significantly better performance. 
+ * same field, {@link DocValuesRangeFilter} may have significantly better performance. 
  * @since 2.9
  */
 public class TermRangeFilter extends MultiTermQueryWrapperFilter<TermRangeQuery> {
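For reference, a sketch of the DocValuesRangeFilter alternative mentioned in the javadoc above, assuming an IndexSearcher named searcher and an "id" field that also carries SORTED doc values (illustrative names):

    import java.io.IOException;

    import org.apache.lucene.search.DocValuesRangeFilter;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.TopDocs;

    public class RangeFilterSketch {
      static TopDocs idsBetween(IndexSearcher searcher, String lo, String hi) throws IOException {
        // Inclusive range over the "id" doc values; cheap to reuse across many ranges.
        Filter filter = DocValuesRangeFilter.newStringRange("id", lo, hi, true, true);
        return searcher.search(new MatchAllDocsQuery(), filter, 10);
      }
    }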
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 889501a..1be51fb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -337,8 +337,8 @@
 <p>
 Finally, you can extend the low level {@link org.apache.lucene.search.similarities.Similarity Similarity} directly
 to implement a new retrieval model, or to use external scoring factors particular to your application. For example,
-a custom Similarity can access per-document values via {@link org.apache.lucene.search.FieldCache FieldCache} or
-{@link org.apache.lucene.index.NumericDocValues} and integrate them into the score.
+a custom Similarity can access per-document values via {@link org.apache.lucene.index.NumericDocValues} and 
+integrate them into the score.
 </p>
 <p>
 See the {@link org.apache.lucene.search.similarities} package documentation for information
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 04dfc52..723a8d9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -46,7 +46,6 @@
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -64,6 +63,7 @@
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -877,17 +877,18 @@
       hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
       assertEquals("wrong number of hits", 34, hits.length);
       
-      // check decoding into field cache
-      FieldCache.Ints fci = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieInt", false);
-      int maxDoc = searcher.getIndexReader().maxDoc();
-      for(int doc=0;doc<maxDoc;doc++) {
-        int val = fci.get(doc);
+      // check decoding of terms
+      Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "trieInt");
+      TermsEnum termsEnum = NumericUtils.filterPrefixCodedInts(terms.iterator(null));
+      while (termsEnum.next() != null) {
+        int val = NumericUtils.prefixCodedToInt(termsEnum.term());
         assertTrue("value in id bounds", val >= 0 && val < 35);
       }
       
-      FieldCache.Longs fcl = FieldCache.DEFAULT.getLongs(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieLong", false);
-      for(int doc=0;doc<maxDoc;doc++) {
-        long val = fcl.get(doc);
+      terms = MultiFields.getTerms(searcher.getIndexReader(), "trieLong");
+      termsEnum = NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+      while (termsEnum.next() != null) {
+        long val = NumericUtils.prefixCodedToLong(termsEnum.term());
         assertTrue("value in id bounds", val >= 0L && val < 35L);
       }
       
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
index 7a84269..7d6b8ae 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -32,13 +32,11 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.NoSuchDirectoryException;
 import org.apache.lucene.util.Bits;
@@ -753,44 +751,6 @@
     dir.close();
   }
   
-  // LUCENE-1579: Ensure that on a reopened reader, that any
-  // shared segments reuse the doc values arrays in
-  // FieldCache
-  public void testFieldCacheReuseAfterReopen() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
-            setMergePolicy(newLogMergePolicy(10))
-    );
-    Document doc = new Document();
-    doc.add(new IntField("number", 17, Field.Store.NO));
-    writer.addDocument(doc);
-    writer.commit();
-  
-    // Open reader1
-    DirectoryReader r = DirectoryReader.open(dir);
-    AtomicReader r1 = getOnlySegmentReader(r);
-    final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(r1, "number", false);
-    assertEquals(17, ints.get(0));
-  
-    // Add new segment
-    writer.addDocument(doc);
-    writer.commit();
-  
-    // Reopen reader1 --> reader2
-    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
-    assertNotNull(r2);
-    r.close();
-    AtomicReader sub0 = r2.leaves().get(0).reader();
-    final FieldCache.Ints ints2 = FieldCache.DEFAULT.getInts(sub0, "number", false);
-    r2.close();
-    assertTrue(ints == ints2);
-  
-    writer.shutdown();
-    dir.close();
-  }
-  
   // LUCENE-1586: getUniqueTermCount
   public void testUniqueTermCount() throws Exception {
     Directory dir = newDirectory();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
index 0f1fd63..0b6b5c8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
@@ -32,7 +32,6 @@
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -109,7 +108,7 @@
 
     DirectoryReader r = w.getReader();
     w.shutdown();
-    assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
+    assertEquals(17, DocValues.getNumeric(getOnlySegmentReader(r), "field").get(0));
     r.close();
     d.close();
   }
@@ -133,7 +132,7 @@
 
     DirectoryReader r = w.getReader();
     w.shutdown();
-    assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
+    assertEquals(17, DocValues.getNumeric(getOnlySegmentReader(r), "field").get(0));
     r.close();
     d.close();
   }
@@ -176,7 +175,7 @@
     w.addDocument(doc);
     w.forceMerge(1);
     DirectoryReader r = w.getReader();
-    BinaryDocValues s = FieldCache.DEFAULT.getTerms(getOnlySegmentReader(r), "field", false);
+    BinaryDocValues s = DocValues.getSorted(getOnlySegmentReader(r), "field");
 
     BytesRef bytes1 = new BytesRef();
     s.get(0, bytes1);
@@ -783,7 +782,7 @@
     AtomicReader subR = r.leaves().get(0).reader();
     assertEquals(2, subR.numDocs());
 
-    Bits bits = FieldCache.DEFAULT.getDocsWithField(subR, "dv");
+    Bits bits = DocValues.getDocsWithField(subR, "dv");
     assertTrue(bits.get(0));
     assertTrue(bits.get(1));
     r.close();
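The test changes above all follow one pattern: FieldCache.DEFAULT lookups become static helpers on DocValues, called with no null checks. A condensed sketch of the same calls, assuming an AtomicReader named reader and illustrative field names:

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.BinaryDocValues;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.BytesRef;

    public class HelperSketch {
      static void dumpFirstDoc(AtomicReader reader) throws IOException {
        NumericDocValues nums = DocValues.getNumeric(reader, "count");
        BinaryDocValues terms = DocValues.getSorted(reader, "label"); // SORTED also answers BINARY access
        Bits withField = DocValues.getDocsWithField(reader, "count");

        BytesRef scratch = new BytesRef();
        terms.get(0, scratch);
        System.out.println("count=" + nums.get(0)
            + " label=" + scratch.utf8ToString()
            + " hasCount=" + withField.get(0));
      }
    }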
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index f6baf88..326e9f4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -55,7 +55,6 @@
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
@@ -1751,11 +1750,6 @@
     w.shutdown();
     assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
 
-    SortedDocValues dti = FieldCache.DEFAULT.getTermsIndex(SlowCompositeReaderWrapper.wrap(reader), "content", random().nextFloat() * PackedInts.FAST);
-    assertEquals(4, dti.getValueCount());
-    BytesRef br = new BytesRef();
-    dti.lookupOrd(2, br);
-    assertEquals(bigTermBytesRef, br);
     reader.close();
     dir.close();
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index f3bac8e..d84d151 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -33,11 +33,11 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.English;
@@ -240,6 +240,7 @@
     for(int docCount=0;docCount<numDocs;docCount++) {
       Document doc = new Document();
       doc.add(new IntField("id", docCount, Field.Store.YES));
+      doc.add(new NumericDocValuesField("id", docCount));
       List<Token> tokens = new ArrayList<>();
       final int numTokens = atLeast(100);
       //final int numTokens = atLeast(20);
@@ -296,7 +297,7 @@
       DocsEnum docs = null;
       DocsAndPositionsEnum docsAndPositions = null;
       DocsAndPositionsEnum docsAndPositionsAndOffsets = null;
-      final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(sub, "id", false);
+      final NumericDocValues docIDToID = DocValues.getNumeric(sub, "id");
       for(String term : terms) {
         //System.out.println("  term=" + term);
         if (termsEnum.seekExact(new BytesRef(term))) {
@@ -305,7 +306,7 @@
           int doc;
           //System.out.println("    doc/freq");
           while((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-            final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
+            final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
             //System.out.println("      doc=" + docIDToID.get(doc) + " docID=" + doc + " " + expected.size() + " freq");
             assertNotNull(expected);
             assertEquals(expected.size(), docs.freq());
@@ -316,7 +317,7 @@
           assertNotNull(docsAndPositions);
           //System.out.println("    doc/freq/pos");
           while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-            final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
+            final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
             //System.out.println("      doc=" + docIDToID.get(doc) + " " + expected.size() + " freq");
             assertNotNull(expected);
             assertEquals(expected.size(), docsAndPositions.freq());
@@ -331,7 +332,7 @@
           assertNotNull(docsAndPositionsAndOffsets);
           //System.out.println("    doc/freq/pos/offs");
           while((doc = docsAndPositionsAndOffsets.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-            final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
+            final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
             //System.out.println("      doc=" + docIDToID.get(doc) + " " + expected.size() + " freq");
             assertNotNull(expected);
             assertEquals(expected.size(), docsAndPositionsAndOffsets.freq());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
index 2bea6cc..32f17c0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
@@ -24,8 +24,8 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -160,6 +160,7 @@
   private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
     Document doc = new Document();
     doc.add(new IntField("id", id, Field.Store.YES));
+    doc.add(new NumericDocValuesField("id", id));
     if (VERBOSE) {
       System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
     }
@@ -227,8 +228,7 @@
     final IndexReader r = w.getReader();
     w.shutdown();
 
-    // NOTE: intentional insanity!!
-    final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
+    final NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
 
     for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
 
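For composite readers, the test now asks MultiDocValues for a merged view rather than wrapping with SlowCompositeReaderWrapper. A sketch, assuming an IndexReader named reader and an "id" field (both illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiDocValues;
    import org.apache.lucene.index.NumericDocValues;

    public class MultiDocValuesSketch {
      static long idOf(IndexReader reader, int docID) throws IOException {
        // Merged numeric view across all segments of the composite reader.
        NumericDocValues ids = MultiDocValues.getNumericValues(reader, "id");
        return ids.get(docID);
      }
    }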
diff --git a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
index af21943..7eb0130 100644
--- a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -28,10 +28,13 @@
 import org.apache.lucene.document.FloatField;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -120,19 +123,33 @@
     
     Document doc = new Document();
     Field idField = newStringField(random, "id", "", Field.Store.YES);
+    Field idDVField = new SortedDocValuesField("id", new BytesRef());
     Field intIdField = new IntField("id_int", 0, Store.YES);
+    Field intDVField = new NumericDocValuesField("id_int", 0);
     Field floatIdField = new FloatField("id_float", 0, Store.YES);
+    Field floatDVField = new NumericDocValuesField("id_float", 0);
     Field longIdField = new LongField("id_long", 0, Store.YES);
+    Field longDVField = new NumericDocValuesField("id_long", 0);
     Field doubleIdField = new DoubleField("id_double", 0, Store.YES);
+    Field doubleDVField = new NumericDocValuesField("id_double", 0);
     Field randField = newStringField(random, "rand", "", Field.Store.YES);
+    Field randDVField = new SortedDocValuesField("rand", new BytesRef());
     Field bodyField = newStringField(random, "body", "", Field.Store.NO);
+    Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
     doc.add(idField);
+    doc.add(idDVField);
     doc.add(intIdField);
+    doc.add(intDVField);
     doc.add(floatIdField);
+    doc.add(floatDVField);
     doc.add(longIdField);
+    doc.add(longDVField);
     doc.add(doubleIdField);
+    doc.add(doubleDVField);
     doc.add(randField);
+    doc.add(randDVField);
     doc.add(bodyField);
+    doc.add(bodyDVField);
 
     RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
                                                      newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -146,10 +163,15 @@
 
       for (int d = minId; d <= maxId; d++) {
         idField.setStringValue(pad(d));
+        idDVField.setBytesValue(new BytesRef(pad(d)));
         intIdField.setIntValue(d);
+        intDVField.setLongValue(d);
         floatIdField.setFloatValue(d);
+        floatDVField.setLongValue(Float.floatToRawIntBits(d));
         longIdField.setLongValue(d);
+        longDVField.setLongValue(d);
         doubleIdField.setDoubleValue(d);
+        doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
         int r = index.allowNegativeRandomInts ? random.nextInt() : random
           .nextInt(Integer.MAX_VALUE);
         if (index.maxR < r) {
@@ -166,7 +188,9 @@
           minCount++;
         }
         randField.setStringValue(pad(r));
+        randDVField.setBytesValue(new BytesRef(pad(r)));
         bodyField.setStringValue("body");
+        bodyDVField.setBytesValue(new BytesRef("body"));
         writer.addDocument(doc);
       }
 
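Each indexed field above gains a doc-values twin under the same name, with float and double values encoded as their raw bit patterns. A condensed sketch of that pairing (illustrative field names):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.DoubleField;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FloatField;
    import org.apache.lucene.document.NumericDocValuesField;

    public class PairedFieldsSketch {
      static Document makeDoc(float score, double weight) {
        Document doc = new Document();
        // Indexed numeric fields, for NumericRangeQuery/Filter:
        doc.add(new FloatField("score", score, Field.Store.NO));
        doc.add(new DoubleField("weight", weight, Field.Store.NO));
        // Doc-values twins, for sorting and DocValuesRangeFilter;
        // floating point is stored as raw bits in a NUMERIC field:
        doc.add(new NumericDocValuesField("score", Float.floatToRawIntBits(score)));
        doc.add(new NumericDocValuesField("weight", Double.doubleToRawLongBits(weight)));
        return doc;
      }
    }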
diff --git a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
index f09d992..8f6f8fc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -95,34 +95,6 @@
     }
   }
   
-  static final class JustCompileExtendedFieldCacheLongParser implements FieldCache.LongParser {
-
-    @Override
-    public long parseLong(BytesRef string) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-
-    @Override
-    public TermsEnum termsEnum(Terms terms) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-    
-  }
-  
-  static final class JustCompileExtendedFieldCacheDoubleParser implements FieldCache.DoubleParser {
-    
-    @Override
-    public double parseDouble(BytesRef term) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-
-    @Override
-    public TermsEnum termsEnum(Terms terms) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-    
-  }
-
   static final class JustCompileFieldComparator extends FieldComparator<Object> {
 
     @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
index 5306ac4..8b38180 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
@@ -254,7 +254,7 @@
     // returns default empty docidset, always cacheable:
     assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
     // is cacheable:
-    assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
+    assertDocIdSetCacheable(reader, DocValuesRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
     // a fixedbitset filter is always cacheable
     assertDocIdSetCacheable(reader, new Filter() {
       @Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
index 67ccec9..5f2a45f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
@@ -20,11 +20,13 @@
 import java.util.Arrays;
 
 import org.apache.lucene.index.Term;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.StoredDocument;
@@ -110,6 +112,7 @@
     String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
     Field dateTimeField = newStringField(DATE_TIME_FIELD, dateTimeString, Field.Store.YES);
     document.add(dateTimeField);
+    document.add(new SortedDocValuesField(DATE_TIME_FIELD, new BytesRef(dateTimeString)));
 
     return document;
   }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
index c44940a..4d38f81 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
@@ -33,12 +33,14 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
 
 /**
  * Tests the DocTermOrdsRangeFilter
  */
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // needs SORTED_SET
 public class TestDocTermOrdsRangeFilter extends LuceneTestCase {
   protected IndexSearcher searcher1;
   protected IndexSearcher searcher2;
@@ -63,10 +65,7 @@
       for (int j = 0; j < numTerms; j++) {
         String s = TestUtil.randomUnicodeString(random());
         doc.add(newStringField(fieldName, s, Field.Store.NO));
-        // if the default codec doesn't support sortedset, we will uninvert at search time
-        if (defaultCodecSupportsSortedSet()) {
-          doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
-        }
+        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
         terms.add(s);
       }
       writer.addDocument(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
index 8e2a1eb..74b514a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
@@ -51,6 +51,7 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
+    assumeTrue("requires codec support for SORTED_SET", defaultCodecSupportsSortedSet());
     dir = newDirectory();
     fieldName = random().nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, 
@@ -65,10 +66,7 @@
       for (int j = 0; j < numTerms; j++) {
         String s = TestUtil.randomUnicodeString(random());
         doc.add(newStringField(fieldName, s, Field.Store.NO));
-        // if the default codec doesn't support sortedset, we will uninvert at search time
-        if (defaultCodecSupportsSortedSet()) {
-          doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
-        }
+        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
         terms.add(s);
       }
       writer.addDocument(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
index b85c36e..90628d2 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
@@ -23,8 +23,10 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatDocValuesField;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
@@ -158,12 +160,12 @@
     @Override
     public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
       final SimScorer sub = sim.simScorer(stats, context);
-      final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), boostField, false);
+      final NumericDocValues values = DocValues.getNumeric(context.reader(), boostField);
       
       return new SimScorer() {
         @Override
         public float score(int doc, float freq) {
-          return values.get(doc) * sub.score(doc, freq);
+          return Float.intBitsToFloat((int)values.get(doc)) * sub.score(doc, freq);
         }
         
         @Override
@@ -178,7 +180,7 @@
 
         @Override
         public Explanation explain(int doc, Explanation freq) {
-          Explanation boostExplanation = new Explanation(values.get(doc), "indexDocValue(" + boostField + ")");
+          Explanation boostExplanation = new Explanation(Float.intBitsToFloat((int)values.get(doc)), "indexDocValue(" + boostField + ")");
           Explanation simExplanation = sub.explain(doc, freq);
           Explanation expl = new Explanation(boostExplanation.getValue() * simExplanation.getValue(), "product of:");
           expl.addDetail(boostExplanation);
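Reading the boost back mirrors the indexing convention: the long from NumericDocValues is reinterpreted as a float via Float.intBitsToFloat. A standalone sketch of that round trip (illustrative names):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;

    public class FloatDecodeSketch {
      static float boostFor(AtomicReader reader, String boostField, int doc) throws IOException {
        NumericDocValues values = DocValues.getNumeric(reader, boostField);
        // NumericDocValues stores longs; recover the float from the low 32 bits.
        return Float.intBitsToFloat((int) values.get(doc));
      }
    }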
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
index 5314dcf..7e4a08b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.FieldValueHitQueue.Entry;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -126,6 +127,9 @@
    Document doc = new Document();
    for (int i = 0; i < vals.length - 2; i += 2) {
      doc.add(newTextField(vals[i], vals[i + 1], Field.Store.YES));
+     if (vals[i].equals("id")) {
+       doc.add(new SortedDocValuesField(vals[i], new BytesRef(vals[i+1])));
+     }
    }
    return doc;
  }
@@ -185,7 +189,7 @@
 
      @Override
      public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
-       idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldname);
+       idIndex = DocValues.getSorted(context.reader(), fieldname);
        return this;
      }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java
index 48781f6..35ddf0c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestExplanations.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -30,6 +31,7 @@
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -73,6 +75,7 @@
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
       doc.add(newStringField(KEY, ""+i, Field.Store.NO));
+      doc.add(new SortedDocValuesField(KEY, new BytesRef(""+i)));
       Field f = newTextField(FIELD, docFields[i], Field.Store.NO);
       f.setBoost(i);
       doc.add(f);
@@ -110,7 +113,7 @@
   /** 
-   * Convenience subclass of FieldCacheTermsFilter
+   * Convenience subclass of DocValuesTermsFilter
    */
-  public static class ItemizedFilter extends FieldCacheTermsFilter {
+  public static class ItemizedFilter extends DocValuesTermsFilter {
     private static String[] int2str(int [] terms) {
       String [] out = new String[terms.length];
       for (int i = 0; i < terms.length; i++) {
@@ -118,9 +121,6 @@
       }
       return out;
     }
-    public ItemizedFilter(String keyField, int [] keys) {
-      super(keyField, int2str(keys));
-    }
     public ItemizedFilter(int [] keys) {
       super(KEY, int2str(keys));
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
index 6377f7a..e22ab7d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -63,67 +64,67 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    result = search.search(q, FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
+    result = search.search(q, DocValuesRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,F,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,F,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,maxIP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,medIP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,medIP,T,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
 
     // unbounded id
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,null,T,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,maxIP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,maxIP,F,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,null,F,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,maxIP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,maxIP,F,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,maxIP,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,maxIP,T,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,medIP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,medIP,F,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,minIP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,minIP,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,medIP,F,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,minIP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,minIP,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",null,minIP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,minIP,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",maxIP,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
   }
 
@@ -145,47 +146,47 @@
 
     // test extremes, bounded on both ends
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
     assertEquals("all but biggest", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
     assertEquals("all but smallest", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
     assertEquals("all but extremes", numDocs-2, result.length);
     
     // unbounded
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,null,T,F), numDocs).scoreDocs;
     assertEquals("smallest and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,maxRP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,maxRP,F,T), numDocs).scoreDocs;
     assertEquals("biggest and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,null,F,F), numDocs).scoreDocs;
     assertEquals("not smallest, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,maxRP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,maxRP,F,F), numDocs).scoreDocs;
     assertEquals("not biggest, but down", numDocs-1, result.length);
         
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,minRP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,minRP,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",minRP,minRP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,minRP,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",null,minRP,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,minRP,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
   }
   
@@ -208,75 +209,75 @@
 
     // test id, bounded on both ends
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,null,F,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,medIdO,F,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,minIdO,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,medIdO,F,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,minIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,minIdO,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
   
@@ -299,75 +300,75 @@
 
     // test id, bounded on both ends
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,null,F,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,medIdO,F,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,minIdO,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,medIdO,F,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,minIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,minIdO,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
   
@@ -386,19 +387,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,medIdO,F,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",medIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",medIdO,null,F,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -415,19 +416,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null,medIdO,F,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",medIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",medIdO,null,F,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -440,6 +441,7 @@
     for (int d = -20; d <= 20; d++) {
       Document doc = new Document();
       doc.add(new IntField("id_int", d, Field.Store.NO));
+      doc.add(new NumericDocValuesField("id_int", d));
       doc.add(newStringField("body", "body", Field.Store.NO));
       writer.addDocument(doc);
     }
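
This setup hunk is the indexing-side half of the migration: each document now
carries a doc-values field in parallel with the indexed field, under the same
name, because the renamed filters and sorts read per-document values instead of
uninverting terms at search time. The pairings this patch uses, sketched (field
names and loop variables illustrative):

    doc.add(new IntField("id_int", d, Field.Store.NO));           // indexed trie terms, for NumericRangeQuery
    doc.add(new NumericDocValuesField("id_int", d));              // numeric doc values, for DocValuesRangeFilter and sorting
    doc.add(newStringField("id", term, Field.Store.YES));         // indexed + stored string
    doc.add(new SortedDocValuesField("id", new BytesRef(term)));  // sorted doc values, for STRING sort and terms filters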
@@ -457,19 +459,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,20,T,T), 100).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,20,T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",0,20,T,T), 100).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",0,20,T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,0,T,T), 100).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,0,T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",10,20,T,T), 100).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",10,20,T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,-10,T,T), 100).scoreDocs;
+    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,-10,T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
     reader.close();
     dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRewriteMethod.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRewriteMethod.java
index 1d127c7..46879bf 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRewriteMethod.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRewriteMethod.java
@@ -31,7 +31,7 @@
   @Override
   protected void assertSame(String regexp) throws IOException {   
     RegexpQuery fieldCache = new RegexpQuery(new Term(fieldName, regexp), RegExp.NONE);
-    fieldCache.setRewriteMethod(new FieldCacheRewriteMethod());
+    fieldCache.setRewriteMethod(new DocValuesRewriteMethod());
     
     RegexpQuery filter = new RegexpQuery(new Term(fieldName, regexp), RegExp.NONE);
     filter.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
@@ -49,9 +49,9 @@
     assertEquals(a1, a2);
     assertFalse(a1.equals(b));
     
-    a1.setRewriteMethod(new FieldCacheRewriteMethod());
-    a2.setRewriteMethod(new FieldCacheRewriteMethod());
-    b.setRewriteMethod(new FieldCacheRewriteMethod());
+    a1.setRewriteMethod(new DocValuesRewriteMethod());
+    a2.setRewriteMethod(new DocValuesRewriteMethod());
+    b.setRewriteMethod(new DocValuesRewriteMethod());
     assertEquals(a1, a2);
     assertFalse(a1.equals(b));
     QueryUtils.check(a1);
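
DocValuesRewriteMethod is a drop-in replacement here: instead of rewriting the
automaton against the inverted index, it matches it against the field's sorted
doc values (see the SortedDocValuesField added in TestRegexpRandom2 below). A
minimal usage sketch (field name illustrative):

    RegexpQuery q = new RegexpQuery(new Term("field", "foo.*"), RegExp.NONE);
    q.setRewriteMethod(new DocValuesRewriteMethod()); // requires SORTED/SORTED_SET doc values on "field"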
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
index 76744fc..800e6b7 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
@@ -18,6 +18,8 @@
  */
 
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
 import org.apache.lucene.document.Document;
@@ -31,10 +33,11 @@
 /**
- * A basic unit test for FieldCacheTermsFilter
+ * A basic unit test for DocValuesTermsFilter
  *
- * @see org.apache.lucene.search.FieldCacheTermsFilter
+ * @see org.apache.lucene.search.DocValuesTermsFilter
  */
 public class TestFieldCacheTermsFilter extends LuceneTestCase {
   public void testMissingTerms() throws Exception {
+    assumeTrue("requires support for missing values", defaultCodecSupportsMissingDocValues());
     String fieldName = "field1";
     Directory rd = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), rd);
@@ -42,6 +45,7 @@
       Document doc = new Document();
       int term = i * 10; //terms are units of 10;
       doc.add(newStringField(fieldName, "" + term, Field.Store.YES));
+      doc.add(new SortedDocValuesField(fieldName, new BytesRef("" + term)));
       w.addDocument(doc);
     }
     IndexReader reader = w.getReader();
@@ -54,18 +58,18 @@
 
     List<String> terms = new ArrayList<>();
     terms.add("5");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new DocValuesTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match nothing", 0, results.length);
 
     terms = new ArrayList<>();
     terms.add("10");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new DocValuesTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match 1", 1, results.length);
 
     terms = new ArrayList<>();
     terms.add("10");
     terms.add("20");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new DocValuesTermsFilter(fieldName,  terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match 2", 2, results.length);
 
     reader.close();
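
The filter's construction is unchanged, but DocValuesTermsFilter resolves the
given terms against the field's sorted doc values rather than a FieldCache
uninvert, which is why each document above now adds a SortedDocValuesField next
to the indexed string field. Usage sketch (names illustrative):

    doc.add(newStringField("field1", "10", Field.Store.YES));
    doc.add(new SortedDocValuesField("field1", new BytesRef("10")));
    // ... index more documents, open reader and searcher ...
    Filter f = new DocValuesTermsFilter("field1", "10", "20");
    TopDocs hits = searcher.search(new MatchAllDocsQuery(), f, 10);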
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
index 7fc51f2..37d3675 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
@@ -21,16 +21,20 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 
 /**
- * 
+ * Tests for FieldValueFilter.
  */
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // suppress codecs without missing-value support
 public class TestFieldValueFilter extends LuceneTestCase {
 
   public void testFieldValueFilterNoValue() throws IOException {
@@ -96,9 +100,12 @@
       if (random().nextBoolean()) {
         docStates[i] = 1;
         doc.add(newTextField("some", "value", Field.Store.YES));
+        doc.add(new SortedDocValuesField("some", new BytesRef("value")));
       }
       doc.add(newTextField("all", "test", Field.Store.NO));
+      doc.add(new SortedDocValuesField("all", new BytesRef("test")));
       doc.add(newTextField("id", "" + i, Field.Store.YES));
+      doc.add(new SortedDocValuesField("id", new BytesRef("" + i)));
       writer.addDocument(doc);
     }
     writer.commit();
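
Both this class-level annotation and the assumeTrue guard used earlier address
the same constraint: the Lucene40-42 doc values formats cannot record which
documents lack a value for a field, so tests that distinguish missing from
present values must not run against them. The two idioms, sketched (class name
illustrative):

    @SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // class-wide skip
    public class SomeDocValuesTest extends LuceneTestCase { /* ... */ }

    // or, inside an individual test method:
    assumeTrue("requires missing-value support", defaultCodecSupportsMissingDocValues());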
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
index 710ed26..20d6e33 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
@@ -565,46 +565,6 @@
     testFloatRange(2);
   }
   
-  private void testSorting(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    // 10 random tests, the index order is ascending,
-    // so using a reverse sort field should retun descending documents
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
-      if (topDocs.totalHits==0) continue;
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      int last = searcher.doc(sd[0].doc).getField(field).numericValue().intValue();
-      for (int j=1; j<sd.length; j++) {
-        int act = searcher.doc(sd[j].doc).getField(field).numericValue().intValue();
-        assertTrue("Docs should be sorted backwards", last>act );
-        last=act;
-      }
-    }
-  }
-
-  @Test
-  public void testSorting_8bit() throws Exception {
-    testSorting(8);
-  }
-  
-  @Test
-  public void testSorting_4bit() throws Exception {
-    testSorting(4);
-  }
-  
-  @Test
-  public void testSorting_2bit() throws Exception {
-    testSorting(2);
-  }
-  
   @Test
   public void testEqualsAndHash() throws Exception {
     QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
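
The removed testSorting variants sorted on fields that are indexed only as trie
terms; with sorting now backed by doc values, those fields would also need a
parallel NumericDocValuesField, and plain numeric-sort coverage lives in
TestSort below. What the removed test's core would look like under the new API,
sketched (names kept from the removed code for illustration):

    doc.add(new IntField("field8", value, Field.Store.NO));
    doc.add(new NumericDocValuesField("field8", value));
    // ...
    Sort sort = new Sort(new SortField("field8", SortField.Type.INT, true));
    TopDocs topDocs = searcher.search(tq, null, noDocs, sort);

TestNumericRangeQuery64 below drops the equivalent long-typed variants for the
same reason.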
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
index 07ae739..21b342a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
@@ -38,7 +38,6 @@
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.TestNumericUtils; // NaN arrays
 import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -608,51 +607,6 @@
     testDoubleRange(2);
   }
   
-  private void testSorting(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    // 10 random tests, the index order is ascending,
-    // so using a reverse sort field should retun descending documents
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
-      if (topDocs.totalHits==0) continue;
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      long last=searcher.doc(sd[0].doc).getField(field).numericValue().longValue();
-      for (int j=1; j<sd.length; j++) {
-        long act=searcher.doc(sd[j].doc).getField(field).numericValue().longValue();
-        assertTrue("Docs should be sorted backwards", last>act );
-        last=act;
-      }
-    }
-  }
-
-  @Test
-  public void testSorting_8bit() throws Exception {
-    testSorting(8);
-  }
-  
-  @Test
-  public void testSorting_6bit() throws Exception {
-    testSorting(6);
-  }
-  
-  @Test
-  public void testSorting_4bit() throws Exception {
-    testSorting(4);
-  }
-  
-  @Test
-  public void testSorting_2bit() throws Exception {
-    testSorting(2);
-  }
-  
   @Test
   public void testEqualsAndHash() throws Exception {
     QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
index 9bc5a5d..f6f2be5 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.FilteredTermsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -66,11 +67,14 @@
     Document doc = new Document();
     Field field = newStringField(fieldName, "", Field.Store.NO);
     doc.add(field);
+    Field dvField = new SortedDocValuesField(fieldName, new BytesRef());
+    doc.add(dvField);
     List<String> terms = new ArrayList<>();
     int num = atLeast(200);
     for (int i = 0; i < num; i++) {
       String s = TestUtil.randomUnicodeString(random());
       field.setStringValue(s);
+      dvField.setBytesValue(new BytesRef(s));
       terms.add(s);
       writer.addDocument(doc);
     }
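
Note the reuse idiom this hunk extends to the new doc-values field: one Field
and one SortedDocValuesField are created once, added to a single reusable
Document, and updated per iteration, which avoids reallocating field instances
for each of the ~200 documents. In isolation:

    Field field = newStringField(fieldName, "", Field.Store.NO);
    Field dvField = new SortedDocValuesField(fieldName, new BytesRef());
    doc.add(field);
    doc.add(dvField);
    for (String s : terms) {
      field.setStringValue(s);
      dvField.setBytesValue(new BytesRef(s));
      writer.addDocument(doc); // the writer consumes the current field values
    }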
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSort.java b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
index 3361d0c..aae7ea1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
@@ -18,31 +18,20 @@
  */
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
 
-import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.DoubleDocValuesField;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.document.FloatDocValuesField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 
 /*
  * Very simple tests of sorting.
@@ -59,16 +48,19 @@
  *        |
  *       \./
  */
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // avoid codecs that don't support "missing"
 public class TestSort extends LuceneTestCase {
-
+  
   /** Tests sorting on type string */
   public void testString() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
+    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
     doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
+    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
     doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
@@ -82,36 +74,7 @@
     // 'bar' comes before 'foo'
     assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
     assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type string with a missing value */
-  public void testStringMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
     
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
     ir.close();
     dir.close();
   }
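
Under the doc-values API the two string sort types differ in what they read:
SortField.Type.STRING compares SORTED ordinals, while STRING_VAL compares the
bytes themselves and therefore also works against a plain BinaryDocValuesField,
as the string_val tests below exercise. Side by side (field names illustrative):

    doc.add(new SortedDocValuesField("sorted", new BytesRef("foo"))); // Type.STRING or STRING_VAL
    doc.add(new BinaryDocValuesField("binary", new BytesRef("foo"))); // Type.STRING_VAL only
    Sort byOrd = new Sort(new SortField("sorted", SortField.Type.STRING));
    Sort byVal = new Sort(new SortField("binary", SortField.Type.STRING_VAL));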
@@ -121,9 +84,11 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
+    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
     doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
+    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
     doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
@@ -147,9 +112,11 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
+    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
     doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
+    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
     doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
@@ -168,169 +135,16 @@
     dir.close();
   }
   
-  /** Tests sorting on type string_val with a missing value */
-  public void testStringValMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-
-  /** Tests sorting on type string with a missing
-   *  value sorted first */
-  public void testStringMissingSortedFirst() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sf = new SortField("value", SortField.Type.STRING);
-    Sort sort = new Sort(sf);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-
-  /** Tests reverse sorting on type string with a missing
-   *  value sorted first */
-  public void testStringMissingSortedFirstReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sf = new SortField("value", SortField.Type.STRING, true);
-    Sort sort = new Sort(sf);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    // null comes last
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-
-  /** Tests sorting on type string with a missing
-   *  value sorted last */
-  public void testStringValMissingSortedLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sf = new SortField("value", SortField.Type.STRING);
-    sf.setMissingValue(SortField.STRING_LAST);
-    Sort sort = new Sort(sf);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    // null comes last
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-
-  /** Tests reverse sorting on type string with a missing
-   *  value sorted last */
-  public void testStringValMissingSortedLastReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sf = new SortField("value", SortField.Type.STRING, true);
-    sf.setMissingValue(SortField.STRING_LAST);
-    Sort sort = new Sort(sf);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests reverse sorting on type string_val */
   public void testStringValReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
+    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
     doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
+    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
     doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
@@ -349,129 +163,77 @@
     dir.close();
   }
   
-  /** Tests sorting on internal docid order */
-  public void testFieldDoc() throws Exception {
+  /** Tests sorting on type string_val, but with a SortedDocValuesField */
+  public void testStringValSorted() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
+    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
+    doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
+    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
+    doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(SortField.FIELD_DOC);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
-    // docid 0, then docid 1
-    assertEquals(0, td.scoreDocs[0].doc);
-    assertEquals(1, td.scoreDocs[1].doc);
+    // 'bar' comes before 'foo'
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
 
     ir.close();
     dir.close();
   }
   
-  /** Tests sorting on reverse internal docid order */
-  public void testFieldDocReverse() throws Exception {
+  /** Tests reverse sorting on type string_val, but with a SortedDocValuesField */
+  public void testStringValReverseSorted() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
+    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
+    doc.add(newStringField("value", "bar", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
+    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
+    doc.add(newStringField("value", "foo", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField(null, SortField.Type.DOC, true));
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
-    // docid 1, then docid 0
-    assertEquals(1, td.scoreDocs[0].doc);
-    assertEquals(0, td.scoreDocs[1].doc);
+    // reverse order: 'foo' comes before 'bar'
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
 
     ir.close();
     dir.close();
   }
   
-  /** Tests default sort (by score) */
-  public void testFieldScore() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort();
-
-    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
-    assertEquals(2, actual.totalHits);
-
-    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
-    // the two topdocs should be the same
-    assertEquals(expected.totalHits, actual.totalHits);
-    for (int i = 0; i < actual.scoreDocs.length; i++) {
-      assertEquals(actual.scoreDocs[i].doc, expected.scoreDocs[i].doc);
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests default sort (by score) in reverse */
-  public void testFieldScoreReverse() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField(null, SortField.Type.SCORE, true));
-
-    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
-    assertEquals(2, actual.totalHits);
-
-    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
-    // the two topdocs should be the reverse of each other
-    assertEquals(expected.totalHits, actual.totalHits);
-    assertEquals(actual.scoreDocs[0].doc, expected.scoreDocs[1].doc);
-    assertEquals(actual.scoreDocs[1].doc, expected.scoreDocs[0].doc);
-
-    ir.close();
-    dir.close();
-  }
-
   /** Tests sorting on type int */
   public void testInt() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(new IntField("value", 300000, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 300000));
+    doc.add(newStringField("value", "300000", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -490,6 +252,39 @@
     dir.close();
   }
   
+  /** Tests sorting on type int in reverse */
+  public void testIntReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new NumericDocValuesField("value", 300000));
+    doc.add(newStringField("value", "300000", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
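
The missing-value tests that follow each start with one document that has no
"value" field at all; with numeric doc values such a document is treated as 0 by
the default comparator, and a test can override that through the SortField. A
sketch of the override (using the setMissingValue API the removed string tests
above already relied on):

    SortField sf = new SortField("value", SortField.Type.INT);
    sf.setMissingValue(Integer.MAX_VALUE); // documents without the field sort last, ascending
    Sort sort = new Sort(sf);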
   /** Tests sorting on type int with a missing value */
   public void testIntMissing() throws IOException {
     Directory dir = newDirectory();
@@ -497,10 +292,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -526,10 +323,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -550,48 +349,21 @@
     dir.close();
   }
   
-  /** Tests sorting on type int in reverse */
-  public void testIntReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new IntField("value", 300000, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests sorting on type long */
   public void testLong() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 3000000000L));
+    doc.add(newStringField("value", "3000000000", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -610,6 +382,39 @@
     dir.close();
   }
   
+  /** Tests sorting on type long in reverse */
+  public void testLongReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new NumericDocValuesField("value", 3000000000L));
+    doc.add(newStringField("value", "3000000000", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
   /** Tests sorting on type long with a missing value */
   public void testLongMissing() throws IOException {
     Directory dir = newDirectory();
@@ -617,10 +422,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -646,10 +453,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", -1));
+    doc.add(newStringField("value", "-1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc.add(new NumericDocValuesField("value", 4));
+    doc.add(newStringField("value", "4", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -670,48 +479,21 @@
     dir.close();
   }
   
-  /** Tests sorting on type long in reverse */
-  public void testLongReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests sorting on type float */
   public void testFloat() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", 30.1F));
+    doc.add(newStringField("value", "30.1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", -1.3F));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", 4.2F));
+    doc.add(newStringField("value", "4.2", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -730,6 +512,39 @@
     dir.close();
   }
   
+  /** Tests sorting on type float in reverse */
+  public void testFloatReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new FloatDocValuesField("value", 30.1F));
+    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatDocValuesField("value", -1.3F));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatDocValuesField("value", 4.2F));
+    doc.add(newStringField("value", "4.2", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
   /** Tests sorting on type float with a missing value */
   public void testFloatMissing() throws IOException {
     Directory dir = newDirectory();
@@ -737,10 +552,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", -1.3F));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", 4.2F));
+    doc.add(newStringField("value", "4.2", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -766,10 +583,12 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", -1.3F));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc.add(new FloatDocValuesField("value", 4.2F));
+    doc.add(newStringField("value", "4.2", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -790,51 +609,25 @@
     dir.close();
   }
   
-  /** Tests sorting on type float in reverse */
-  public void testFloatReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests sorting on type double */
   public void testDouble() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 30.1));
+    doc.add(newStringField("value", "30.1", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", -1.3));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
+    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
+    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -859,10 +652,12 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(new DoubleField("value", +0d, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", +0D));
+    doc.add(newStringField("value", "+0", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", -0d, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", -0D));
+    doc.add(newStringField("value", "-0", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
     IndexReader ir = writer.getReader();
@@ -874,13 +669,46 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // numeric order
-    double v0 = searcher.doc(td.scoreDocs[0].doc).getField("value").numericValue().doubleValue();
-    double v1 = searcher.doc(td.scoreDocs[1].doc).getField("value").numericValue().doubleValue();
-    assertEquals(0, v0, 0d);
-    assertEquals(0, v1, 0d);
-    // check sign bits
-    assertEquals(1, Double.doubleToLongBits(v0) >>> 63);
-    assertEquals(0, Double.doubleToLongBits(v1) >>> 63);
+    assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type double in reverse */
+  public void testDoubleReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new DoubleDocValuesField("value", 30.1));
+    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleDocValuesField("value", -1.3));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
+    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
+    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(4, td.totalHits);
+    // reverse numeric order
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
 
     ir.close();
     dir.close();
@@ -893,13 +721,16 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", -1.3));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
+    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
+    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -926,13 +757,16 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", -1.3));
+    doc.add(newStringField("value", "-1.3", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
+    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
+    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.shutdown();
@@ -953,429 +787,4 @@
     ir.close();
     dir.close();
   }
-  
-  /** Tests sorting on type double in reverse */
-  public void testDoubleReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(4, td.totalHits);
-    // numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  public void testEmptyStringVsNullStringSort() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
-                        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("f", "", Field.Store.NO));
-    doc.add(newStringField("t", "1", Field.Store.NO));
-    w.addDocument(doc);
-    w.commit();
-    doc = new Document();
-    doc.add(newStringField("t", "1", Field.Store.NO));
-    w.addDocument(doc);
-
-    IndexReader r = DirectoryReader.open(w, true);
-    w.shutdown();
-    IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(new TermQuery(new Term("t", "1")), null, 10, new Sort(new SortField("f", SortField.Type.STRING)));
-    assertEquals(2, hits.totalHits);
-    // null sorts first
-    assertEquals(1, hits.scoreDocs[0].doc);
-    assertEquals(0, hits.scoreDocs[1].doc);
-    r.close();
-    dir.close();
-  }
-  
-  /** test that we don't throw exception on multi-valued field (LUCENE-2142) */
-  public void testMultiValuedField() throws IOException {
-    Directory indexStore = newDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
-    for(int i=0; i<5; i++) {
-        Document doc = new Document();
-        doc.add(new StringField("string", "a"+i, Field.Store.NO));
-        doc.add(new StringField("string", "b"+i, Field.Store.NO));
-        writer.addDocument(doc);
-    }
-    writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases
-    writer.shutdown();
-    Sort sort = new Sort(
-        new SortField("string", SortField.Type.STRING),
-        SortField.FIELD_DOC);
-    // this should not throw AIOOBE or RuntimeEx
-    IndexReader reader = DirectoryReader.open(indexStore);
-    IndexSearcher searcher = newSearcher(reader);
-    searcher.search(new MatchAllDocsQuery(), null, 500, sort);
-    reader.close();
-    indexStore.close();
-  }
-  
-  public void testMaxScore() throws Exception {
-    Directory d = newDirectory();
-    // Not RIW because we need exactly 2 segs:
-    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
-    int id = 0;
-    for(int seg=0;seg<2;seg++) {
-      for(int docIDX=0;docIDX<10;docIDX++) {
-        Document doc = new Document();
-        doc.add(new IntField("id", docIDX, Field.Store.YES));
-        StringBuilder sb = new StringBuilder();
-        for(int i=0;i<id;i++) {
-          sb.append(' ');
-          sb.append("text");
-        }
-        doc.add(newTextField("body", sb.toString(), Field.Store.NO));
-        w.addDocument(doc);
-        id++;
-      }
-      w.commit();
-    }
-
-    IndexReader r = DirectoryReader.open(w, true);
-    w.shutdown();
-    Query q = new TermQuery(new Term("body", "text"));
-    IndexSearcher s = newSearcher(r);
-    float maxScore = s.search(q , 10).getMaxScore();
-    assertEquals(maxScore, s.search(q, null, 3, Sort.INDEXORDER, random().nextBoolean(), true).getMaxScore(), 0.0);
-    assertEquals(maxScore, s.search(q, null, 3, Sort.RELEVANCE, random().nextBoolean(), true).getMaxScore(), 0.0);
-    assertEquals(maxScore, s.search(q, null, 3, new Sort(new SortField[] {new SortField("id", SortField.Type.INT, false)}), random().nextBoolean(), true).getMaxScore(), 0.0);
-    assertEquals(maxScore, s.search(q, null, 3, new Sort(new SortField[] {new SortField("id", SortField.Type.INT, true)}), random().nextBoolean(), true).getMaxScore(), 0.0);
-    r.close();
-    d.close();
-  }
-  
-  /** test sorts when there's nothing in the index */
-  public void testEmptyIndex() throws Exception {
-    IndexSearcher empty = newSearcher(new MultiReader());
-    Query query = new TermQuery(new Term("contents", "foo"));
-  
-    Sort sort = new Sort();
-    TopDocs td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-
-    sort.setSort(SortField.FIELD_DOC);
-    td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-
-    sort.setSort(new SortField("int", SortField.Type.INT), SortField.FIELD_DOC);
-    td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-    
-    sort.setSort(new SortField("string", SortField.Type.STRING, true), SortField.FIELD_DOC);
-    td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-    
-    sort.setSort(new SortField("string_val", SortField.Type.STRING_VAL, true), SortField.FIELD_DOC);
-    td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-
-    sort.setSort(new SortField("float", SortField.Type.FLOAT), new SortField("string", SortField.Type.STRING));
-    td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-  }
-  
-  /** 
-   * test sorts for a custom int parser that uses a simple char encoding 
-   */
-  public void testCustomIntParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.IntParser() {
-      @Override
-      public int parseInt(BytesRef term) {
-        return (term.bytes[term.offset]-'A') * 123456;
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** 
-   * test sorts for a custom long parser that uses a simple char encoding 
-   */
-  public void testCustomLongParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.LongParser() {
-      @Override
-      public long parseLong(BytesRef term) {
-        return (term.bytes[term.offset]-'A') * 1234567890L;
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** 
-   * test sorts for a custom float parser that uses a simple char encoding 
-   */
-  public void testCustomFloatParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.FloatParser() {
-      @Override
-      public float parseFloat(BytesRef term) {
-        return (float) Math.sqrt(term.bytes[term.offset]);
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** 
-   * test sorts for a custom double parser that uses a simple char encoding 
-   */
-  public void testCustomDoubleParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.DoubleParser() {
-      @Override
-      public double parseDouble(BytesRef term) {
-        return Math.pow(term.bytes[term.offset], (term.bytes[term.offset]-'A'));
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting a single document */
-  public void testSortOneDocument() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(1, td.totalHits);
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting a single document with scores */
-  public void testSortOneDocumentWithScores() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
-
-    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
-    assertEquals(1, expected.totalHits);
-    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), null, 10, sort, true, true);
-    
-    assertEquals(expected.totalHits, actual.totalHits);
-    assertEquals(expected.scoreDocs[0].score, actual.scoreDocs[0].score, 0F);
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting with two fields */
-  public void testSortTwoFields() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    // tievalue, then value
-    Sort sort = new Sort(new SortField("tievalue", SortField.Type.STRING),
-                         new SortField("value", SortField.Type.STRING));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-
-  public void testScore() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(SortField.FIELD_SCORE);
-
-    final BooleanQuery bq = new BooleanQuery();
-    bq.add(new TermQuery(new Term("value", "foo")), Occur.SHOULD);
-    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
-    TopDocs td = searcher.search(bq, 10, sort);
-    assertEquals(2, td.totalHits);
-    assertEquals(1, td.scoreDocs[0].doc);
-    assertEquals(0, td.scoreDocs[1].doc);
-
-    ir.close();
-    dir.close();
-  }
 }
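
[Note on the TestSort.java changes above: every migrated test now indexes two parallel fields under the same name, a DocValues field that drives the sort and a stored string field that the assertions read back. A condensed sketch of that pattern, using only the LuceneTestCase helpers already visible in this file (newDirectory, newStringField, newSearcher, random()); the field name and values are illustrative:

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new DoubleDocValuesField("value", 4.2));           // sorted through the DocValues API
    doc.add(newStringField("value", "4.2", Field.Store.YES));  // stored copy, read back by assertions
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
    ir.close();
    dir.close();
]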
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java b/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java
deleted file mode 100644
index f4e46bd..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java
+++ /dev/null
@@ -1,804 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-
-/** Tests basic sorting on docvalues fields.
- * These are mostly like TestSort's tests, except each test
- * indexes the field up-front as docvalues, and checks no fieldcaches were made */
-@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // avoid codecs that don't support "missing"
-public class TestSortDocValues extends LuceneTestCase {
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // ensure there is nothing in fieldcache before test starts
-    FieldCache.DEFAULT.purgeAllCaches();
-  }
-  
-  private void assertNoFieldCaches() {
-    // docvalues sorting should NOT create any fieldcache entries!
-    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
-  }
-
-  /** Tests sorting on type string */
-  public void testString() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-    
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests reverse sorting on type string */
-  public void testStringReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'foo' comes after 'bar' in reverse order
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type string_val */
-  public void testStringVal() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests reverse sorting on type string_val */
-  public void testStringValReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'foo' comes after 'bar' in reverse order
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type string_val, but with a SortedDocValuesField */
-  public void testStringValSorted() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests reverse sorting on type string_val, but with a SortedDocValuesField */
-  public void testStringValReverseSorted() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'foo' comes after 'bar' in reverse order
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type int */
-  public void testInt() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300000));
-    doc.add(newStringField("value", "300000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type int in reverse */
-  public void testIntReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300000));
-    doc.add(newStringField("value", "300000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type int with a missing value */
-  public void testIntMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as a 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type int, specifying the missing value should be treated as Integer.MAX_VALUE */
-  public void testIntMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.INT);
-    sortField.setMissingValue(Integer.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as a Integer.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type long */
-  public void testLong() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 3000000000L));
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type long in reverse */
-  public void testLongReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 3000000000L));
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type long with a missing value */
-  public void testLongMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type long, specifying the missing value should be treated as Long.MAX_VALUE */
-  public void testLongMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.LONG);
-    sortField.setMissingValue(Long.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as Long.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type float */
-  public void testFloat() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatDocValuesField("value", 30.1F));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type float in reverse */
-  public void testFloatReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatDocValuesField("value", 30.1F));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type float with a missing value */
-  public void testFloatMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type float, specifying the missing value should be treated as Float.MAX_VALUE */
-  public void testFloatMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.FLOAT);
-    sortField.setMissingValue(Float.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as Float.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type double */
-  public void testDouble() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 30.1));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(4, td.totalHits);
-    // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type double with +/- zero */
-  public void testDoubleSignedZero() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", +0D));
-    doc.add(newStringField("value", "+0", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -0D));
-    doc.add(newStringField("value", "-0", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // numeric order
-    assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type double in reverse */
-  public void testDoubleReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 30.1));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(4, td.totalHits);
-    // numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type double with a missing value */
-  public void testDoubleMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(4, td.totalHits);
-    // null treated as a 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type double, specifying the missing value should be treated as Double.MAX_VALUE */
-  public void testDoubleMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.DOUBLE);
-    sortField.setMissingValue(Double.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(4, td.totalHits);
-    // null treated as Double.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-}
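
[The deleted tests above fold into TestSort.java; the one behavior worth restating is the missing-value contract they exercised. A minimal sketch, restricted to calls that appear in the deleted file: by default a document without the field sorts as if its value were 0, and setMissingValue pushes it to one end:

    SortField sortField = new SortField("value", SortField.Type.LONG);
    // default: missing docs sort as 0; with this, they sort after all real values
    sortField.setMissingValue(Long.MAX_VALUE);
    Sort sort = new Sort(sortField);
]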
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
index 4a1d810..cdabb12 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
@@ -32,7 +32,9 @@
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
@@ -87,7 +89,6 @@
 
         br = new BytesRef(s);
         doc.add(new SortedDocValuesField("stringdv", br));
-        doc.add(newStringField("string", s, Field.Store.NO));
         docValues.add(br);
 
       } else {
@@ -124,17 +125,12 @@
       final SortField sf;
       final boolean sortMissingLast;
       final boolean missingIsNull;
-      if (random.nextBoolean()) {
-        sf = new SortField("stringdv", SortField.Type.STRING, reverse);
-        // Can only use sort missing if the DVFormat
-        // supports docsWithField:
-        sortMissingLast = defaultCodecSupportsDocsWithField() && random().nextBoolean();
-        missingIsNull = defaultCodecSupportsDocsWithField();
-      } else {
-        sf = new SortField("string", SortField.Type.STRING, reverse);
-        sortMissingLast = random().nextBoolean();
-        missingIsNull = true;
-      }
+      sf = new SortField("stringdv", SortField.Type.STRING, reverse);
+      // Can only use sort missing if the DVFormat
+      // supports docsWithField:
+      sortMissingLast = defaultCodecSupportsDocsWithField() && random().nextBoolean();
+      missingIsNull = defaultCodecSupportsDocsWithField();
+
       if (sortMissingLast) {
         sf.setMissingValue(SortField.STRING_LAST);
       }
@@ -264,14 +260,14 @@
     @Override
     public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
       final int maxDoc = context.reader().maxDoc();
-      final FieldCache.Ints idSource = FieldCache.DEFAULT.getInts(context.reader(), "id", false);
+      final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
       assertNotNull(idSource);
       final FixedBitSet bits = new FixedBitSet(maxDoc);
       for(int docID=0;docID<maxDoc;docID++) {
         if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
           bits.set(docID);
           //System.out.println("  acc id=" + idSource.getInt(docID) + " docID=" + docID);
-          matchValues.add(docValues.get(idSource.get(docID)));
+          matchValues.add(docValues.get((int) idSource.get(docID)));
         }
       }
 
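
[The TestSortRandom hunk above shows the per-segment migration in miniature. Old versus new, using only the calls visible in the hunk; note that NumericDocValues returns long, hence the explicit cast back to int:

    // before: uninverted ints via the FieldCache
    //   FieldCache.Ints idSource = FieldCache.DEFAULT.getInts(context.reader(), "id", false);
    //   int id = idSource.get(docID);

    // after: the DocValues API
    NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
    int id = (int) idSource.get(docID);
]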
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldSelectors.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
similarity index 92%
rename from lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldSelectors.java
rename to lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
index c083240..0468c0f 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldSelectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.sandbox.queries;
+package org.apache.lucene.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,7 +26,6 @@
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Sort;
@@ -41,7 +40,7 @@
 
 /** Tests for SortedSetSortField selectors other than MIN,
  *  these require optional codec support (random access to ordinals) */
-public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
+public class TestSortedSetSelector extends LuceneTestCase {
   static Codec savedCodec;
   
   @BeforeClass
@@ -61,18 +60,6 @@
     Codec.setDefault(savedCodec);
   }
   
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // ensure there is nothing in fieldcache before test starts
-    FieldCache.DEFAULT.purgeAllCaches();
-  }
-  
-  private void assertNoFieldCaches() {
-    // docvalues sorting should NOT create any fieldcache entries!
-    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
-  }
-  
   public void testMax() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@@ -91,14 +78,13 @@
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
     
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'baz' comes before 'foo'
     assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -122,14 +108,13 @@
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
     
-    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSortField.Selector.MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSelector.Type.MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'baz' comes before 'foo'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -156,7 +141,7 @@
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
     
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MAX);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MAX);
     sortField.setMissingValue(SortField.STRING_FIRST);
     Sort sort = new Sort(sortField);
 
@@ -167,7 +152,6 @@
     // 'baz' comes before 'foo'
     assertEquals("3", searcher.doc(td.scoreDocs[1].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -194,7 +178,7 @@
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
     
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MAX);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MAX);
     sortField.setMissingValue(SortField.STRING_LAST);
     Sort sort = new Sort(sortField);
 
@@ -205,7 +189,6 @@
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
     // null comes last
     assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -227,14 +210,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
@@ -259,14 +241,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MIN));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MIN));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'b' comes before 'c'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -291,14 +272,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSortField.Selector.MIDDLE_MIN));
+    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSelector.Type.MIDDLE_MIN));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'b' comes before 'c'
     assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -326,7 +306,7 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MIN);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MIN);
     sortField.setMissingValue(SortField.STRING_FIRST);
     Sort sort = new Sort(sortField);
 
@@ -337,7 +317,6 @@
     // 'b' comes before 'c'
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -365,7 +344,7 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MIN);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MIN);
     sortField.setMissingValue(SortField.STRING_LAST);
     Sort sort = new Sort(sortField);
 
@@ -376,7 +355,6 @@
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
     // null comes last
     assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -398,14 +376,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MIN));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MIN));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
@@ -430,14 +407,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'b' comes before 'c'
     assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -462,14 +438,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSortField.Selector.MIDDLE_MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", true, SortedSetSelector.Type.MIDDLE_MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'b' comes before 'c'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -497,7 +472,7 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MAX);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MAX);
     sortField.setMissingValue(SortField.STRING_FIRST);
     Sort sort = new Sort(sortField);
 
@@ -508,7 +483,6 @@
     // 'b' comes before 'c'
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -536,7 +510,7 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    SortField sortField = new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MAX);
+    SortField sortField = new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MAX);
     sortField.setMissingValue(SortField.STRING_LAST);
     Sort sort = new Sort(sortField);
 
@@ -547,7 +521,6 @@
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
     // null comes last
     assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -569,14 +542,13 @@
     
     // slow wrapper does not support random access ordinals (there is no need for that!)
     IndexSearcher searcher = newSearcher(ir, false);
-    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSortField.Selector.MIDDLE_MAX));
+    Sort sort = new Sort(new SortedSetSortField("value", false, SortedSetSelector.Type.MIDDLE_MAX));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
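The rename runs consistently through every test above: the selector enum that used to hang off the sandbox SortedSetSortField is now the core SortedSetSelector.Type. A sketch of constructing such a sort, mirroring the test code (the field name "value" is just the one these tests use):

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedSetSelector;
    import org.apache.lucene.search.SortedSetSortField;

    // Sort ascending by each document's maximum ordinal; docs missing the field sort last.
    static Sort maxSelectorSort() {
      SortField sf = new SortedSetSortField("value", false, SortedSetSelector.Type.MAX);
      sf.setMissingValue(SortField.STRING_LAST);
      return new Sort(sf);
    }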
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldDocValues.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
similarity index 83%
rename from lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldDocValues.java
rename to lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
index 01cde0e..6a35e42 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortFieldDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.sandbox.queries;
+package org.apache.lucene.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -21,12 +21,15 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -35,18 +38,39 @@
 
 /** Simple tests for SortedSetSortField, indexing the sortedset up front */
 @SuppressCodecs({"Lucene40", "Lucene41"}) // avoid codecs that don't support sortedset
-public class TestSortedSetSortFieldDocValues extends LuceneTestCase {
+public class TestSortedSetSortField extends LuceneTestCase {
   
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // ensure there is nothing in fieldcache before test starts
-    FieldCache.DEFAULT.purgeAllCaches();
+  public void testEmptyIndex() throws Exception {
+    IndexSearcher empty = newSearcher(new MultiReader());
+    Query query = new TermQuery(new Term("contents", "foo"));
+  
+    Sort sort = new Sort();
+    sort.setSort(new SortedSetSortField("sortedset", false));
+    TopDocs td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+    
+    // for an empty index, any selector should work
+    for (SortedSetSelector.Type v : SortedSetSelector.Type.values()) {
+      sort.setSort(new SortedSetSortField("sortedset", false, v));
+      td = empty.search(query, null, 10, sort, true, true);
+      assertEquals(0, td.totalHits);
+    }
   }
   
-  private void assertNoFieldCaches() {
-    // docvalues sorting should NOT create any fieldcache entries!
-    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
+  public void testEquals() throws Exception {
+    SortField sf = new SortedSetSortField("a", false);
+    assertFalse(sf.equals(null));
+    
+    assertEquals(sf, sf);
+    
+    SortField sf2 = new SortedSetSortField("a", false);
+    assertEquals(sf, sf2);
+    assertEquals(sf.hashCode(), sf2.hashCode());
+    
+    assertFalse(sf.equals(new SortedSetSortField("a", true)));
+    assertFalse(sf.equals(new SortedSetSortField("b", false)));
+    assertFalse(sf.equals(new SortedSetSortField("a", false, SortedSetSelector.Type.MAX)));
+    assertFalse(sf.equals("foo"));
   }
   
   public void testForward() throws Exception {
@@ -72,7 +96,6 @@
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
     
     ir.close();
     dir.close();
@@ -102,7 +125,6 @@
     // 'bar' comes before 'baz'
     assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
@@ -138,7 +160,6 @@
     assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
@@ -174,7 +195,6 @@
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
     // null comes last
     assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
@@ -202,7 +222,6 @@
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
     assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertNoFieldCaches();
 
     ir.close();
     dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java
deleted file mode 100644
index ed911ca..0000000
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java
+++ /dev/null
@@ -1,80 +0,0 @@
-package org.apache.lucene.util.junitcompat;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.AtomicReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.JUnitCore;
-import org.junit.runner.Result;
-import org.junit.runner.notification.Failure;
-
-public class TestFailOnFieldCacheInsanity extends WithNestedTests {
-  public TestFailOnFieldCacheInsanity() {
-    super(true);
-  }
-  
-  public static class Nested1 extends WithNestedTests.AbstractNestedTest {
-    private Directory d;
-    private IndexReader r;
-    private AtomicReader subR;
-
-    private void makeIndex() throws Exception {
-      // we use RAMDirectory here, because we dont want to stay on open files on Windows:
-      d = new RAMDirectory();
-      @SuppressWarnings("resource") RandomIndexWriter w =
-          new RandomIndexWriter(random(), d);
-      Document doc = new Document();
-      doc.add(newField("ints", "1", StringField.TYPE_NOT_STORED));
-      w.addDocument(doc);
-      w.forceMerge(1);
-      r = w.getReader();
-      w.shutdown();
-
-      subR = r.leaves().get(0).reader();
-    }
-
-    public void testDummy() throws Exception {
-      makeIndex();
-      assertNotNull(FieldCache.DEFAULT.getTermsIndex(subR, "ints"));
-      assertNotNull(FieldCache.DEFAULT.getTerms(subR, "ints", false));
-      // NOTE: do not close reader/directory, else it
-      // purges FC entries
-    }
-  }
-
-  @Test
-  public void testFailOnFieldCacheInsanity() {
-    Result r = JUnitCore.runClasses(Nested1.class);
-    boolean insane = false;
-    for(Failure f : r.getFailures()) {
-      if (f.getMessage().indexOf("Insane") != -1) {
-        insane = true;
-        break;
-      }
-    }
-    Assert.assertTrue(insane);
-  }
-}
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
index 5f514b2..07adc28 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
@@ -25,6 +25,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.SimpleBindings;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
@@ -92,19 +93,26 @@
     // TODO: we could index in radians instead ... saves all the conversions in getBoundingBoxFilter
 
     // Add documents with latitude/longitude location:
+    // we index these both as DoubleFields (for bounding box/ranges) and as NumericDocValuesFields (for scoring)
     Document doc = new Document();
     doc.add(new DoubleField("latitude", 40.759011, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
     doc.add(new DoubleField("longitude", -73.9844722, Field.Store.NO));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
     writer.addDocument(doc);
     
     doc = new Document();
     doc.add(new DoubleField("latitude", 40.718266, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
     doc.add(new DoubleField("longitude", -74.007819, Field.Store.NO));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
     writer.addDocument(doc);
     
     doc = new Document();
     doc.add(new DoubleField("latitude", 40.7051157, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
     doc.add(new DoubleField("longitude", -74.0088305, Field.Store.NO));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
     writer.addDocument(doc);
 
     // Open near-real-time searcher
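As the new comment in this hunk says, each coordinate is now indexed twice: a DoubleField for bounding-box and range filtering, and a NumericDocValuesField carrying the same value as raw IEEE-754 bits for expression scoring. A minimal sketch of the round trip, assuming dv is the field's NumericDocValues at read time:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.DoubleField;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.NumericDocValuesField;

    Document doc = new Document();
    double lat = 40.759011;
    doc.add(new DoubleField("latitude", lat, Field.Store.NO));                        // ranges/filters
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(lat)));  // scoring
    // read side: double v = Double.longBitsToDouble(dv.get(docID));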
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
index e58a6b6..5429dbd 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
@@ -25,10 +25,6 @@
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.search.FieldCache.DoubleParser;
-import org.apache.lucene.search.FieldCache.FloatParser;
-import org.apache.lucene.search.FieldCache.IntParser;
-import org.apache.lucene.search.FieldCache.LongParser;
 import org.apache.lucene.search.SortField;
 
 /**
@@ -87,13 +83,13 @@
     SortField field = (SortField) o;
     switch(field.getType()) {
       case INT:
-        return new IntFieldSource(field.getField(), (IntParser) field.getParser());
+        return new IntFieldSource(field.getField());
       case LONG:
-        return new LongFieldSource(field.getField(), (LongParser) field.getParser());
+        return new LongFieldSource(field.getField());
       case FLOAT:
-        return new FloatFieldSource(field.getField(), (FloatParser) field.getParser());
+        return new FloatFieldSource(field.getField());
       case DOUBLE:
-        return new DoubleFieldSource(field.getField(), (DoubleParser) field.getParser());
+        return new DoubleFieldSource(field.getField());
       case SCORE:
         return getScoreValueSource();
       default:
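With the FieldCache parsers gone, a SortField binding now resolves directly to the doc-values-backed field source for its type, as the switch above shows. A hedged sketch of the typical binding flow (the expression and field name are illustrative, not from this patch):

    import java.text.ParseException;
    import org.apache.lucene.expressions.Expression;
    import org.apache.lucene.expressions.SimpleBindings;
    import org.apache.lucene.expressions.js.JavascriptCompiler;
    import org.apache.lucene.search.SortField;

    static SortField popularitySort() throws ParseException {
      Expression expr = JavascriptCompiler.compile("popularity * 2");
      SimpleBindings bindings = new SimpleBindings();
      bindings.add(new SortField("popularity", SortField.Type.INT)); // -> IntFieldSource above
      return expr.getSortField(bindings, false);
    }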
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
index 1b26b16..c46c3e1 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
@@ -1,7 +1,6 @@
 package org.apache.lucene.expressions;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
@@ -53,24 +52,24 @@
     doc.add(newStringField("id", "1", Field.Store.YES));
     doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
     doc.add(new NumericDocValuesField("popularity", 5));
-    doc.add(new DoubleField("latitude", 40.759011, Field.Store.NO));
-    doc.add(new DoubleField("longitude", -73.9844722, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
     iw.addDocument(doc);
     
     doc = new Document();
     doc.add(newStringField("id", "2", Field.Store.YES));
     doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
     doc.add(new NumericDocValuesField("popularity", 20));
-    doc.add(new DoubleField("latitude", 40.718266, Field.Store.NO));
-    doc.add(new DoubleField("longitude", -74.007819, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
     iw.addDocument(doc);
     
     doc = new Document();
     doc.add(newStringField("id", "3", Field.Store.YES));
     doc.add(newTextField("body", "crappy contents", Field.Store.NO));
     doc.add(new NumericDocValuesField("popularity", 2));
-    doc.add(new DoubleField("latitude", 40.7051157, Field.Store.NO));
-    doc.add(new DoubleField("longitude", -74.0088305, Field.Store.NO));
+    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
+    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
     iw.addDocument(doc);
     
     reader = iw.getReader();
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
index 0f9babd..81c68f6 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
@@ -30,6 +30,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
 import org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState;
@@ -497,6 +498,7 @@
     for(Doc rawDoc : docs) {
       Document doc = new Document();
       doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
+      doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
       doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
 
       if (VERBOSE) {
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
index b555c1f..a78a414 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
@@ -80,27 +80,27 @@
     // Reused across documents, to add the necessary facet
     // fields:
     Document doc = new Document();
-    doc.add(new IntField("num", 10, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 10));
     doc.add(new FacetField("Author", "Bob"));
     writer.addDocument(config.build(taxoWriter, doc));
 
     doc = new Document();
-    doc.add(new IntField("num", 20, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 20));
     doc.add(new FacetField("Author", "Lisa"));
     writer.addDocument(config.build(taxoWriter, doc));
 
     doc = new Document();
-    doc.add(new IntField("num", 30, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 30));
     doc.add(new FacetField("Author", "Lisa"));
     writer.addDocument(config.build(taxoWriter, doc));
 
     doc = new Document();
-    doc.add(new IntField("num", 40, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 40));
     doc.add(new FacetField("Author", "Susan"));
     writer.addDocument(config.build(taxoWriter, doc));
 
     doc = new Document();
-    doc.add(new IntField("num", 45, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 45));
     doc.add(new FacetField("Author", "Frank"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -145,7 +145,7 @@
     FacetsConfig config = new FacetsConfig();
 
     Document doc = new Document();
-    doc.add(new IntField("num", 10, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 10));
     doc.add(new FacetField("a", "foo1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -154,7 +154,7 @@
     }
 
     doc = new Document();
-    doc.add(new IntField("num", 20, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 20));
     doc.add(new FacetField("a", "foo2"));
     doc.add(new FacetField("b", "bar1"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -164,7 +164,7 @@
     }
 
     doc = new Document();
-    doc.add(new IntField("num", 30, Field.Store.NO));
+    doc.add(new NumericDocValuesField("num", 30));
     doc.add(new FacetField("a", "foo3"));
     doc.add(new FacetField("b", "bar2"));
     doc.add(new FacetField("c", "baz1"));
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 7c33583..f40c2a7 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -300,7 +300,7 @@
    *  This is normally not a problem, as you can obtain the
    *  value just like you obtain other values for each
    *  matching document (eg, via stored fields, via
-   *  FieldCache, etc.)
+   *  DocValues, etc.)
    *
    *  @param withinGroupSort The {@link Sort} used to sort
    *    documents within each group.  Passing null is
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
index c36d61c..6515caa 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
@@ -20,7 +20,6 @@
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.CachingCollector;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiCollector;
@@ -78,7 +77,7 @@
   private Bits matchingGroupHeads;
 
   /**
-   * Constructs a <code>GroupingSearch</code> instance that groups documents by index terms using the {@link FieldCache}.
+   * Constructs a <code>GroupingSearch</code> instance that groups documents by index terms using DocValues.
    * The group field can only have one token per document. This means that the field must not be analysed.
    *
    * @param groupField The name of the field to group by.
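A hedged usage sketch for DocValues-backed grouping; the field name is illustrative and the search() signature is the 4.x grouping API as I understand it, so treat it as a sketch rather than a reference:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.GroupingSearch;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.util.BytesRef;

    static TopGroups<BytesRef> topAuthors(IndexSearcher searcher) throws IOException {
      GroupingSearch grouping = new GroupingSearch("author"); // indexed as SortedDocValuesField
      grouping.setGroupSort(Sort.RELEVANCE);
      return grouping.search(searcher, new MatchAllDocsQuery(), 0, 10);
    }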
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package.html b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package.html
index e45b666..c346c71 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package.html
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package.html
@@ -80,8 +80,7 @@
 <p>Known limitations:</p>
 <ul>
   <li> For the two-pass grouping search, the group field must be a
-    single-valued indexed field (or indexed as a {@link org.apache.lucene.document.SortedDocValuesField}).
-    {@link org.apache.lucene.search.FieldCache} is used to load the {@link org.apache.lucene.index.SortedDocValues} for this field.
+    single-valued field indexed as a {@link org.apache.lucene.document.SortedDocValuesField}.
  <li> Although Solr supports grouping by function and this module has an abstraction of what a group is, there are currently only
     implementations for grouping based on terms.
   <li> Sharding is not directly supported, though is not too
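Given the first limitation above, the indexing side pairs the queryable term with a doc-values copy, as the TestDrillSideways hunk earlier in this patch does for its id field. A minimal sketch (field name illustrative):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.util.BytesRef;

    Document doc = new Document();
    doc.add(new StringField("author", "author1", Field.Store.NO));        // for queries
    doc.add(new SortedDocValuesField("author", new BytesRef("author1"))); // for grouping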
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
index 45192c1..7183aac 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
@@ -18,9 +18,8 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
@@ -161,7 +160,7 @@
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
-      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
+      groupIndex = DocValues.getSorted(context.reader(), groupField);
 
       for (GroupHead groupHead : groups.values()) {
         for (int i = 0; i < groupHead.comparators.length; i++) {
@@ -276,13 +275,13 @@
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
-      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
+      groupIndex = DocValues.getSorted(context.reader(), groupField);
       for (int i = 0; i < fields.length; i++) {
         if (fields[i].getType() == SortField.Type.SCORE) {
           continue;
         }
 
-        sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader(), fields[i].getField());
+        sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField());
       }
 
       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
@@ -444,9 +443,9 @@
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
-      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
+      groupIndex = DocValues.getSorted(context.reader(), groupField);
       for (int i = 0; i < fields.length; i++) {
-        sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader(), fields[i].getField());
+        sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField());
       }
 
       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
@@ -587,7 +586,7 @@
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
       this.readerContext = context;
-      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
+      groupIndex = DocValues.getSorted(context.reader(), groupField);
 
       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
       ordSet.clear();
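The collectors in this file all follow the per-segment pattern visible in these hunks: fetch the segment's SortedDocValues in doSetNextReader, then re-map any previously collected group terms into the new segment's ordinal space, since ords are segment-local. A hedged fragment of that pattern (field and term names illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.util.BytesRef;

    SortedDocValues groupIndex;

    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      groupIndex = DocValues.getSorted(context.reader(), "author");
      // re-resolve a cached group term against this segment's ords:
      int ord = groupIndex.lookupTerm(new BytesRef("author1")); // negative if absent here
    }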
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
index 0ff1e57..4eeb78f 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
@@ -18,9 +18,9 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SentinelIntSet;
@@ -105,7 +105,7 @@
 
   @Override
   protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-    index = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
+    index = DocValues.getSorted(context.reader(), groupField);
 
     // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
     ordSet.clear();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
index c718dc2..2fa8b60 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
@@ -18,9 +18,9 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.BytesRef;
@@ -109,8 +109,8 @@
 
   @Override
   protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-    groupFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
-    countFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), countField);
+    groupFieldTermIndex = DocValues.getSorted(context.reader(), groupField);
+    countFieldTermIndex = DocValues.getSorted(context.reader(), countField);
     ordSet.clear();
     for (GroupCount group : groups) {
       int groupOrd = group.groupValue == null ? -1 : groupFieldTermIndex.lookupTerm(group.groupValue);
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
index 6c708a9..9e5efa37 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
@@ -20,9 +20,9 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
 import org.apache.lucene.util.BytesRef;
@@ -46,7 +46,7 @@
    *
    *  @param groupField The field used to group
    *    documents. This field must be single-valued and
-   *    indexed (FieldCache is used to access its value
+   *    indexed (DocValues is used to access its value
    *    per-document).
    *  @param groupSort The {@link Sort} used to sort the
    *    groups.  The top sorted document within each group
@@ -88,6 +88,6 @@
   @Override
   protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
     super.doSetNextReader(readerContext);
-    index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
+    index = DocValues.getSorted(readerContext.reader(), groupField);
   }
 }
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
index 5cff4b0..92e2924 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
@@ -18,11 +18,11 @@
  */
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.grouping.AbstractGroupFacetCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SentinelIntSet;
@@ -34,7 +34,7 @@
 
 /**
  * An implementation of {@link AbstractGroupFacetCollector} that computes grouped facets based on the indexed terms
- * from the {@link FieldCache}.
+ * from DocValues.
  *
  * @lucene.experimental
  */
@@ -128,8 +128,8 @@
         segmentResults.add(createSegmentResult());
       }
 
-      groupFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
-      facetFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), facetField);
+      groupFieldTermsIndex = DocValues.getSorted(context.reader(), groupField);
+      facetFieldTermsIndex = DocValues.getSorted(context.reader(), facetField);
 
       // 1+ to allow for the -1 "not set":
       segmentFacetCounts = new int[facetFieldTermsIndex.getValueCount()+1];
@@ -283,8 +283,8 @@
         segmentResults.add(createSegmentResult());
       }
 
-      groupFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
-      facetFieldDocTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), facetField);
+      groupFieldTermsIndex = DocValues.getSorted(context.reader(), groupField);
+      facetFieldDocTermOrds = DocValues.getSortedSet(context.reader(), facetField);
       facetFieldNumTerms = (int) facetFieldDocTermOrds.getValueCount();
       if (facetFieldNumTerms == 0) {
         facetOrdTermsEnum = null;
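For the multi-valued facet field the collector now asks for SortedSetDocValues instead of the old DocTermOrds, but per-document consumption keeps the same ord-iteration shape. A hedged sketch against the 4.x SortedSetDocValues API (field name and counts array are illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedSetDocValues;

    static void countOrds(AtomicReader reader, int docID, int[] counts) throws IOException {
      SortedSetDocValues dv = DocValues.getSortedSet(reader, "facetField");
      dv.setDocument(docID);
      for (long ord = dv.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = dv.nextOrd()) {
        counts[(int) ord]++; // ords are exposed as longs in this API
      }
    }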
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
index 624b0f7..c920347 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
@@ -21,9 +21,9 @@
 import java.util.Collection;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
@@ -56,7 +56,7 @@
   @Override
   protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
     super.doSetNextReader(readerContext);
-    index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
+    index = DocValues.getSorted(readerContext.reader(), groupField);
 
     // Rebuild ordSet
     ordSet.clear();
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package.html b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package.html
index 6a1c9f5..29b44c5 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package.html
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package.html
@@ -16,6 +16,6 @@
 -->
 <html>
 <body>
-Support for grouping by indexed terms via {@link org.apache.lucene.search.FieldCache}.
+Support for grouping by indexed terms via {@link org.apache.lucene.index.DocValues}.
 </body>
 </html>
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
index 7f427f1..8e5e1f2 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
@@ -17,23 +17,35 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.ScoreDoc;
@@ -48,22 +60,8 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
 public class AllGroupHeadsCollectorTest extends LuceneTestCase {
 
-  private static final DocValuesType[] vts = new DocValuesType[]{
-      DocValuesType.BINARY, DocValuesType.SORTED
-  };
-
   public void testBasic() throws Exception {
     final String groupField = "author";
     Directory dir = newDirectory();
@@ -72,30 +70,30 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT,
             new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    DocValuesType valueType = vts[random().nextInt(vts.length)];
+    DocValuesType valueType = DocValuesType.SORTED;
 
     // 0
     Document doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "random text", Field.Store.NO));
-    doc.add(new IntField("id_1", 1, Field.Store.NO));
-    doc.add(newStringField("id_2", "1", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 1));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("1")));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "some more random text blob", Field.Store.NO));
-    doc.add(new IntField("id_1", 2, Field.Store.NO));
-    doc.add(newStringField("id_2", "2", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 2));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("2")));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "some more random textual data", Field.Store.NO));
-    doc.add(new IntField("id_1", 3, Field.Store.NO));
-    doc.add(newStringField("id_2", "3", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 3));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("3")));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
@@ -103,38 +101,38 @@
     doc = new Document();
     addGroupField(doc, groupField, "author2", valueType);
     doc.add(newTextField("content", "some random text", Field.Store.NO));
-    doc.add(new IntField("id_1", 4, Field.Store.NO));
-    doc.add(newStringField("id_2", "4", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 4));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("4")));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
     addGroupField(doc, groupField, "author3", valueType);
     doc.add(newTextField("content", "some more random text", Field.Store.NO));
-    doc.add(new IntField("id_1", 5, Field.Store.NO));
-    doc.add(newStringField("id_2", "5", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 5));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("5")));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
     addGroupField(doc, groupField, "author3", valueType);
     doc.add(newTextField("content", "random blob", Field.Store.NO));
-    doc.add(new IntField("id_1", 6, Field.Store.NO));
-    doc.add(newStringField("id_2", "6", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 6));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("6")));
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = new Document();
     doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(new IntField("id_1", 6, Field.Store.NO));
-    doc.add(newStringField("id_2", "6", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 6));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("6")));
     w.addDocument(doc);
 
     // 7 -- no author field
     doc = new Document();
     doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(new IntField("id_1", 7, Field.Store.NO));
-    doc.add(newStringField("id_2", "7", Field.Store.NO));
+    doc.add(new NumericDocValuesField("id_1", 7));
+    doc.add(new SortedDocValuesField("id_2", new BytesRef("7")));
     w.addDocument(doc);
 
     IndexReader reader = w.getReader();
@@ -198,6 +196,7 @@
           // B/c of DV based impl we can't see the difference between an empty string and a null value.
           // For that reason we don't generate empty string groups.
           randomValue = TestUtil.randomRealisticUnicodeString(random());
+          //randomValue = TestUtil.randomSimpleString(random());
         } while ("".equals(randomValue));
         groups.add(new BytesRef(randomValue));
       }
@@ -224,31 +223,20 @@
           dir,
           newIndexWriterConfig(TEST_VERSION_CURRENT,
               new MockAnalyzer(random())));
-      DocValuesType valueType = vts[random().nextInt(vts.length)];
+      DocValuesType valueType = DocValuesType.SORTED;
 
       Document doc = new Document();
       Document docNoGroup = new Document();
-      Field group = newStringField("group", "", Field.Store.NO);
-      doc.add(group);
       Field valuesField = null;
-      switch(valueType) {
-        case BINARY:
-          valuesField = new BinaryDocValuesField("group_dv", new BytesRef());
-          break;
-        case SORTED:
-          valuesField = new SortedDocValuesField("group_dv", new BytesRef());
-          break;
-        default:
-          fail("unhandled type");
-      }
+      valuesField = new SortedDocValuesField("group", new BytesRef());
       doc.add(valuesField);
-      Field sort1 = newStringField("sort1", "", Field.Store.NO);
+      Field sort1 = new SortedDocValuesField("sort1", new BytesRef());
       doc.add(sort1);
       docNoGroup.add(sort1);
-      Field sort2 = newStringField("sort2", "", Field.Store.NO);
+      Field sort2 = new SortedDocValuesField("sort2", new BytesRef());
       doc.add(sort2);
       docNoGroup.add(sort2);
-      Field sort3 = newStringField("sort3", "", Field.Store.NO);
+      Field sort3 = new SortedDocValuesField("sort3", new BytesRef());
       doc.add(sort3);
       docNoGroup.add(sort3);
       Field content = newTextField("content", "", Field.Store.NO);
@@ -257,6 +245,9 @@
       IntField id = new IntField("id", 0, Field.Store.NO);
       doc.add(id);
       docNoGroup.add(id);
+      NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
+      doc.add(idDV);
+      docNoGroup.add(idDV);
       final GroupDoc[] groupDocs = new GroupDoc[numDocs];
       for (int i = 0; i < numDocs; i++) {
         final BytesRef groupValue;
@@ -283,14 +274,14 @@
 
         groupDocs[i] = groupDoc;
         if (groupDoc.group != null) {
-          group.setStringValue(groupDoc.group.utf8ToString());
           valuesField.setBytesValue(new BytesRef(groupDoc.group.utf8ToString()));
         }
-        sort1.setStringValue(groupDoc.sort1.utf8ToString());
-        sort2.setStringValue(groupDoc.sort2.utf8ToString());
-        sort3.setStringValue(groupDoc.sort3.utf8ToString());
+        sort1.setBytesValue(groupDoc.sort1);
+        sort2.setBytesValue(groupDoc.sort2);
+        sort3.setBytesValue(groupDoc.sort3);
         content.setStringValue(groupDoc.content);
         id.setIntValue(groupDoc.id);
+        idDV.setLongValue(groupDoc.id);
         if (groupDoc.group == null) {
           w.addDocument(docNoGroup);
         } else {
@@ -301,91 +292,86 @@
       final DirectoryReader r = w.getReader();
       w.shutdown();
 
-      // NOTE: intentional but temporary field cache insanity!
-      final FieldCache.Ints docIdToFieldId = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
+      final NumericDocValues docIdToFieldId = MultiDocValues.getNumericValues(r, "id");
       final int[] fieldIdToDocID = new int[numDocs];
       for (int i = 0; i < numDocs; i++) {
-        int fieldId = docIdToFieldId.get(i);
+        int fieldId = (int) docIdToFieldId.get(i);
         fieldIdToDocID[fieldId] = i;
       }
 
-      try {
-        final IndexSearcher s = newSearcher(r);
-
-        for (int contentID = 0; contentID < 3; contentID++) {
-          final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs;
-          for (ScoreDoc hit : hits) {
-            final GroupDoc gd = groupDocs[docIdToFieldId.get(hit.doc)];
-            assertTrue(gd.score == 0.0);
-            gd.score = hit.score;
-            int docId = gd.id;
-            assertEquals(docId, docIdToFieldId.get(hit.doc));
-          }
+      final IndexSearcher s = newSearcher(r);
+      
+      for (int contentID = 0; contentID < 3; contentID++) {
+        final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs;
+        for (ScoreDoc hit : hits) {
+          final GroupDoc gd = groupDocs[(int) docIdToFieldId.get(hit.doc)];
+          assertTrue(gd.score == 0.0);
+          gd.score = hit.score;
+          int docId = gd.id;
+          assertEquals(docId, docIdToFieldId.get(hit.doc));
         }
-
-        for (GroupDoc gd : groupDocs) {
-          assertTrue(gd.score != 0.0);
-        }
-
-        for (int searchIter = 0; searchIter < 100; searchIter++) {
-
-          if (VERBOSE) {
-            System.out.println("TEST: searchIter=" + searchIter);
-          }
-
-          final String searchTerm = "real" + random().nextInt(3);
-          boolean sortByScoreOnly = random().nextBoolean();
-          Sort sortWithinGroup = getRandomSort(sortByScoreOnly);
-          AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup);
-          s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector);
-          int[] expectedGroupHeads = createExpectedGroupHeads(searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID);
-          int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads();
-          // The actual group heads contains Lucene ids. Need to change them into our id value.
-          for (int i = 0; i < actualGroupHeads.length; i++) {
-            actualGroupHeads[i] = docIdToFieldId.get(actualGroupHeads[i]);
-          }
-          // Allows us the easily iterate and assert the actual and expected results.
-          Arrays.sort(expectedGroupHeads);
-          Arrays.sort(actualGroupHeads);
-
-          if (VERBOSE) {
-            System.out.println("Collector: " + allGroupHeadsCollector.getClass().getSimpleName());
-            System.out.println("Sort within group: " + sortWithinGroup);
-            System.out.println("Num group: " + numGroups);
-            System.out.println("Num doc: " + numDocs);
-            System.out.println("\n=== Expected: \n");
-            for (int expectedDocId : expectedGroupHeads) {
-              GroupDoc expectedGroupDoc = groupDocs[expectedDocId];
-              String expectedGroup = expectedGroupDoc.group == null ? null : expectedGroupDoc.group.utf8ToString();
-              System.out.println(
-                  String.format(Locale.ROOT,
-                      "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d",
-                      expectedGroup, expectedGroupDoc.score, expectedGroupDoc.sort1.utf8ToString(),
-                      expectedGroupDoc.sort2.utf8ToString(), expectedGroupDoc.sort3.utf8ToString(), expectedDocId
-                  )
-              );
-            }
-            System.out.println("\n=== Actual: \n");
-            for (int actualDocId : actualGroupHeads) {
-              GroupDoc actualGroupDoc = groupDocs[actualDocId];
-              String actualGroup = actualGroupDoc.group == null ? null : actualGroupDoc.group.utf8ToString();
-              System.out.println(
-                  String.format(Locale.ROOT,
-                      "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d",
-                      actualGroup, actualGroupDoc.score, actualGroupDoc.sort1.utf8ToString(),
-                      actualGroupDoc.sort2.utf8ToString(), actualGroupDoc.sort3.utf8ToString(), actualDocId
-                  )
-              );
-            }
-            System.out.println("\n===================================================================================");
-          }
-
-          assertArrayEquals(expectedGroupHeads, actualGroupHeads);
-        }
-      } finally {
-        QueryUtils.purgeFieldCache(r);
       }
-
+      
+      for (GroupDoc gd : groupDocs) {
+        assertTrue(gd.score != 0.0);
+      }
+      
+      for (int searchIter = 0; searchIter < 100; searchIter++) {
+        
+        if (VERBOSE) {
+          System.out.println("TEST: searchIter=" + searchIter);
+        }
+        
+        final String searchTerm = "real" + random().nextInt(3);
+        boolean sortByScoreOnly = random().nextBoolean();
+        Sort sortWithinGroup = getRandomSort(sortByScoreOnly);
+        AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup);
+        s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector);
+        int[] expectedGroupHeads = createExpectedGroupHeads(searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID);
+        int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads();
+        // The actual group heads contain Lucene doc ids; convert them to our id values.
+        for (int i = 0; i < actualGroupHeads.length; i++) {
+          actualGroupHeads[i] = (int) docIdToFieldId.get(actualGroupHeads[i]);
+        }
+        // Allows us to easily iterate over and assert the actual and expected results.
+        Arrays.sort(expectedGroupHeads);
+        Arrays.sort(actualGroupHeads);
+        
+        if (VERBOSE) {
+          System.out.println("Collector: " + allGroupHeadsCollector.getClass().getSimpleName());
+          System.out.println("Sort within group: " + sortWithinGroup);
+          System.out.println("Num group: " + numGroups);
+          System.out.println("Num doc: " + numDocs);
+          System.out.println("\n=== Expected: \n");
+          for (int expectedDocId : expectedGroupHeads) {
+            GroupDoc expectedGroupDoc = groupDocs[expectedDocId];
+            String expectedGroup = expectedGroupDoc.group == null ? null : expectedGroupDoc.group.utf8ToString();
+            System.out.println(
+                String.format(Locale.ROOT,
+                    "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d",
+                    expectedGroup, expectedGroupDoc.score, expectedGroupDoc.sort1.utf8ToString(),
+                    expectedGroupDoc.sort2.utf8ToString(), expectedGroupDoc.sort3.utf8ToString(), expectedDocId
+                    )
+                );
+          }
+          System.out.println("\n=== Actual: \n");
+          for (int actualDocId : actualGroupHeads) {
+            GroupDoc actualGroupDoc = groupDocs[actualDocId];
+            String actualGroup = actualGroupDoc.group == null ? null : actualGroupDoc.group.utf8ToString();
+            System.out.println(
+                String.format(Locale.ROOT,
+                    "Group:%10s score%5f Sort1:%10s Sort2:%10s Sort3:%10s doc:%5d",
+                    actualGroup, actualGroupDoc.score, actualGroupDoc.sort1.utf8ToString(),
+                    actualGroupDoc.sort2.utf8ToString(), actualGroupDoc.sort3.utf8ToString(), actualDocId
+                    )
+                );
+          }
+          System.out.println("\n===================================================================================");
+        }
+        
+        assertArrayEquals(expectedGroupHeads, actualGroupHeads);
+      }
+      
       r.close();
       dir.close();
     }
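
The hunk above swaps the composite-reader FieldCache lookup for MultiDocValues.getNumericValues, a merged per-segment doc values view that populates no cache. Below is a minimal sketch of the resulting id-remapping pattern; the class and method names are illustrative, not part of the patch, and it assumes every document carries a NumericDocValuesField named "id" with values in [0, maxDoc):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiDocValues;
    import org.apache.lucene.index.NumericDocValues;

    final class IdRemapSketch {
      /** Builds an application-id -> Lucene-doc-id map from the "id" doc values. */
      static int[] fieldIdToDocId(IndexReader reader) throws IOException {
        // Merged view over all segments; no FieldCache entry is created.
        // Returns null if no document has the field, so every doc must carry it.
        NumericDocValues idValues = MultiDocValues.getNumericValues(reader, "id");
        int[] map = new int[reader.maxDoc()];
        for (int doc = 0; doc < reader.maxDoc(); doc++) {
          map[(int) idValues.get(doc)] = doc;
        }
        return map;
      }
    }
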
@@ -542,14 +528,13 @@
   }
 
   private void addGroupField(Document doc, String groupField, String value, DocValuesType valueType) {
-    doc.add(new TextField(groupField, value, Field.Store.NO));
     Field valuesField = null;
     switch(valueType) {
       case BINARY:
-        valuesField = new BinaryDocValuesField(groupField + "_dv", new BytesRef(value));
+        valuesField = new BinaryDocValuesField(groupField, new BytesRef(value));
         break;
       case SORTED:
-        valuesField = new SortedDocValuesField(groupField + "_dv", new BytesRef(value));
+        valuesField = new SortedDocValuesField(groupField, new BytesRef(value));
         break;
       default:
         fail("unhandled type");
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
index 3871ef5..9a48452 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
@@ -17,34 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.FieldInfo.DocValuesType;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.grouping.function.FunctionDistinctValuesCollector;
-import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
-import org.apache.lucene.search.grouping.term.TermDistinctValuesCollector;
-import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.mutable.MutableValue;
-import org.apache.lucene.util.mutable.MutableValueStr;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -59,74 +31,92 @@
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.grouping.function.FunctionDistinctValuesCollector;
+import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.term.TermDistinctValuesCollector;
+import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.mutable.MutableValue;
+import org.apache.lucene.util.mutable.MutableValueStr;
+
+
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // we need missing support... i think?
 public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
 
   private final static NullComparator nullComparator = new NullComparator();
   
   private final String groupField = "author";
-  private final String dvGroupField = "author_dv";
   private final String countField = "publisher";
-  private final String dvCountField = "publisher_dv";
 
   public void testSimple() throws Exception {
     Random random = random();
-    DocValuesType[] dvTypes = new DocValuesType[]{
-        DocValuesType.NUMERIC,
-        DocValuesType.BINARY,
-        DocValuesType.SORTED,
-    };
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
         random,
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT,
             new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    boolean canUseDV = true;
-    DocValuesType dvType = canUseDV ? dvTypes[random.nextInt(dvTypes.length)] : null;
-
     Document doc = new Document();
-    addField(doc, groupField, "1", dvType);
-    addField(doc, countField, "1", dvType);
+    addField(doc, groupField, "1");
+    addField(doc, countField, "1");
     doc.add(new TextField("content", "random text", Field.Store.NO));
     doc.add(new StringField("id", "1", Field.Store.NO));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    addField(doc, groupField, "1", dvType);
-    addField(doc, countField, "1", dvType);
+    addField(doc, groupField, "1");
+    addField(doc, countField, "1");
     doc.add(new TextField("content", "some more random text blob", Field.Store.NO));
     doc.add(new StringField("id", "2", Field.Store.NO));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    addField(doc, groupField, "1", dvType);
-    addField(doc, countField, "2", dvType);
+    addField(doc, groupField, "1");
+    addField(doc, countField, "2");
     doc.add(new TextField("content", "some more random textual data", Field.Store.NO));
     doc.add(new StringField("id", "3", Field.Store.NO));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
-    // 3
+    // 3 -- no count field
     doc = new Document();
-    addField(doc, groupField, "2", dvType);
+    addField(doc, groupField, "2");
     doc.add(new TextField("content", "some random text", Field.Store.NO));
     doc.add(new StringField("id", "4", Field.Store.NO));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    addField(doc, groupField, "3", dvType);
-    addField(doc, countField, "1", dvType);
+    addField(doc, groupField, "3");
+    addField(doc, countField, "1");
     doc.add(new TextField("content", "some more random text", Field.Store.NO));
     doc.add(new StringField("id", "5", Field.Store.NO));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    addField(doc, groupField, "3", dvType);
-    addField(doc, countField, "1", dvType);
+    addField(doc, groupField, "3");
+    addField(doc, countField, "1");
     doc.add(new TextField("content", "random blob", Field.Store.NO));
     doc.add(new StringField("id", "6", Field.Store.NO));
     w.addDocument(doc);
@@ -134,7 +124,7 @@
     // 6 -- no author field
     doc = new Document();
     doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
-    addField(doc, countField, "1", dvType);
+    addField(doc, countField, "1");
     doc.add(new StringField("id", "6", Field.Store.NO));
     w.addDocument(doc);
 
@@ -160,13 +150,13 @@
     };
 
     // === Search for content:random
-    AbstractFirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(dvType, new Sort(), groupField, 10);
+    AbstractFirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
     indexSearcher.search(new TermQuery(new Term("content", "random")), firstCollector);
     AbstractDistinctValuesCollector<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> distinctValuesCollector
-        = createDistinctCountCollector(firstCollector, groupField, countField, dvType);
+        = createDistinctCountCollector(firstCollector, groupField, countField);
     indexSearcher.search(new TermQuery(new Term("content", "random")), distinctValuesCollector);
 
-    List<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> gcs =  distinctValuesCollector.getGroups();
+    List<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> gcs = distinctValuesCollector.getGroups();
     Collections.sort(gcs, cmp);
     assertEquals(4, gcs.size());
 
@@ -193,9 +183,9 @@
     compare("1", countValues.get(0));
 
     // === Search for content:some
-    firstCollector = createRandomFirstPassCollector(dvType, new Sort(), groupField, 10);
+    firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
     indexSearcher.search(new TermQuery(new Term("content", "some")), firstCollector);
-    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField, dvType);
+    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField);
     indexSearcher.search(new TermQuery(new Term("content", "some")), distinctValuesCollector);
 
     gcs = distinctValuesCollector.getGroups();
@@ -220,9 +210,9 @@
     compare("1", countValues.get(0));
 
      // === Search for content:blob
-    firstCollector = createRandomFirstPassCollector(dvType, new Sort(), groupField, 10);
+    firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
     indexSearcher.search(new TermQuery(new Term("content", "blob")), firstCollector);
-    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField, dvType);
+    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField);
     indexSearcher.search(new TermQuery(new Term("content", "blob")), distinctValuesCollector);
 
     gcs = distinctValuesCollector.getGroups();
@@ -251,18 +241,16 @@
       IndexContext context = createIndexContext();
       for (int searchIter = 0; searchIter < 100; searchIter++) {
         final IndexSearcher searcher = newSearcher(context.indexReader);
-        boolean useDv = context.dvType != null && random.nextBoolean();
-        DocValuesType dvType = useDv ? context.dvType : null;
         String term = context.contentStrings[random.nextInt(context.contentStrings.length)];
         Sort groupSort = new Sort(new SortField("id", SortField.Type.STRING));
         int topN = 1 + random.nextInt(10);
 
         List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> expectedResult = createExpectedResult(context, term, groupSort, topN);
 
-        AbstractFirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(dvType, groupSort, groupField, topN);
+        AbstractFirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(groupSort, groupField, topN);
         searcher.search(new TermQuery(new Term("content", term)), firstCollector);
         AbstractDistinctValuesCollector<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> distinctValuesCollector
-            = createDistinctCountCollector(firstCollector, groupField, countField, dvType);
+            = createDistinctCountCollector(firstCollector, groupField, countField);
         searcher.search(new TermQuery(new Term("content", term)), distinctValuesCollector);
         @SuppressWarnings("unchecked")
         List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> actualResult = (List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>>) distinctValuesCollector.getGroups();
@@ -273,7 +261,6 @@
           System.out.println("1st pass collector class name=" + firstCollector.getClass().getName());
           System.out.println("2nd pass collector class name=" + distinctValuesCollector.getClass().getName());
           System.out.println("Search term=" + term);
-          System.out.println("DVType=" + dvType);
           System.out.println("1st pass groups=" + firstCollector.getTopGroups(0, false));
           System.out.println("Expected:");      
           printGroups(expectedResult);
@@ -363,33 +350,14 @@
     }
   }
 
-  private void addField(Document doc, String field, String value, DocValuesType type) {
-    doc.add(new StringField(field, value, Field.Store.YES));
-    if (type == null) {
-      return;
-    }
-    String dvField = field + "_dv";
-
-    Field valuesField = null;
-    switch (type) {
-      case NUMERIC:
-        valuesField = new NumericDocValuesField(dvField, Integer.parseInt(value));
-        break;
-      case BINARY:
-        valuesField = new BinaryDocValuesField(dvField, new BytesRef(value));
-        break;
-      case SORTED:
-        valuesField = new SortedDocValuesField(dvField, new BytesRef(value));
-        break;
-    }
-    doc.add(valuesField);
+  private void addField(Document doc, String field, String value) {
+    doc.add(new SortedDocValuesField(field, new BytesRef(value)));
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
   private <T extends Comparable> AbstractDistinctValuesCollector<AbstractDistinctValuesCollector.GroupCount<T>> createDistinctCountCollector(AbstractFirstPassGroupingCollector<T> firstPassGroupingCollector,
                                                                       String groupField,
-                                                                      String countField,
-                                                                      DocValuesType dvType) {
+                                                                      String countField) {
     Random random = random();
     Collection<SearchGroup<T>> searchGroups = firstPassGroupingCollector.getTopGroups(0, false);
     if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
@@ -400,20 +368,12 @@
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private <T> AbstractFirstPassGroupingCollector<T> createRandomFirstPassCollector(DocValuesType dvType, Sort groupSort, String groupField, int topNGroups) throws IOException {
+  private <T> AbstractFirstPassGroupingCollector<T> createRandomFirstPassCollector(Sort groupSort, String groupField, int topNGroups) throws IOException {
     Random random = random();
-    if (dvType != null) {
-      if (random.nextBoolean()) {
-        return (AbstractFirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups);
-      } else {
-        return (AbstractFirstPassGroupingCollector<T>) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups);
-      }
+    if (random.nextBoolean()) {
+      return (AbstractFirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups);
     } else {
-      if (random.nextBoolean()) {
-        return (AbstractFirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups);
-      } else {
-        return (AbstractFirstPassGroupingCollector<T>) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups);
-      }
+      return (AbstractFirstPassGroupingCollector<T>) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups);
     }
   }
 
@@ -444,10 +404,6 @@
 
   private IndexContext createIndexContext() throws Exception {
     Random random = random();
-    DocValuesType[] dvTypes = new DocValuesType[]{
-        DocValuesType.BINARY,
-        DocValuesType.SORTED
-    };
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
@@ -457,9 +413,6 @@
         new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())
       );
 
-    boolean canUseDV = true;
-    DocValuesType dvType = canUseDV ? dvTypes[random.nextInt(dvTypes.length)] : null;
-
     int numDocs = 86 + random.nextInt(1087) * RANDOM_MULTIPLIER;
     String[] groupValues = new String[numDocs / 5];
     String[] countValues = new String[numDocs / 10];
@@ -492,10 +445,10 @@
       Document doc = new Document();
       doc.add(new StringField("id", String.format(Locale.ROOT, "%09d", i), Field.Store.YES));
       if (groupValue != null) {
-        addField(doc, groupField, groupValue, dvType);
+        addField(doc, groupField, groupValue);
       }
       if (countValue != null) {
-        addField(doc, countField, countValue, dvType);
+        addField(doc, countField, countValue);
       }
       doc.add(new TextField("content", content, Field.Store.YES));
       w.addDocument(doc);
@@ -510,22 +463,20 @@
     }
 
     w.shutdown();
-    return new IndexContext(dir, reader, dvType, searchTermToGroupCounts, contentStrings.toArray(new String[contentStrings.size()]));
+    return new IndexContext(dir, reader, searchTermToGroupCounts, contentStrings.toArray(new String[contentStrings.size()]));
   }
 
   private static class IndexContext {
 
     final Directory directory;
     final DirectoryReader indexReader;
-    final DocValuesType dvType;
     final Map<String, Map<String, Set<String>>> searchTermToGroupCounts;
     final String[] contentStrings;
 
-    IndexContext(Directory directory, DirectoryReader indexReader, DocValuesType dvType,
+    IndexContext(Directory directory, DirectoryReader indexReader, 
                  Map<String, Map<String, Set<String>>> searchTermToGroupCounts, String[] contentStrings) {
       this.directory = directory;
       this.indexReader = indexReader;
-      this.dvType = dvType;
       this.searchTermToGroupCounts = searchTermToGroupCounts;
       this.contentStrings = contentStrings;
     }
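
With the DocValuesType plumbing removed, a group or count field is just one SortedDocValuesField, and sorting reads it directly rather than uninverting an indexed field. Below is a self-contained sketch of that index-then-sort round trip; only the "author" field name comes from the test, everything else is illustrative:

    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.Version;

    public class SortedDocValuesSortSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_CURRENT, new KeywordAnalyzer()));
        for (String author : new String[] {"3", "1", "2"}) {
          Document doc = new Document();
          // Doc values only: no parallel indexed copy is needed to sort or group.
          doc.add(new SortedDocValuesField("author", new BytesRef(author)));
          w.addDocument(doc);
        }
        w.shutdown();
        DirectoryReader r = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(r);
        // Sorting consults the doc values directly; nothing is uninverted.
        TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10,
            new Sort(new SortField("author", SortField.Type.STRING)));
        System.out.println("hits in author order: " + hits.totalHits);
        r.close();
        dir.close();
      }
    }
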
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
index 470a0a2..0594956 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
@@ -17,24 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.NoMergePolicy;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.grouping.term.TermGroupFacetCollector;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -49,6 +31,29 @@
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.grouping.term.TermGroupFacetCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TestUtil;
+
+
+// Need SortedSetDocValues, which these codecs lack
+@SuppressCodecs({"Lucene40", "Lucene41"})
 public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
 
   public void testSimple() throws Exception {
@@ -62,7 +67,7 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT,
             new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    boolean useDv = random().nextBoolean();
+    boolean useDv = true;
 
     // 0
     Document doc = new Document();
@@ -287,7 +292,7 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT,
             new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));
-    boolean useDv = false;
+    boolean useDv = true;
 
     // Cannot assert this since we use NoMergePolicy:
     w.setDoRandomForceMergeAssert(false);
@@ -300,7 +305,7 @@
     // 1
     doc = new Document();
     addField(doc, groupField, "a", useDv);
-    doc.add(new StringField("airport", "ams", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
     w.addDocument(doc);
 
     w.commit();
@@ -309,32 +314,32 @@
     // 2
     doc = new Document();
     addField(doc, groupField, "a", useDv);
-    doc.add(new StringField("airport", "ams", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
     w.addDocument(doc);
 
     // 3
     doc = new Document();
     addField(doc, groupField, "a", useDv);
-    doc.add(new StringField("airport", "dus", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("dus")));
 
     w.addDocument(doc);
 
     // 4
     doc = new Document();
     addField(doc, groupField, "b", useDv);
-    doc.add(new StringField("airport", "ams", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
     addField(doc, groupField, "b", useDv);
-    doc.add(new StringField("airport", "ams", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
     w.addDocument(doc);
 
     // 6
     doc = new Document();
     addField(doc, groupField, "b", useDv);
-    doc.add(new StringField("airport", "ams", Field.Store.NO));
+    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
     w.addDocument(doc);
     w.commit();
 
@@ -346,7 +351,7 @@
 
     w.shutdown();
     IndexSearcher indexSearcher = newSearcher(DirectoryReader.open(dir));
-    AbstractGroupFacetCollector groupedAirportFacetCollector = createRandomCollector(groupField, "airport", null, true);
+    AbstractGroupFacetCollector groupedAirportFacetCollector = createRandomCollector(groupField + "_dv", "airport", null, true);
     indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector);
     TermGroupFacetCollector.GroupedFacetResult airportResult = groupedAirportFacetCollector.mergeSegmentResults(10, 0, false);
     assertEquals(3, airportResult.getTotalCount());
@@ -364,10 +369,8 @@
   }
 
   private void addField(Document doc, String field, String value, boolean canUseIDV) {
-    doc.add(new StringField(field, value, Field.Store.NO));
-    if (canUseIDV) {
-      doc.add(new SortedDocValuesField(field + "_dv", new BytesRef(value)));
-    }
+    assert canUseIDV;
+    doc.add(new SortedDocValuesField(field + "_dv", new BytesRef(value)));
   }
 
   public void testRandom() throws Exception {
@@ -386,7 +389,6 @@
         if (VERBOSE) {
           System.out.println("TEST: searchIter=" + searchIter);
         }
-        boolean useDv = !multipleFacetsPerDocument && context.useDV && random.nextBoolean();
         String searchTerm = context.contentStrings[random.nextInt(context.contentStrings.length)];
         int limit = random.nextInt(context.facetValues.size());
         int offset = random.nextInt(context.facetValues.size() - limit);
@@ -409,7 +411,7 @@
         }
 
         GroupedFacetResult expectedFacetResult = createExpectedFacetResult(searchTerm, context, offset, limit, minCount, orderByCount, facetPrefix);
-        AbstractGroupFacetCollector groupFacetCollector = createRandomCollector(useDv ? "group_dv" : "group", useDv ? "facet_dv" : "facet", facetPrefix, multipleFacetsPerDocument);
+        AbstractGroupFacetCollector groupFacetCollector = createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument);
         searcher.search(new TermQuery(new Term("content", searchTerm)), groupFacetCollector);
         TermGroupFacetCollector.GroupedFacetResult actualFacetResult = groupFacetCollector.mergeSegmentResults(size, minCount, orderByCount);
 
@@ -417,7 +419,6 @@
         List<TermGroupFacetCollector.FacetEntry> actualFacetEntries = actualFacetResult.getFacetEntries(offset, limit);
 
         if (VERBOSE) {
-          System.out.println("Use DV: " + useDv);
           System.out.println("Collector: " + groupFacetCollector.getClass().getSimpleName());
           System.out.println("Num group: " + context.numGroups);
           System.out.println("Num doc: " + context.numDocs);
@@ -514,35 +515,29 @@
             new MockAnalyzer(random)
         )
     );
-    boolean canUseDV = true;
-    boolean useDv = canUseDV && !multipleFacetValuesPerDocument && random.nextBoolean();
-
     Document doc = new Document();
     Document docNoGroup = new Document();
     Document docNoFacet = new Document();
     Document docNoGroupNoFacet = new Document();
     Field group = newStringField("group", "", Field.Store.NO);
-    Field groupDc = new SortedDocValuesField("group_dv", new BytesRef());
-    if (useDv) {
-      doc.add(groupDc);
-      docNoFacet.add(groupDc);
-    }
+    Field groupDc = new SortedDocValuesField("group", new BytesRef());
+    doc.add(groupDc);
+    docNoFacet.add(groupDc);
     doc.add(group);
     docNoFacet.add(group);
     Field[] facetFields;
-    if (useDv) {
-      assert !multipleFacetValuesPerDocument;
+    if (multipleFacetValuesPerDocument == false) {
       facetFields = new Field[2];
       facetFields[0] = newStringField("facet", "", Field.Store.NO);
       doc.add(facetFields[0]);
       docNoGroup.add(facetFields[0]);
-      facetFields[1] = new SortedDocValuesField("facet_dv", new BytesRef());
+      facetFields[1] = new SortedDocValuesField("facet", new BytesRef());
       doc.add(facetFields[1]);
       docNoGroup.add(facetFields[1]);
     } else {
       facetFields = multipleFacetValuesPerDocument ? new Field[2 + random.nextInt(6)] : new Field[1];
       for (int i = 0; i < facetFields.length; i++) {
-        facetFields[i] = newStringField("facet", "", Field.Store.NO);
+        facetFields[i] = new SortedSetDocValuesField("facet", new BytesRef());
         doc.add(facetFields[i]);
         docNoGroup.add(facetFields[i]);
       }
@@ -576,11 +571,7 @@
       if (random.nextInt(24) == 17) {
         // So we test the "doc doesn't have the group'd
         // field" case:
-        if (useDv) {
-          groupValue = "";
-        } else {
-          groupValue = null;
-        }
+        groupValue = "";
       } else {
         groupValue = groups.get(random.nextInt(groups.size()));
       }
@@ -592,8 +583,22 @@
       Map<String, Set<String>> facetToGroups = searchTermToFacetToGroups.get(contentStr);
 
       List<String> facetVals = new ArrayList<>();
-      if (useDv || random.nextInt(24) != 18) {
-        if (useDv) {
+      if (multipleFacetValuesPerDocument == false) {
+        String facetValue = facetValues.get(random.nextInt(facetValues.size()));
+        uniqueFacetValues.add(facetValue);
+        if (!facetToGroups.containsKey(facetValue)) {
+          facetToGroups.put(facetValue, new HashSet<String>());
+        }
+        Set<String> groupsInFacet = facetToGroups.get(facetValue);
+        groupsInFacet.add(groupValue);
+        if (groupsInFacet.size() > facetWithMostGroups) {
+          facetWithMostGroups = groupsInFacet.size();
+        }
+        facetFields[0].setStringValue(facetValue);
+        facetFields[1].setBytesValue(new BytesRef(facetValue));
+        facetVals.add(facetValue);
+      } else {
+        for (Field facetField : facetFields) {
           String facetValue = facetValues.get(random.nextInt(facetValues.size()));
           uniqueFacetValues.add(facetValue);
           if (!facetToGroups.containsKey(facetValue)) {
@@ -604,34 +609,8 @@
           if (groupsInFacet.size() > facetWithMostGroups) {
             facetWithMostGroups = groupsInFacet.size();
           }
-          facetFields[0].setStringValue(facetValue);
-          facetFields[1].setBytesValue(new BytesRef(facetValue));
+          facetField.setBytesValue(new BytesRef(facetValue));
           facetVals.add(facetValue);
-        } else {
-          for (Field facetField : facetFields) {
-            String facetValue = facetValues.get(random.nextInt(facetValues.size()));
-            uniqueFacetValues.add(facetValue);
-            if (!facetToGroups.containsKey(facetValue)) {
-              facetToGroups.put(facetValue, new HashSet<String>());
-            }
-            Set<String> groupsInFacet = facetToGroups.get(facetValue);
-            groupsInFacet.add(groupValue);
-            if (groupsInFacet.size() > facetWithMostGroups) {
-              facetWithMostGroups = groupsInFacet.size();
-            }
-            facetField.setStringValue(facetValue);
-            facetVals.add(facetValue);
-          }
-        }
-      } else {
-        uniqueFacetValues.add(null);
-        if (!facetToGroups.containsKey(null)) {
-          facetToGroups.put(null, new HashSet<String>());
-        }
-        Set<String> groupsInFacet = facetToGroups.get(null);
-        groupsInFacet.add(groupValue);
-        if (groupsInFacet.size() > facetWithMostGroups) {
-          facetWithMostGroups = groupsInFacet.size();
         }
       }
 
@@ -640,11 +619,10 @@
       }
 
       if (groupValue != null) {
-        if (useDv) {
-          groupDc.setBytesValue(new BytesRef(groupValue));
-        }
+        groupDc.setBytesValue(new BytesRef(groupValue));
         group.setStringValue(groupValue);
-      } else if (useDv) {
+      } else {
+        // TODO: the comment below is no longer true:
         // DV cannot have missing values:
         groupDc.setBytesValue(new BytesRef());
       }
@@ -663,7 +641,7 @@
     DirectoryReader reader = writer.getReader();
     writer.shutdown();
 
-    return new IndexContext(searchTermToFacetToGroups, reader, numDocs, dir, facetWithMostGroups, numGroups, contentBrs, uniqueFacetValues, useDv);
+    return new IndexContext(searchTermToFacetToGroups, reader, numDocs, dir, facetWithMostGroups, numGroups, contentBrs, uniqueFacetValues);
   }
 
   private GroupedFacetResult createExpectedFacetResult(String searchTerm, IndexContext context, int offset, int limit, int minCount, final boolean orderByCount, String facetPrefix) {
@@ -738,8 +716,6 @@
 
   private AbstractGroupFacetCollector createRandomCollector(String groupField, String facetField, String facetPrefix, boolean multipleFacetsPerDocument) {
     BytesRef facetPrefixBR = facetPrefix == null ? null : new BytesRef(facetPrefix);
-    // DocValues cannot be multi-valued:
-    assert !multipleFacetsPerDocument || !groupField.endsWith("_dv");
     return TermGroupFacetCollector.createTermGroupFacetCollector(groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random().nextInt(1024));
   }
 
@@ -764,10 +740,9 @@
     final int facetWithMostGroups;
     final int numGroups;
     final String[] contentStrings;
-    final boolean useDV;
 
     public IndexContext(Map<String, Map<String, Set<String>>> searchTermToFacetGroups, DirectoryReader r,
-                        int numDocs, Directory dir, int facetWithMostGroups, int numGroups, String[] contentStrings, NavigableSet<String> facetValues, boolean useDV) {
+                        int numDocs, Directory dir, int facetWithMostGroups, int numGroups, String[] contentStrings, NavigableSet<String> facetValues) {
       this.searchTermToFacetGroups = searchTermToFacetGroups;
       this.indexReader = r;
       this.numDocs = numDocs;
@@ -776,7 +751,6 @@
       this.numGroups = numGroups;
       this.contentStrings = contentStrings;
       this.facetValues = facetValues;
-      this.useDV = useDV;
     }
   }
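
The facet and airport fields above move from indexed StringFields to SortedSetDocValuesField, the doc values type that permits several values per document under one field name. A small illustrative helper (flightDoc is our name, not the patch's):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.SortedSetDocValuesField;
    import org.apache.lucene.util.BytesRef;

    final class MultiValuedFacetSketch {
      /** Repeating a SortedSetDocValuesField gives one doc several facet values. */
      static Document flightDoc(String... airports) {
        Document doc = new Document();
        for (String airport : airports) {
          doc.add(new SortedSetDocValuesField("airport", new BytesRef(airport)));
        }
        return doc;
      }
    }
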
 
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index 528f125..a987ca1 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -21,6 +21,8 @@
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -39,6 +41,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueStr;
@@ -52,6 +55,7 @@
 //   - test ties
 //   - test compound sort
 
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // we need missing support... i think?
 public class TestGrouping extends LuceneTestCase {
 
   public void testBasic() throws Exception {
@@ -120,10 +124,6 @@
 
     final Sort groupSort = Sort.RELEVANCE;
 
-    if (random().nextBoolean()) {
-      groupField += "_dv";
-    }
-
     final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, 10);
     indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
 
@@ -172,8 +172,7 @@
   }
 
   private void addGroupField(Document doc, String groupField, String value) {
-    doc.add(new TextField(groupField, value, Field.Store.YES));
-    doc.add(new SortedDocValuesField(groupField + "_dv", new BytesRef(value)));
+    doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
   }
 
   private AbstractFirstPassGroupingCollector<?> createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException {
@@ -569,10 +568,14 @@
         docs.add(doc);
         if (groupValue.group != null) {
           doc.add(newStringField("group", groupValue.group.utf8ToString(), Field.Store.YES));
+          doc.add(new SortedDocValuesField("group", BytesRef.deepCopyOf(groupValue.group)));
         }
         doc.add(newStringField("sort1", groupValue.sort1.utf8ToString(), Field.Store.NO));
+        doc.add(new SortedDocValuesField("sort1", BytesRef.deepCopyOf(groupValue.sort1)));
         doc.add(newStringField("sort2", groupValue.sort2.utf8ToString(), Field.Store.NO));
+        doc.add(new SortedDocValuesField("sort2", BytesRef.deepCopyOf(groupValue.sort2)));
         doc.add(new IntField("id", groupValue.id, Field.Store.NO));
+        doc.add(new NumericDocValuesField("id", groupValue.id));
         doc.add(newTextField("content", groupValue.content, Field.Store.NO));
         //System.out.println("TEST:     doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
       }
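
The hunk above pairs each indexed field with a doc values twin under the same field name: postings serve the TermQuery side while doc values serve sorting and grouping, with no FieldCache and no "_dv"-suffixed shadow field. A sketch of that dual-indexing pattern, with illustrative values:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.util.BytesRef;

    final class DualIndexSketch {
      static Document groupDoc(String group, int id) {
        Document doc = new Document();
        // Same field name, two representations: inverted terms plus sorted doc values.
        doc.add(new StringField("group", group, Field.Store.YES));
        doc.add(new SortedDocValuesField("group", new BytesRef(group)));
        // Numeric id: indexed for queries, doc values for resolving hits back to ids.
        doc.add(new IntField("id", id, Field.Store.NO));
        doc.add(new NumericDocValuesField("id", id));
        return doc;
      }
    }
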
@@ -642,7 +645,7 @@
           // For that reason we don't generate empty string
           // groups.
           randomValue = TestUtil.randomRealisticUnicodeString(random());
-          //randomValue = _TestUtil.randomSimpleString(random());
+          //randomValue = TestUtil.randomSimpleString(random());
         } while ("".equals(randomValue));
 
         groups.add(new BytesRef(randomValue));
@@ -670,22 +673,18 @@
                                                   dir,
                                                   newIndexWriterConfig(TEST_VERSION_CURRENT,
                                                                        new MockAnalyzer(random())));
-      boolean canUseIDV = true;
-
       Document doc = new Document();
       Document docNoGroup = new Document();
-      Field idvGroupField = new SortedDocValuesField("group_dv", new BytesRef());
-      if (canUseIDV) {
-        doc.add(idvGroupField);
-        docNoGroup.add(idvGroupField);
-      }
+      Field idvGroupField = new SortedDocValuesField("group", new BytesRef());
+      doc.add(idvGroupField);
+      docNoGroup.add(idvGroupField);
 
       Field group = newStringField("group", "", Field.Store.NO);
       doc.add(group);
-      Field sort1 = newStringField("sort1", "", Field.Store.NO);
+      Field sort1 = new SortedDocValuesField("sort1", new BytesRef());
       doc.add(sort1);
       docNoGroup.add(sort1);
-      Field sort2 = newStringField("sort2", "", Field.Store.NO);
+      Field sort2 = new SortedDocValuesField("sort2", new BytesRef());
       doc.add(sort2);
       docNoGroup.add(sort2);
       Field content = newTextField("content", "", Field.Store.NO);
@@ -693,7 +692,10 @@
       docNoGroup.add(content);
       IntField id = new IntField("id", 0, Field.Store.NO);
       doc.add(id);
+      NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
+      doc.add(idDV);
       docNoGroup.add(id);
+      docNoGroup.add(idDV);
       final GroupDoc[] groupDocs = new GroupDoc[numDocs];
       for(int i=0;i<numDocs;i++) {
         final BytesRef groupValue;
@@ -716,19 +718,19 @@
         groupDocs[i] = groupDoc;
         if (groupDoc.group != null) {
           group.setStringValue(groupDoc.group.utf8ToString());
-          if (canUseIDV) {
-            idvGroupField.setBytesValue(BytesRef.deepCopyOf(groupDoc.group));
-          }
-        } else if (canUseIDV) {
+          idvGroupField.setBytesValue(BytesRef.deepCopyOf(groupDoc.group));
+        } else {
+          // TODO: the comment below is no longer true:
           // Must explicitly set empty string, else eg if
           // the segment has all docs missing the field then
           // we get null back instead of empty BytesRef:
           idvGroupField.setBytesValue(new BytesRef());
         }
-        sort1.setStringValue(groupDoc.sort1.utf8ToString());
-        sort2.setStringValue(groupDoc.sort2.utf8ToString());
+        sort1.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort1));
+        sort2.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort2));
         content.setStringValue(groupDoc.content);
         id.setIntValue(groupDoc.id);
+        idDV.setLongValue(groupDoc.id);
         if (groupDoc.group == null) {
           w.addDocument(docNoGroup);
         } else {
@@ -742,405 +744,387 @@
       final DirectoryReader r = w.getReader();
       w.shutdown();
 
-      // NOTE: intentional but temporary field cache insanity!
-      final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
+      final NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
       DirectoryReader rBlocks = null;
       Directory dirBlocks = null;
 
-      try {
-        final IndexSearcher s = newSearcher(r);
-        if (VERBOSE) {
-          System.out.println("\nTEST: searcher=" + s);
-        }
-
-        if (SlowCompositeReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
-          canUseIDV = false;
-        } else {
-          canUseIDV = true;
-        }
-        final ShardState shards = new ShardState(s);
-
-        for(int contentID=0;contentID<3;contentID++) {
-          final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
-          for(ScoreDoc hit : hits) {
-            final GroupDoc gd = groupDocs[docIDToID.get(hit.doc)];
-            assertTrue(gd.score == 0.0);
-            gd.score = hit.score;
-            assertEquals(gd.id, docIDToID.get(hit.doc));
-          }
-        }
-
-        for(GroupDoc gd : groupDocs) {
-          assertTrue(gd.score != 0.0);
-        }
-
-        // Build 2nd index, where docs are added in blocks by
-        // group, so we can use single pass collector
-        dirBlocks = newDirectory();
-        rBlocks = getDocBlockReader(dirBlocks, groupDocs);
-        final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
-        final FieldCache.Ints docIDToIDBlocks = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(rBlocks), "id", false);
-
-        final IndexSearcher sBlocks = newSearcher(rBlocks);
-        final ShardState shardsBlocks = new ShardState(sBlocks);
-
-        // ReaderBlocks only increases maxDoc() vs reader, which
-        // means a monotonic shift in scores, so we can
-        // reliably remap them w/ Map:
-        final Map<String,Map<Float,Float>> scoreMap = new HashMap<>();
-
-        // Tricky: must separately set .score2, because the doc
-        // block index was created with possible deletions!
-        //System.out.println("fixup score2");
-        for(int contentID=0;contentID<3;contentID++) {
-          //System.out.println("  term=real" + contentID);
-          final Map<Float,Float> termScoreMap = new HashMap<>();
-          scoreMap.put("real"+contentID, termScoreMap);
-          //System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) +
-          //" dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID)));
-          final ScoreDoc[] hits = sBlocks.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
-          for(ScoreDoc hit : hits) {
-            final GroupDoc gd = groupDocsByID[docIDToIDBlocks.get(hit.doc)];
-            assertTrue(gd.score2 == 0.0);
-            gd.score2 = hit.score;
-            assertEquals(gd.id, docIDToIDBlocks.get(hit.doc));
-            //System.out.println("    score=" + gd.score + " score2=" + hit.score + " id=" + docIDToIDBlocks.get(hit.doc));
-            termScoreMap.put(gd.score, gd.score2);
-          }
-        }
-
-        for(int searchIter=0;searchIter<100;searchIter++) {
-
-          if (VERBOSE) {
-            System.out.println("\nTEST: searchIter=" + searchIter);
-          }
-
-          final String searchTerm = "real" + random().nextInt(3);
-          final boolean fillFields = random().nextBoolean();
-          boolean getScores = random().nextBoolean();
-          final boolean getMaxScores = random().nextBoolean();
-          final Sort groupSort = getRandomSort();
-          //final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)});
-          // TODO: also test null (= sort by relevance)
-          final Sort docSort = getRandomSort();
-
-          for(SortField sf : docSort.getSort()) {
-            if (sf.getType() == SortField.Type.SCORE) {
-              getScores = true;
-              break;
-            }
-          }
-
-          for(SortField sf : groupSort.getSort()) {
-            if (sf.getType() == SortField.Type.SCORE) {
-              getScores = true;
-              break;
-            }
-          }
-
-          final int topNGroups = TestUtil.nextInt(random(), 1, 30);
-          //final int topNGroups = 10;
-          final int docsPerGroup = TestUtil.nextInt(random(), 1, 50);
-
-          final int groupOffset = TestUtil.nextInt(random(), 0, (topNGroups - 1) / 2);
-          //final int groupOffset = 0;
-
-          final int docOffset = TestUtil.nextInt(random(), 0, docsPerGroup - 1);
-          //final int docOffset = 0;
-
-          final boolean doCache = random().nextBoolean();
-          final boolean doAllGroups = random().nextBoolean();
-          if (VERBOSE) {
-            System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq(new Term("content", searchTerm))  +" dFBlock=" + rBlocks.docFreq(new Term("content", searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
-          }
-
-          String groupField = "group";
-          if (canUseIDV && random().nextBoolean()) {
-            groupField += "_dv";
-          }
-          if (VERBOSE) {
-            System.out.println("  groupField=" + groupField);
-          }
-          final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups);
-          final CachingCollector cCache;
-          final Collector c;
-
-          final AbstractAllGroupsCollector<?> allGroupsCollector;
-          if (doAllGroups) {
-            allGroupsCollector = createAllGroupsCollector(c1, groupField);
-          } else {
-            allGroupsCollector = null;
-          }
-
-          final boolean useWrappingCollector = random().nextBoolean();
-
-          if (doCache) {
-            final double maxCacheMB = random().nextDouble();
-            if (VERBOSE) {
-              System.out.println("TEST: maxCacheMB=" + maxCacheMB);
-            }
-
-            if (useWrappingCollector) {
-              if (doAllGroups) {
-                cCache = CachingCollector.create(c1, true, maxCacheMB);
-                c = MultiCollector.wrap(cCache, allGroupsCollector);
-              } else {
-                c = cCache = CachingCollector.create(c1, true, maxCacheMB);
-              }
-            } else {
-              // Collect only into cache, then replay multiple times:
-              c = cCache = CachingCollector.create(false, true, maxCacheMB);
-            }
-          } else {
-            cCache = null;
-            if (doAllGroups) {
-              c = MultiCollector.wrap(c1, allGroupsCollector);
-            } else {
-              c = c1;
-            }
-          }
-
-          // Search top reader:
-          final Query query = new TermQuery(new Term("content", searchTerm));
-
-          s.search(query, c);
-
-          if (doCache && !useWrappingCollector) {
-            if (cCache.isCached()) {
-              // Replay for first-pass grouping
-              cCache.replay(c1);
-              if (doAllGroups) {
-                // Replay for all groups:
-                cCache.replay(allGroupsCollector);
-              }
-            } else {
-              // Replay by re-running search:
-              s.search(query, c1);
-              if (doAllGroups) {
-                s.search(query, allGroupsCollector);
-              }
-            }
-          }
-
-          // Get 1st pass top groups
-          final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(c1, groupOffset, fillFields);
-          final TopGroups<BytesRef> groupsResult;
-          if (VERBOSE) {
-            System.out.println("TEST: first pass topGroups");
-            if (topGroups == null) {
-              System.out.println("  null");
-            } else {
-              for (SearchGroup<BytesRef> searchGroup : topGroups) {
-                System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue) + ": " + Arrays.deepToString(searchGroup.sortValues));
-              }
-            }
-          }
-
-          // Get 1st pass top groups using shards
-
-          ValueHolder<Boolean> idvBasedImplsUsedSharded = new ValueHolder<>(false);
-          final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
-              groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, canUseIDV, false, idvBasedImplsUsedSharded);
-          final AbstractSecondPassGroupingCollector<?> c2;
-          if (topGroups != null) {
-
-            if (VERBOSE) {
-              System.out.println("TEST: topGroups");
-              for (SearchGroup<BytesRef> searchGroup : topGroups) {
-                System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue.utf8ToString()) + ": " + Arrays.deepToString(searchGroup.sortValues));
-              }
-            }
-
-            c2 = createSecondPassCollector(c1, groupField, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getScores, getMaxScores, fillFields);
-            if (doCache) {
-              if (cCache.isCached()) {
-                if (VERBOSE) {
-                  System.out.println("TEST: cache is intact");
-                }
-                cCache.replay(c2);
-              } else {
-                if (VERBOSE) {
-                  System.out.println("TEST: cache was too large");
-                }
-                s.search(query, c2);
-              }
-            } else {
-              s.search(query, c2);
-            }
-
-            if (doAllGroups) {
-              TopGroups<BytesRef> tempTopGroups = getTopGroups(c2, docOffset);
-              groupsResult = new TopGroups<>(tempTopGroups, allGroupsCollector.getGroupCount());
-            } else {
-              groupsResult = getTopGroups(c2, docOffset);
-            }
-          } else {
-            c2 = null;
-            groupsResult = null;
-            if (VERBOSE) {
-              System.out.println("TEST:   no results");
-            }
-          }
-
-          final TopGroups<BytesRef> expectedGroups = slowGrouping(groupDocs, searchTerm, fillFields, getScores, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset);
-
-          if (VERBOSE) {
-            if (expectedGroups == null) {
-              System.out.println("TEST: no expected groups");
-            } else {
-              System.out.println("TEST: expected groups totalGroupedHitCount=" + expectedGroups.totalGroupedHitCount);
-              for(GroupDocs<BytesRef> gd : expectedGroups.groups) {
-                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits + " scoreDocs.len=" + gd.scoreDocs.length);
-                for(ScoreDoc sd : gd.scoreDocs) {
-                  System.out.println("    id=" + sd.doc + " score=" + sd.score);
-                }
-              }
-            }
-
-            if (groupsResult == null) {
-              System.out.println("TEST: no matched groups");
-            } else {
-              System.out.println("TEST: matched groups totalGroupedHitCount=" + groupsResult.totalGroupedHitCount);
-              for(GroupDocs<BytesRef> gd : groupsResult.groups) {
-                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
-                for(ScoreDoc sd : gd.scoreDocs) {
-                  System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
-                }
-              }
-
-              if (searchIter == 14) {
-                for(int docIDX=0;docIDX<s.getIndexReader().maxDoc();docIDX++) {
-                  System.out.println("ID=" + docIDToID.get(docIDX) + " explain=" + s.explain(query, docIDX));
-                }
-              }
-            }
-
-            if (topGroupsShards == null) {
-              System.out.println("TEST: no matched-merged groups");
-            } else {
-              System.out.println("TEST: matched-merged groups totalGroupedHitCount=" + topGroupsShards.totalGroupedHitCount);
-              for(GroupDocs<BytesRef> gd : topGroupsShards.groups) {
-                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
-                for(ScoreDoc sd : gd.scoreDocs) {
-                  System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
-                }
-              }
-            }
-          }
-
-          assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores, groupField.endsWith("_dv"));
-
-          // Confirm merged shards match:
-          assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores, idvBasedImplsUsedSharded.value);
-          if (topGroupsShards != null) {
-            verifyShards(shards.docStarts, topGroupsShards);
-          }
-
-          final boolean needsScores = getScores || getMaxScores || docSort == null;
-          final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
-          final TermAllGroupsCollector allGroupsCollector2;
-          final Collector c4;
-          if (doAllGroups) {
-            // NOTE: must be "group" and not "group_dv"
-            // (groupField) because we didn't index doc
-            // values in the block index:
-            allGroupsCollector2 = new TermAllGroupsCollector("group");
-            c4 = MultiCollector.wrap(c3, allGroupsCollector2);
-          } else {
-            allGroupsCollector2 = null;
-            c4 = c3;
-          }
-          // Get block grouping result:
-          sBlocks.search(query, c4);
-          @SuppressWarnings({"unchecked","rawtypes"})
-          final TopGroups<BytesRef> tempTopGroupsBlocks = (TopGroups<BytesRef>) c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
-          final TopGroups<BytesRef> groupsResultBlocks;
-          if (doAllGroups && tempTopGroupsBlocks != null) {
-            assertEquals((int) tempTopGroupsBlocks.totalGroupCount, allGroupsCollector2.getGroupCount());
-            groupsResultBlocks = new TopGroups<>(tempTopGroupsBlocks, allGroupsCollector2.getGroupCount());
-          } else {
-            groupsResultBlocks = tempTopGroupsBlocks;
-          }
-
-          if (VERBOSE) {
-            if (groupsResultBlocks == null) {
-              System.out.println("TEST: no block groups");
-            } else {
-              System.out.println("TEST: block groups totalGroupedHitCount=" + groupsResultBlocks.totalGroupedHitCount);
-              boolean first = true;
-              for(GroupDocs<BytesRef> gd : groupsResultBlocks.groups) {
-                System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()) + " totalHits=" + gd.totalHits);
-                for(ScoreDoc sd : gd.scoreDocs) {
-                  System.out.println("    id=" + docIDToIDBlocks.get(sd.doc) + " score=" + sd.score);
-                  if (first) {
-                    System.out.println("explain: " + sBlocks.explain(query, sd.doc));
-                    first = false;
-                  }
-                }
-              }
-            }
-          }
-
-          // Get shard'd block grouping result:
-          // Block index does not index DocValues so we pass
-          // false for canUseIDV:
-          final TopGroups<BytesRef> topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query,
-              groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, false, false, new ValueHolder<>(false));
-
-          if (expectedGroups != null) {
-            // Fixup scores for reader2
-            for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
-              for(ScoreDoc hit : groupDocsHits.scoreDocs) {
-                final GroupDoc gd = groupDocsByID[hit.doc];
-                assertEquals(gd.id, hit.doc);
-                //System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score);
-                hit.score = gd.score2;
-              }
-            }
-
-            final SortField[] sortFields = groupSort.getSort();
-            final Map<Float,Float> termScoreMap = scoreMap.get(searchTerm);
-            for(int groupSortIDX=0;groupSortIDX<sortFields.length;groupSortIDX++) {
-              if (sortFields[groupSortIDX].getType() == SortField.Type.SCORE) {
-                for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
-                  if (groupDocsHits.groupSortValues != null) {
-                    //System.out.println("remap " + groupDocsHits.groupSortValues[groupSortIDX] + " to " + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]));
-                    groupDocsHits.groupSortValues[groupSortIDX] = termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]);
-                    assertNotNull(groupDocsHits.groupSortValues[groupSortIDX]);
-                  }
-                }
-              }
-            }
-
-            final SortField[] docSortFields = docSort.getSort();
-            for(int docSortIDX=0;docSortIDX<docSortFields.length;docSortIDX++) {
-              if (docSortFields[docSortIDX].getType() == SortField.Type.SCORE) {
-                for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
-                  for(ScoreDoc _hit : groupDocsHits.scoreDocs) {
-                    FieldDoc hit = (FieldDoc) _hit;
-                    if (hit.fields != null) {
-                      hit.fields[docSortIDX] = termScoreMap.get(hit.fields[docSortIDX]);
-                      assertNotNull(hit.fields[docSortIDX]);
-                    }
-                  }
-                }
-              }
-            }
-          }
-
-          assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, true, getScores, false);
-          assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
-        }
-      } finally {
-        QueryUtils.purgeFieldCache(r);
-        if (rBlocks != null) {
-          QueryUtils.purgeFieldCache(rBlocks);
+      final IndexSearcher s = newSearcher(r);
+      if (VERBOSE) {
+        System.out.println("\nTEST: searcher=" + s);
+      }
+      
+      final ShardState shards = new ShardState(s);
+      
+      for(int contentID=0;contentID<3;contentID++) {
+        final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
+        for(ScoreDoc hit : hits) {
+          final GroupDoc gd = groupDocs[(int) docIDToID.get(hit.doc)];
+          assertTrue(gd.score == 0.0);
+          gd.score = hit.score;
+          assertEquals(gd.id, docIDToID.get(hit.doc));
         }
       }
-
+      
+      for(GroupDoc gd : groupDocs) {
+        assertTrue(gd.score != 0.0);
+      }
+      
+      // Build 2nd index, where docs are added in blocks by
+      // group, so we can use single pass collector
+      dirBlocks = newDirectory();
+      rBlocks = getDocBlockReader(dirBlocks, groupDocs);
+      final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
+      final NumericDocValues docIDToIDBlocks = MultiDocValues.getNumericValues(rBlocks, "id");
+      assertNotNull(docIDToIDBlocks);
+      
+      final IndexSearcher sBlocks = newSearcher(rBlocks);
+      final ShardState shardsBlocks = new ShardState(sBlocks);
+      
+      // ReaderBlocks only increases maxDoc() vs reader, which
+      // means a monotonic shift in scores, so we can
+      // reliably remap them w/ Map:
+      final Map<String,Map<Float,Float>> scoreMap = new HashMap<>();
+      
+      // Tricky: must separately set .score2, because the doc
+      // block index was created with possible deletions!
+      //System.out.println("fixup score2");
+      for(int contentID=0;contentID<3;contentID++) {
+        //System.out.println("  term=real" + contentID);
+        final Map<Float,Float> termScoreMap = new HashMap<>();
+        scoreMap.put("real"+contentID, termScoreMap);
+        //System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) +
+        //" dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID)));
+        final ScoreDoc[] hits = sBlocks.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
+        for(ScoreDoc hit : hits) {
+          final GroupDoc gd = groupDocsByID[(int) docIDToIDBlocks.get(hit.doc)];
+          assertTrue(gd.score2 == 0.0);
+          gd.score2 = hit.score;
+          assertEquals(gd.id, docIDToIDBlocks.get(hit.doc));
+          //System.out.println("    score=" + gd.score + " score2=" + hit.score + " id=" + docIDToIDBlocks.get(hit.doc));
+          termScoreMap.put(gd.score, gd.score2);
+        }
+      }
+      
+      for(int searchIter=0;searchIter<100;searchIter++) {
+        
+        if (VERBOSE) {
+          System.out.println("\nTEST: searchIter=" + searchIter);
+        }
+        
+        final String searchTerm = "real" + random().nextInt(3);
+        final boolean fillFields = random().nextBoolean();
+        boolean getScores = random().nextBoolean();
+        final boolean getMaxScores = random().nextBoolean();
+        final Sort groupSort = getRandomSort();
+        //final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)});
+        // TODO: also test null (= sort by relevance)
+        final Sort docSort = getRandomSort();
+        
+        for(SortField sf : docSort.getSort()) {
+          if (sf.getType() == SortField.Type.SCORE) {
+            getScores = true;
+            break;
+          }
+        }
+        
+        for(SortField sf : groupSort.getSort()) {
+          if (sf.getType() == SortField.Type.SCORE) {
+            getScores = true;
+            break;
+          }
+        }
+        
+        final int topNGroups = TestUtil.nextInt(random(), 1, 30);
+        //final int topNGroups = 10;
+        final int docsPerGroup = TestUtil.nextInt(random(), 1, 50);
+        
+        final int groupOffset = TestUtil.nextInt(random(), 0, (topNGroups - 1) / 2);
+        //final int groupOffset = 0;
+        
+        final int docOffset = TestUtil.nextInt(random(), 0, docsPerGroup - 1);
+        //final int docOffset = 0;
+        
+        final boolean doCache = random().nextBoolean();
+        final boolean doAllGroups = random().nextBoolean();
+        if (VERBOSE) {
+          System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq(new Term("content", searchTerm))  +" dFBlock=" + rBlocks.docFreq(new Term("content", searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
+        }
+        
+        String groupField = "group";
+        if (VERBOSE) {
+          System.out.println("  groupField=" + groupField);
+        }
+        final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups);
+        final CachingCollector cCache;
+        final Collector c;
+        
+        final AbstractAllGroupsCollector<?> allGroupsCollector;
+        if (doAllGroups) {
+          allGroupsCollector = createAllGroupsCollector(c1, groupField);
+        } else {
+          allGroupsCollector = null;
+        }
+        
+        final boolean useWrappingCollector = random().nextBoolean();
+        
+        if (doCache) {
+          final double maxCacheMB = random().nextDouble();
+          if (VERBOSE) {
+            System.out.println("TEST: maxCacheMB=" + maxCacheMB);
+          }
+          
+          if (useWrappingCollector) {
+            if (doAllGroups) {
+              cCache = CachingCollector.create(c1, true, maxCacheMB);
+              c = MultiCollector.wrap(cCache, allGroupsCollector);
+            } else {
+              c = cCache = CachingCollector.create(c1, true, maxCacheMB);
+            }
+          } else {
+            // Collect only into cache, then replay multiple times:
+            c = cCache = CachingCollector.create(false, true, maxCacheMB);
+          }
+        } else {
+          cCache = null;
+          if (doAllGroups) {
+            c = MultiCollector.wrap(c1, allGroupsCollector);
+          } else {
+            c = c1;
+          }
+        }
+        
+        // Search top reader:
+        final Query query = new TermQuery(new Term("content", searchTerm));
+        
+        s.search(query, c);
+        
+        if (doCache && !useWrappingCollector) {
+          if (cCache.isCached()) {
+            // Replay for first-pass grouping
+            cCache.replay(c1);
+            if (doAllGroups) {
+              // Replay for all groups:
+              cCache.replay(allGroupsCollector);
+            }
+          } else {
+            // Replay by re-running search:
+            s.search(query, c1);
+            if (doAllGroups) {
+              s.search(query, allGroupsCollector);
+            }
+          }
+        }
+        
+        // Get 1st pass top groups
+        final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(c1, groupOffset, fillFields);
+        final TopGroups<BytesRef> groupsResult;
+        if (VERBOSE) {
+          System.out.println("TEST: first pass topGroups");
+          if (topGroups == null) {
+            System.out.println("  null");
+          } else {
+            for (SearchGroup<BytesRef> searchGroup : topGroups) {
+              System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue) + ": " + Arrays.deepToString(searchGroup.sortValues));
+            }
+          }
+        }
+        
+        // Get 1st pass top groups using shards
+        
+        final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
+            groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, true, false);
+        final AbstractSecondPassGroupingCollector<?> c2;
+        if (topGroups != null) {
+          
+          if (VERBOSE) {
+            System.out.println("TEST: topGroups");
+            for (SearchGroup<BytesRef> searchGroup : topGroups) {
+              System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue.utf8ToString()) + ": " + Arrays.deepToString(searchGroup.sortValues));
+            }
+          }
+          
+          c2 = createSecondPassCollector(c1, groupField, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getScores, getMaxScores, fillFields);
+          if (doCache) {
+            if (cCache.isCached()) {
+              if (VERBOSE) {
+                System.out.println("TEST: cache is intact");
+              }
+              cCache.replay(c2);
+            } else {
+              if (VERBOSE) {
+                System.out.println("TEST: cache was too large");
+              }
+              s.search(query, c2);
+            }
+          } else {
+            s.search(query, c2);
+          }
+          
+          if (doAllGroups) {
+            TopGroups<BytesRef> tempTopGroups = getTopGroups(c2, docOffset);
+            groupsResult = new TopGroups<>(tempTopGroups, allGroupsCollector.getGroupCount());
+          } else {
+            groupsResult = getTopGroups(c2, docOffset);
+          }
+        } else {
+          c2 = null;
+          groupsResult = null;
+          if (VERBOSE) {
+            System.out.println("TEST:   no results");
+          }
+        }
+        
+        final TopGroups<BytesRef> expectedGroups = slowGrouping(groupDocs, searchTerm, fillFields, getScores, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset);
+        
+        if (VERBOSE) {
+          if (expectedGroups == null) {
+            System.out.println("TEST: no expected groups");
+          } else {
+            System.out.println("TEST: expected groups totalGroupedHitCount=" + expectedGroups.totalGroupedHitCount);
+            for(GroupDocs<BytesRef> gd : expectedGroups.groups) {
+              System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits + " scoreDocs.len=" + gd.scoreDocs.length);
+              for(ScoreDoc sd : gd.scoreDocs) {
+                System.out.println("    id=" + sd.doc + " score=" + sd.score);
+              }
+            }
+          }
+          
+          if (groupsResult == null) {
+            System.out.println("TEST: no matched groups");
+          } else {
+            System.out.println("TEST: matched groups totalGroupedHitCount=" + groupsResult.totalGroupedHitCount);
+            for(GroupDocs<BytesRef> gd : groupsResult.groups) {
+              System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
+              for(ScoreDoc sd : gd.scoreDocs) {
+                System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
+              }
+            }
+            
+            if (searchIter == 14) {
+              for(int docIDX=0;docIDX<s.getIndexReader().maxDoc();docIDX++) {
+                System.out.println("ID=" + docIDToID.get(docIDX) + " explain=" + s.explain(query, docIDX));
+              }
+            }
+          }
+          
+          if (topGroupsShards == null) {
+            System.out.println("TEST: no matched-merged groups");
+          } else {
+            System.out.println("TEST: matched-merged groups totalGroupedHitCount=" + topGroupsShards.totalGroupedHitCount);
+            for(GroupDocs<BytesRef> gd : topGroupsShards.groups) {
+              System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
+              for(ScoreDoc sd : gd.scoreDocs) {
+                System.out.println("    id=" + docIDToID.get(sd.doc) + " score=" + sd.score);
+              }
+            }
+          }
+        }
+        
+        assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores, true);
+        
+        // Confirm merged shards match:
+        assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores, true);
+        if (topGroupsShards != null) {
+          verifyShards(shards.docStarts, topGroupsShards);
+        }
+        
+        final boolean needsScores = getScores || getMaxScores || docSort == null;
+        final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
+        final TermAllGroupsCollector allGroupsCollector2;
+        final Collector c4;
+        if (doAllGroups) {
+          // NOTE: must be "group" and not "group_dv"
+          // (groupField) because we didn't index doc
+          // values in the block index:
+          allGroupsCollector2 = new TermAllGroupsCollector("group");
+          c4 = MultiCollector.wrap(c3, allGroupsCollector2);
+        } else {
+          allGroupsCollector2 = null;
+          c4 = c3;
+        }
+        // Get block grouping result:
+        sBlocks.search(query, c4);
+        @SuppressWarnings({"unchecked","rawtypes"})
+        final TopGroups<BytesRef> tempTopGroupsBlocks = (TopGroups<BytesRef>) c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
+        final TopGroups<BytesRef> groupsResultBlocks;
+        if (doAllGroups && tempTopGroupsBlocks != null) {
+          assertEquals((int) tempTopGroupsBlocks.totalGroupCount, allGroupsCollector2.getGroupCount());
+          groupsResultBlocks = new TopGroups<>(tempTopGroupsBlocks, allGroupsCollector2.getGroupCount());
+        } else {
+          groupsResultBlocks = tempTopGroupsBlocks;
+        }
+        
+        if (VERBOSE) {
+          if (groupsResultBlocks == null) {
+            System.out.println("TEST: no block groups");
+          } else {
+            System.out.println("TEST: block groups totalGroupedHitCount=" + groupsResultBlocks.totalGroupedHitCount);
+            boolean first = true;
+            for(GroupDocs<BytesRef> gd : groupsResultBlocks.groups) {
+              System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()) + " totalHits=" + gd.totalHits);
+              for(ScoreDoc sd : gd.scoreDocs) {
+                System.out.println("    id=" + docIDToIDBlocks.get(sd.doc) + " score=" + sd.score);
+                if (first) {
+                  System.out.println("explain: " + sBlocks.explain(query, sd.doc));
+                  first = false;
+                }
+              }
+            }
+          }
+        }
+        
+        // Get shard'd block grouping result:
+        final TopGroups<BytesRef> topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query,
+            groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, false, false);
+        
+        if (expectedGroups != null) {
+          // Fixup scores for reader2
+          for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
+            for(ScoreDoc hit : groupDocsHits.scoreDocs) {
+              final GroupDoc gd = groupDocsByID[hit.doc];
+              assertEquals(gd.id, hit.doc);
+              //System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score);
+              hit.score = gd.score2;
+            }
+          }
+          
+          final SortField[] sortFields = groupSort.getSort();
+          final Map<Float,Float> termScoreMap = scoreMap.get(searchTerm);
+          for(int groupSortIDX=0;groupSortIDX<sortFields.length;groupSortIDX++) {
+            if (sortFields[groupSortIDX].getType() == SortField.Type.SCORE) {
+              for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
+                if (groupDocsHits.groupSortValues != null) {
+                  //System.out.println("remap " + groupDocsHits.groupSortValues[groupSortIDX] + " to " + termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]));
+                  groupDocsHits.groupSortValues[groupSortIDX] = termScoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]);
+                  assertNotNull(groupDocsHits.groupSortValues[groupSortIDX]);
+                }
+              }
+            }
+          }
+          
+          final SortField[] docSortFields = docSort.getSort();
+          for(int docSortIDX=0;docSortIDX<docSortFields.length;docSortIDX++) {
+            if (docSortFields[docSortIDX].getType() == SortField.Type.SCORE) {
+              for (GroupDocs<?> groupDocsHits : expectedGroups.groups) {
+                for(ScoreDoc _hit : groupDocsHits.scoreDocs) {
+                  FieldDoc hit = (FieldDoc) _hit;
+                  if (hit.fields != null) {
+                    hit.fields[docSortIDX] = termScoreMap.get(hit.fields[docSortIDX]);
+                    assertNotNull(hit.fields[docSortIDX]);
+                  }
+                }
+              }
+            }
+          }
+        }
+        
+        assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, true, getScores, false);
+        assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
+      }
+      
       r.close();
       dir.close();
-
+      
       rBlocks.close();
       dirBlocks.close();
     }
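
The test loop above exercises Lucene's standard two-pass grouping flow, optionally routing hits through a CachingCollector so the second pass can replay cached hits instead of re-searching. A minimal sketch of that flow, reusing the loop's variables and assuming the term-based collectors of the grouping module with 4.x-era signatures (the collector names are not part of this patch):

    // Sketch only; assumes TermFirstPassGroupingCollector /
    // TermSecondPassGroupingCollector from the grouping module.
    TermFirstPassGroupingCollector c1 =
        new TermFirstPassGroupingCollector("group", groupSort, groupOffset + topNGroups);
    CachingCollector cache = CachingCollector.create(c1, true, maxCacheMB);
    searcher.search(query, cache);

    Collection<SearchGroup<BytesRef>> top = c1.getTopGroups(groupOffset, true);
    if (top != null) {
      TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector(
          "group", top, groupSort, docSort, docOffset + docsPerGroup,
          getScores, getMaxScores, true);
      if (cache.isCached()) {
        cache.replay(c2);           // cache fit in maxCacheMB: replay hits
      } else {
        searcher.search(query, c2); // cache overflowed: re-run the query
      }
      TopGroups<BytesRef> groups = c2.getTopGroups(docOffset);
    }
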
@@ -1158,7 +1142,7 @@
   }
 
   private TopGroups<BytesRef> searchShards(IndexSearcher topSearcher, ShardSearcher[] subSearchers, Query query, Sort groupSort, Sort docSort, int groupOffset, int topNGroups, int docOffset,
-                                           int topNDocs, boolean getScores, boolean getMaxScores, boolean canUseIDV, boolean preFlex, ValueHolder<Boolean> usedIdvBasedImpl) throws Exception {
+                                           int topNDocs, boolean getScores, boolean getMaxScores, boolean canUseIDV, boolean preFlex) throws Exception {
 
    // TODO: swap in caching, all groups collector here
    // too...
@@ -1182,10 +1166,6 @@
     }
 
     String groupField = "group";
-    if (shardsCanUseIDV && random().nextBoolean()) {
-      groupField += "_dv";
-      usedIdvBasedImpl.value = true;
-    }
 
     for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
 
@@ -1257,7 +1237,7 @@
     }
   }
 
-  private void assertEquals(FieldCache.Ints docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
+  private void assertEquals(NumericDocValues docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
     if (expected == null) {
       assertNull(actual);
       return;
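
The assertEquals signature change above is the tail end of the same migration: the test's docID-to-id map is now a NumericDocValues instance instead of a FieldCache.Ints array. A minimal sketch of the lookup, assuming the "id" field is indexed as a NumericDocValuesField as the test does:

    // Composite-reader view over the per-segment "id" doc values:
    NumericDocValues docIDToID = MultiDocValues.getNumericValues(reader, "id");
    // Returns null only if no document has the field; the test asserts non-null.
    int myID = (int) docIDToID.get(luceneDocID);
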
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
index 56545b5..6eb251d 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsCollector.java
@@ -21,10 +21,10 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.BytesRef;
@@ -85,7 +85,7 @@
 
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-      docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
+      docTermOrds = DocValues.getSortedSet(context.reader(), field);
     }
   }
 
@@ -107,7 +107,7 @@
 
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-      fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
+      fromDocTerms = DocValues.getBinary(context.reader(), field);
     }
   }
 
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
index 851ef63..e347b87 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsWithScoreCollector.java
@@ -21,10 +21,10 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.ArrayUtil;
@@ -131,7 +131,7 @@
 
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-      fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
+      fromDocTerms = DocValues.getBinary(context.reader(), field);
     }
 
     static class Avg extends SV {
@@ -217,7 +217,7 @@
 
     @Override
     protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-      fromDocTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
+      fromDocTermOrds = DocValues.getSortedSet(context.reader(), field);
     }
 
     static class Avg extends MV {
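
Both join collectors above follow the same mechanical migration: the per-segment FieldCache lookups in doSetNextReader become DocValues helper calls, used without null checks. A condensed before/after sketch (in the real classes the single- and multi-valued variants live in separate SV/MV collector subclasses):

    // Fragment of a SimpleCollector subclass; 'field' is the join field name.
    BinaryDocValues fromDocTerms;       // single-valued variant
    SortedSetDocValues fromDocTermOrds; // multi-valued variant

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      // Before: FieldCache.DEFAULT.getTerms(context.reader(), field, false)
      fromDocTerms = DocValues.getBinary(context.reader(), field);
      // Before: FieldCache.DEFAULT.getDocTermOrds(context.reader(), field)
      fromDocTermOrds = DocValues.getSortedSet(context.reader(), field);
    }
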
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 849e0d5..2b49ba1 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -17,6 +17,13 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.*;
@@ -27,13 +34,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.*;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-
 public class TestBlockJoin extends LuceneTestCase {
 
   // One resume...
@@ -451,7 +451,7 @@
       final String[] values = fields[fieldID] = new String[valueCount];
       for(int i=0;i<valueCount;i++) {
         values[i] = TestUtil.randomRealisticUnicodeString(random());
-        //values[i] = _TestUtil.randomSimpleString(random);
+        //values[i] = TestUtil.randomSimpleString(random());
       }
     }
 
@@ -511,9 +511,18 @@
       parentDoc.add(id);
       parentJoinDoc.add(id);
       parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
+      id = new NumericDocValuesField("parentID", parentDocID);
+      parentDoc.add(id);
+      parentJoinDoc.add(id);
+      parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
       for(int field=0;field<parentFields.length;field++) {
         if (random().nextDouble() < 0.9) {
-          Field f = newStringField("parent" + field, parentFields[field][random().nextInt(parentFields[field].length)], Field.Store.NO);
+          String s = parentFields[field][random().nextInt(parentFields[field].length)];
+          Field f = newStringField("parent" + field, s, Field.Store.NO);
+          parentDoc.add(f);
+          parentJoinDoc.add(f);
+
+          f = new SortedDocValuesField("parent" + field, new BytesRef(s));
           parentDoc.add(f);
           parentJoinDoc.add(f);
         }
@@ -548,10 +557,18 @@
         Field childID = new IntField("childID", childDocID, Field.Store.YES);
         childDoc.add(childID);
         joinChildDoc.add(childID);
+        childID = new NumericDocValuesField("childID", childDocID);
+        childDoc.add(childID);
+        joinChildDoc.add(childID);
 
         for(int childFieldID=0;childFieldID<childFields.length;childFieldID++) {
           if (random().nextDouble() < 0.9) {
-            Field f = newStringField("child" + childFieldID, childFields[childFieldID][random().nextInt(childFields[childFieldID].length)], Field.Store.NO);
+            String s = childFields[childFieldID][random().nextInt(childFields[childFieldID].length)];
+            Field f = newStringField("child" + childFieldID, s, Field.Store.NO);
+            childDoc.add(f);
+            joinChildDoc.add(f);
+
+            f = new SortedDocValuesField("child" + childFieldID, new BytesRef(s));
             childDoc.add(f);
             joinChildDoc.add(f);
           }
@@ -727,7 +744,7 @@
                                        parentAndChildSort);
 
       if (VERBOSE) {
-        System.out.println("\nTEST: normal index gets " + results.totalHits + " hits");
+        System.out.println("\nTEST: normal index gets " + results.totalHits + " hits; sort=" + parentAndChildSort);
         final ScoreDoc[] hits = results.scoreDocs;
         for(int hitIDX=0;hitIDX<hits.length;hitIDX++) {
           final StoredDocument doc = s.doc(hits[hitIDX].doc);
@@ -735,7 +752,7 @@
           System.out.println("  parentID=" + doc.get("parentID") + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")");
           FieldDoc fd = (FieldDoc) hits[hitIDX];
           if (fd.fields != null) {
-            System.out.print("    ");
+            System.out.print("    " + fd.fields.length + " sort values: ");
             for(Object o : fd.fields) {
               if (o instanceof BytesRef) {
                 System.out.print(((BytesRef) o).utf8ToString() + " ");
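
The TestBlockJoin changes apply the same recipe on the indexing side: every field the test sorts on is now written twice under one name, once as an inverted/stored field and once as a doc-values field, so sorting never needs uninversion. A condensed sketch of the pattern (childDocID and value are placeholders for the test's random data):

    Document doc = new Document();
    doc.add(new IntField("childID", childDocID, Field.Store.YES)); // stored + indexed
    doc.add(new NumericDocValuesField("childID", childDocID));     // numeric sorting
    doc.add(new StringField("child0", value, Field.Store.NO));         // indexed terms
    doc.add(new SortedDocValuesField("child0", new BytesRef(value)));  // string sorting
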
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
index 0f9162a..72a9247 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.NoMergePolicy;
@@ -58,14 +59,17 @@
     List<Document> docs = new ArrayList<>();
     Document document = new Document();
     document.add(new StringField("field2", "a", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("a")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "b", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("b")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "c", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -78,14 +82,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "c", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "d", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("d")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "e", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -97,14 +104,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "e", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "f", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("f")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "g", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -116,14 +126,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "g", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "h", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("h")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "i", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -136,14 +149,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "i", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "j", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("j")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "k", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -155,14 +171,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "k", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "l", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("l")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "m", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
@@ -180,14 +199,17 @@
     docs.clear();
     document = new Document();
     document.add(new StringField("field2", "m", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
     document.add(new StringField("filter_1", "T", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "n", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("n")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
     document.add(new StringField("field2", "o", Field.Store.NO));
+    document.add(new SortedDocValuesField("field2", new BytesRef("o")));
     document.add(new StringField("filter_1", "F", Field.Store.NO));
     docs.add(document);
     document = new Document();
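
The long run of paired StringField/SortedDocValuesField additions above is one two-field recipe repeated per value; a small helper makes the pattern explicit (addIndexedAndDocValues is a hypothetical name, not part of this patch):

    static void addIndexedAndDocValues(Document doc, String name, String value) {
      doc.add(new StringField(name, value, Field.Store.NO));        // for matching
      doc.add(new SortedDocValuesField(name, new BytesRef(value))); // for sorting
    }

    // e.g., replacing each pair above:
    addIndexedAndDocValues(document, "field2", "a");
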
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index 1d6a278..1586b6d 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -34,10 +34,13 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
@@ -53,7 +56,6 @@
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
@@ -68,9 +70,11 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
+@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // we need SortedSet, docsWithField
 public class TestJoinUtil extends LuceneTestCase {
 
   public void testSimple() throws Exception {
@@ -89,20 +93,25 @@
     doc.add(new TextField("description", "random text", Field.Store.NO));
     doc.add(new TextField("name", "name1", Field.Store.NO));
     doc.add(new TextField(idField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
     doc.add(new TextField("price", "10.0", Field.Store.NO));
     doc.add(new TextField(idField, "2", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
     doc.add(new TextField(toField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
     doc.add(new TextField("price", "20.0", Field.Store.NO));
     doc.add(new TextField(idField, "3", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
     doc.add(new TextField(toField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 3
@@ -110,6 +119,7 @@
     doc.add(new TextField("description", "more random text", Field.Store.NO));
     doc.add(new TextField("name", "name2", Field.Store.NO));
     doc.add(new TextField(idField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
     w.addDocument(doc);
     w.commit();
 
@@ -117,14 +127,18 @@
     doc = new Document();
     doc.add(new TextField("price", "10.0", Field.Store.NO));
     doc.add(new TextField(idField, "5", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
     doc.add(new TextField(toField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
     doc.add(new TextField("price", "20.0", Field.Store.NO));
     doc.add(new TextField(idField, "6", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
     doc.add(new TextField(toField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -180,16 +194,18 @@
     doc.add(new TextField("description", "random text", Field.Store.NO));
     doc.add(new TextField("name", "name1", Field.Store.NO));
     doc.add(new TextField(idField, "0", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("0")));
     w.addDocument(doc);
 
     doc = new Document();
     doc.add(new TextField("price", "10.0", Field.Store.NO));
-    for(int i=0;i<300;i++){
-      doc.add(new TextField(toField, ""+i, Field.Store.NO));
-      if(!multipleValues){
-        w.addDocument(doc);
-        doc.removeFields(toField);
+
+    if (multipleValues) {
+      for(int i=0;i<300;i++) {
+        doc.add(new SortedSetDocValuesField(toField, new BytesRef(""+i)));
       }
+    } else {
+      doc.add(new SortedDocValuesField(toField, new BytesRef("0")));
     }
     w.addDocument(doc);
 
@@ -317,20 +333,25 @@
     doc.add(new TextField("description", "A random movie", Field.Store.NO));
     doc.add(new TextField("name", "Movie 1", Field.Store.NO));
     doc.add(new TextField(idField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
     doc.add(new TextField("subtitle", "The first subtitle of this movie", Field.Store.NO));
     doc.add(new TextField(idField, "2", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
     doc.add(new TextField(toField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
     doc.add(new TextField("subtitle", "random subtitle; random event movie", Field.Store.NO));
     doc.add(new TextField(idField, "3", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
     doc.add(new TextField(toField, "1", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
     w.addDocument(doc);
 
     // 3
@@ -338,6 +359,7 @@
     doc.add(new TextField("description", "A second random movie", Field.Store.NO));
     doc.add(new TextField("name", "Movie 2", Field.Store.NO));
     doc.add(new TextField(idField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
     w.addDocument(doc);
     w.commit();
 
@@ -345,14 +367,18 @@
     doc = new Document();
     doc.add(new TextField("subtitle", "a very random event happened during christmas night", Field.Store.NO));
     doc.add(new TextField(idField, "5", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
     doc.add(new TextField(toField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
     doc.add(new TextField("subtitle", "movie end movie test 123 test 123 random", Field.Store.NO));
     doc.add(new TextField(idField, "6", Field.Store.NO));
+    doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
     doc.add(new TextField(toField, "4", Field.Store.NO));
+    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -572,6 +598,11 @@
           context.fromDocuments.get(linkValue).add(docs[i]);
           context.randomValueFromDocs.get(value).add(docs[i]);
           document.add(newTextField(random(), "from", linkValue, Field.Store.NO));
+          if (multipleValuesPerDocument) {
+            document.add(new SortedSetDocValuesField("from", new BytesRef(linkValue)));
+          } else {
+            document.add(new SortedDocValuesField("from", new BytesRef(linkValue)));
+          }
         } else {
           if (!context.toDocuments.containsKey(linkValue)) {
             context.toDocuments.put(linkValue, new ArrayList<RandomDoc>());
@@ -583,6 +614,11 @@
           context.toDocuments.get(linkValue).add(docs[i]);
           context.randomValueToDocs.get(value).add(docs[i]);
           document.add(newTextField(random(), "to", linkValue, Field.Store.NO));
+          if (multipleValuesPerDocument) {
+            document.add(new SortedSetDocValuesField("to", new BytesRef(linkValue)));
+          } else {
+            document.add(new SortedDocValuesField("to", new BytesRef(linkValue)));
+          }
         }
       }
 
@@ -644,7 +680,7 @@
 
           @Override
           protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-            docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), fromField);
+            docTermOrds = DocValues.getSortedSet(context.reader(), fromField);
           }
 
           @Override
@@ -682,8 +718,8 @@
 
           @Override
           protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-            terms = FieldCache.DEFAULT.getTerms(context.reader(), fromField, true);
-            docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), fromField);
+            terms = DocValues.getBinary(context.reader(), fromField);
+            docsWithField = DocValues.getDocsWithField(context.reader(), fromField);
           }
 
           @Override
@@ -753,7 +789,7 @@
             @Override
             protected void doSetNextReader(AtomicReaderContext context) throws IOException {
               docBase = context.docBase;
-              docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), toField);
+              docTermOrds = DocValues.getSortedSet(context.reader(), toField);
             }
 
             @Override
@@ -781,7 +817,7 @@
 
           @Override
           protected void doSetNextReader(AtomicReaderContext context) throws IOException {
-            terms = FieldCache.DEFAULT.getTerms(context.reader(), toField, false);
+            terms = DocValues.getBinary(context.reader(), toField);
             docBase = context.docBase;
           }
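
TestJoinUtil's collectors show one more wrinkle: for single-valued join fields the test also fetches a Bits instance so documents without a value can be told apart from documents whose value is empty. A minimal per-segment setup sketch, assuming 4.x-era signatures:

    BinaryDocValues terms;
    Bits docsWithField;

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      terms = DocValues.getBinary(context.reader(), fromField);
      docsWithField = DocValues.getDocsWithField(context.reader(), fromField);
      // In collect(doc): check docsWithField.get(doc) before trusting terms,
      // since a missing value and an empty BytesRef otherwise look identical.
    }
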
 
diff --git a/lucene/misc/build.xml b/lucene/misc/build.xml
index b5ee7b2..2dad71e 100644
--- a/lucene/misc/build.xml
+++ b/lucene/misc/build.xml
@@ -32,6 +32,10 @@
     org/apache/lucene/misc/IndexMergeTool.class
   "/>
 
+  <property name="forbidden-rue-excludes" value="
+    org/apache/lucene/uninverting/FieldCache$CacheEntry.class
+  "/>
+
   <import file="../module-build.xml"/>
 
   <target name="install-cpptasks" unless="cpptasks.uptodate" depends="ivy-availability-check,ivy-fail,ivy-configure">
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
similarity index 97%
rename from lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
rename to lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
index 1905964..2015e4f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.lucene.index;
+package org.apache.lucene.uninverting;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -23,6 +23,15 @@
 import java.util.List;
 
 import org.apache.lucene.codecs.PostingsFormat; // javadocs
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
@@ -176,6 +185,8 @@
   public DocTermOrds(AtomicReader reader, Bits liveDocs, String field) throws IOException {
     this(reader, liveDocs, field, null, Integer.MAX_VALUE);
   }
+  
+  // TODO: instead of all these ctors and options, take termsenum!
 
   /** Inverts only terms starting w/ prefix */
   public DocTermOrds(AtomicReader reader, Bits liveDocs, String field, BytesRef termPrefix) throws IOException {
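
DocTermOrds keeps its uninversion role after the move to org.apache.lucene.uninverting, and the constructors shown in this hunk remain the entry point. A sketch of uninverting a multi-valued string field into SortedSetDocValues; iterator(AtomicReader) and the two-argument lookupOrd are 4.x-era signatures, stated here as assumptions:

    // "tags" is a hypothetical multi-valued indexed field.
    DocTermOrds ords = new DocTermOrds(reader, reader.getLiveDocs(), "tags");
    SortedSetDocValues dv = ords.iterator(reader);
    dv.setDocument(docID);
    BytesRef scratch = new BytesRef();
    long ord;
    while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
      dv.lookupOrd(ord, scratch); // scratch now holds the term for this ord
    }
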
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
new file mode 100644
index 0000000..68b266d
--- /dev/null
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
@@ -0,0 +1,387 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintStream;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Expert: Maintains caches of term values.
+ *
+ * <p>Created: May 19, 2004 11:13:14 AM
+ *
+ * @since   lucene 1.4
+ * @see FieldCacheSanityChecker
+ *
+ * @lucene.internal
+ */
+interface FieldCache {
+
+  /**
+   * Placeholder indicating creation of this cache is currently in-progress.
+   */
+  public static final class CreationPlaceholder {
+    Object value;
+  }
+
+  /**
+   * Interface to all parsers. It is used to parse different numeric types.
+   */
+  public interface Parser {
+    
+    /**
+     * Pulls a {@link TermsEnum} from the given {@link Terms}. This method allows certain parsers
+     * to filter the actual TermsEnum before the field cache is filled.
+     * 
+     * @param terms the {@link Terms} instance to create the {@link TermsEnum} from.
+     * @return a possibly filtered {@link TermsEnum} instance, this method must not return <code>null</code>.
+     * @throws IOException if an {@link IOException} occurs
+     */
+    public TermsEnum termsEnum(Terms terms) throws IOException;
+    
+    /** Parses this field's value */
+    public long parseValue(BytesRef term);
+  }
+
+  /** Expert: The cache used internally by sorting and range query classes. */
+  public static FieldCache DEFAULT = new FieldCacheImpl();
+
+  /**
+   * A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
+   * via {@link IntField}/{@link NumericTokenStream}.
+   */
+  public static final Parser NUMERIC_UTILS_INT_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return NumericUtils.prefixCodedToInt(term);
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER"; 
+    }
+  };
+
+  /**
+   * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
+   * via {@link FloatField}/{@link NumericTokenStream}.
+   */
+  public static final Parser NUMERIC_UTILS_FLOAT_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      int val = NumericUtils.prefixCodedToInt(term);
+      if (val<0) val ^= 0x7fffffff;
+      return val;
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER"; 
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
+    }
+  };
+
+  /**
+   * A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
+   * via {@link LongField}/{@link NumericTokenStream}.
+   */
+  public static final Parser NUMERIC_UTILS_LONG_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return NumericUtils.prefixCodedToLong(term);
+    }
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER"; 
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+    }
+  };
+
+  /**
+   * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
+   * via {@link DoubleField}/{@link NumericTokenStream}.
+   */
+  public static final Parser NUMERIC_UTILS_DOUBLE_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      long val = NumericUtils.prefixCodedToLong(term);
+      if (val<0) val ^= 0x7fffffffffffffffL;
+      return val;
+    }
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER"; 
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+    }
+  };
+  
+  /** Checks the internal cache for an appropriate entry, and if none is found,
+   *  reads the terms in <code>field</code> and returns a bit set of size
+   *  <code>reader.maxDoc()</code>, with a bit turned on for each docid that
+   *  has a value for this field.
+   */
+  public Bits getDocsWithField(AtomicReader reader, String field) throws IOException;
+
+  /**
+   * Returns a {@link NumericDocValues} over the values found in documents in the given
+   * field. If the field was indexed as {@link NumericDocValuesField}, it simply
+   * uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
+   * Otherwise, it checks the internal cache for an appropriate entry, and if
+   * none is found, reads the terms in <code>field</code> as longs and returns
+   * a {@link NumericDocValues} covering all <code>reader.maxDoc()</code>
+   * documents, with the value each document has in the given field.
+   * 
+   * @param reader
+   *          Used to get field values.
+   * @param field
+   *          Which field contains the longs.
+   * @param parser
+   *          Computes a long value for each term. Must not be {@code null}; if the
+   *          requested field was indexed as {@link NumericDocValuesField}, the
+   *          values are returned directly and the parser is never invoked.
+   * @param setDocsWithField
+   *          If true then {@link #getDocsWithField} will also be computed and
+   *          stored in the FieldCache.
+   * @return The values in the given field for each document.
+   * @throws IOException
+   *           If any error occurs.
+   */
+  public NumericDocValues getNumerics(AtomicReader reader, String field, Parser parser, boolean setDocsWithField) throws IOException;
+
+  /** Checks the internal cache for an appropriate entry, and if none
+   * is found, reads the term values in <code>field</code>
+   * and returns a {@link BinaryDocValues} instance, providing a
+   * method to retrieve the term (as a BytesRef) per document.
+   * @param reader  Used to get field values.
+   * @param field   Which field contains the strings.
+   * @param setDocsWithField  If true then {@link #getDocsWithField} will
+   *        also be computed and stored in the FieldCache.
+   * @return The values in the given field for each document.
+   * @throws IOException  If any error occurs.
+   */
+  public BinaryDocValues getTerms(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
+
+  /** Expert: just like {@link #getTerms(AtomicReader,String,boolean)},
+   *  but you can specify how much more RAM should be consumed in exchange for
+   *  faster lookups (the default favors faster lookups).  Note that the
+   *  first call for a given reader and field "wins",
+   *  subsequent calls will share the same cache entry. */
+  public BinaryDocValues getTerms(AtomicReader reader, String field, boolean setDocsWithField, float acceptableOverheadRatio) throws IOException;
+
+  /** Checks the internal cache for an appropriate entry, and if none
+   * is found, reads the term values in <code>field</code>
+   * and returns a {@link SortedDocValues} instance,
+   * providing methods to retrieve sort ordinals and terms
+   * (as a BytesRef) per document.
+   * @param reader  Used to get field values.
+   * @param field   Which field contains the strings.
+   * @return The values in the given field for each document.
+   * @throws IOException  If any error occurs.
+   */
+  public SortedDocValues getTermsIndex(AtomicReader reader, String field) throws IOException;
+
+  /** Expert: just like {@link
+   *  #getTermsIndex(AtomicReader,String)}, but you can specify
+   *  how much more RAM should be consumed in exchange for
+   *  faster lookups (the default favors faster lookups).  Note that the
+   *  first call for a given reader and field "wins",
+   *  subsequent calls will share the same cache entry. */
+  public SortedDocValues getTermsIndex(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException;
+
+  /** Can be passed to {@link #getDocTermOrds} to filter for 32-bit numeric terms */
+  public static final BytesRef INT32_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_INT });
+  /** Can be passed to {@link #getDocTermOrds} to filter for 64-bit numeric terms */
+  public static final BytesRef INT64_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_LONG });
+  
+  /**
+   * Checks the internal cache for an appropriate entry, and if none is found, reads the term values
+   * in <code>field</code>, uninverts them via {@link DocTermOrds}, and returns a {@link SortedSetDocValues}
+   * instance providing a method to retrieve the terms (as ords) per document.
+   *
+   * @param reader  Used to build a {@link DocTermOrds} instance
+   * @param field   Which field contains the strings.
+   * @param prefix  prefix for the subset of terms that should be uninverted: {@code null},
+   *                {@link #INT32_TERM_PREFIX}, or {@link #INT64_TERM_PREFIX}
+   *
+   * @return a {@link SortedSetDocValues} instance
+   * @throws IOException  If any error occurs.
+   */
+  public SortedSetDocValues getDocTermOrds(AtomicReader reader, String field, BytesRef prefix) throws IOException;
+
+  /**
+   * EXPERT: A unique Identifier/Description for each item in the FieldCache. 
+   * Can be useful for logging/debugging.
+   * @lucene.experimental
+   */
+  public final class CacheEntry {
+
+    private final Object readerKey;
+    private final String fieldName;
+    private final Class<?> cacheType;
+    private final Object custom;
+    private final Object value;
+    private String size;
+
+    public CacheEntry(Object readerKey, String fieldName,
+                      Class<?> cacheType,
+                      Object custom,
+                      Object value) {
+      this.readerKey = readerKey;
+      this.fieldName = fieldName;
+      this.cacheType = cacheType;
+      this.custom = custom;
+      this.value = value;
+    }
+
+    public Object getReaderKey() {
+      return readerKey;
+    }
+
+    public String getFieldName() {
+      return fieldName;
+    }
+
+    public Class<?> getCacheType() {
+      return cacheType;
+    }
+
+    public Object getCustom() {
+      return custom;
+    }
+
+    public Object getValue() {
+      return value;
+    }
+
+    /** 
+     * Computes (and stores) the estimated size of the cached value.
+     * @see #getEstimatedSize
+     */
+    public void estimateSize() {
+      long bytesUsed = RamUsageEstimator.sizeOf(getValue());
+      size = RamUsageEstimator.humanReadableUnits(bytesUsed);
+    }
+
+    /**
+     * The most recently estimated size of the value, null unless 
+     * estimateSize has been called.
+     */
+    public String getEstimatedSize() {
+      return size;
+    }
+    
+    @Override
+    public String toString() {
+      StringBuilder b = new StringBuilder();
+      b.append("'").append(getReaderKey()).append("'=>");
+      b.append("'").append(getFieldName()).append("',");
+      b.append(getCacheType()).append(",").append(getCustom());
+      b.append("=>").append(getValue().getClass().getName()).append("#");
+      b.append(System.identityHashCode(getValue()));
+      
+      String s = getEstimatedSize();
+      if(null != s) {
+        b.append(" (size =~ ").append(s).append(')');
+      }
+
+      return b.toString();
+    }
+  }
+  
+  /**
+   * EXPERT: Generates an array of CacheEntry objects representing all items 
+   * currently in the FieldCache.
+   * <p>
+   * NOTE: These CacheEntry objects maintain a strong reference to the 
+   * cached values.  Holding a reference to a CacheEntry after the AtomicReader 
+   * associated with it has been garbage collected will prevent the value itself
+   * from being garbage collected when the cache drops its WeakReference.
+   * </p>
+   * @lucene.experimental
+   */
+  public CacheEntry[] getCacheEntries();
+
+  /**
+   * <p>
+   * EXPERT: Instructs the FieldCache to forcibly expunge all entries 
+   * from the underlying caches.  This is intended only for use in 
+   * test methods, as a way to ensure a known base state of the cache 
+   * (without needing to rely on GC to free WeakReferences).  
+   * It should not be relied on for "Cache maintenance" in general 
+   * application code.
+   * </p>
+   * @lucene.experimental
+   */
+  public void purgeAllCaches();
+
+  /**
+   * Expert: drops all cache entries associated with this
+   * reader {@link IndexReader#getCoreCacheKey}.  NOTE: this cache key must
+   * precisely match the reader that the cache entry is
+   * keyed on. If you pass a top-level reader, it usually
+   * will have no effect as Lucene now caches at the segment
+   * reader level.
+   */
+  public void purgeByCacheKey(Object coreCacheKey);
+
+  /**
+   * If non-null, FieldCacheImpl will warn whenever
+   * entries are created that are not sane according to
+   * {@link FieldCacheSanityChecker}.
+   */
+  public void setInfoStream(PrintStream stream);
+
+  /** counterpart of {@link #setInfoStream(PrintStream)} */
+  public PrintStream getInfoStream();
+}
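All numeric access above funnels through the single long-based Parser: getNumerics always returns raw long bits, and the caller decodes them according to the parser that filled the cache. A minimal sketch for FLOAT values (compilable only inside org.apache.lucene.uninverting, since FieldCache is package-private; the field name "weight" and the helper method are hypothetical):

// Sketch: NUMERIC_UTILS_FLOAT_PARSER stores the sortable int representation
// widened to long, so the caller converts it back to a float.
static float floatValue(AtomicReader reader, int docID) throws IOException {
  NumericDocValues bits = FieldCache.DEFAULT.getNumerics(
      reader, "weight", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, false);
  return NumericUtils.sortableIntToFloat((int) bits.get(docID));
}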
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
similarity index 69%
rename from lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
rename to lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
index 243e42a..76081ee 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.search;
+package org.apache.lucene.uninverting;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -27,7 +27,6 @@
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocTermOrds;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
@@ -38,9 +37,9 @@
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.FieldCacheSanityChecker;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.packed.GrowableWriter;
@@ -61,11 +60,8 @@
   }
 
   private synchronized void init() {
-    caches = new HashMap<>(9);
-    caches.put(Integer.TYPE, new IntCache(this));
-    caches.put(Float.TYPE, new FloatCache(this));
+    caches = new HashMap<>(6);
     caches.put(Long.TYPE, new LongCache(this));
-    caches.put(Double.TYPE, new DoubleCache(this));
     caches.put(BinaryDocValues.class, new BinaryDocValuesCache(this));
     caches.put(SortedDocValues.class, new SortedDocValuesCache(this));
     caches.put(DocTermOrds.class, new DocTermOrdsCache(this));
@@ -352,54 +348,6 @@
     caches.get(DocsWithFieldCache.class).put(reader, new CacheKey(field, null), bits);
   }
 
-  @Override
-  public Ints getInts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
-    return getInts(reader, field, null, setDocsWithField);
-  }
-
-  @Override
-  public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
-      throws IOException {
-    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
-    if (valuesIn != null) {
-      // Not cached here by FieldCacheImpl (cached instead
-      // per-thread by SegmentReader):
-      return new Ints() {
-        @Override
-        public int get(int docID) {
-          return (int) valuesIn.get(docID);
-        }
-      };
-    } else {
-      final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
-      if (info == null) {
-        return Ints.EMPTY;
-      } else if (info.hasDocValues()) {
-        throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
-      } else if (!info.isIndexed()) {
-        return Ints.EMPTY;
-      }
-      return (Ints) caches.get(Integer.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
-    }
-  }
-
-  static class IntsFromArray extends Ints {
-    private final PackedInts.Reader values;
-    private final int minValue;
-
-    public IntsFromArray(PackedInts.Reader values, int minValue) {
-      assert values.getBitsPerValue() <= 32;
-      this.values = values;
-      this.minValue = minValue;
-    }
-    
-    @Override
-    public int get(int docID) {
-      final long delta = values.get(docID);
-      return minValue + (int) delta;
-    }
-  }
-
   private static class HoldsOneThing<T> {
     private T it;
 
@@ -421,79 +369,6 @@
     public long minValue;
   }
 
-  static final class IntCache extends Cache {
-    IntCache(FieldCacheImpl wrapper) {
-      super(wrapper);
-    }
-
-    @Override
-    protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
-        throws IOException {
-
-      final IntParser parser = (IntParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = NUMERIC_UTILS_INT_PARSER) so
-        // cache key includes NUMERIC_UTILS_INT_PARSER:
-        return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
-      }
-
-      final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<>();
-
-      Uninvert u = new Uninvert() {
-          private int minValue;
-          private int currentValue;
-          private GrowableWriter values;
-
-          @Override
-          public void visitTerm(BytesRef term) {
-            currentValue = parser.parseInt(term);
-            if (values == null) {
-              // Lazy alloc so for the numeric field case
-              // (which will hit a NumberFormatException
-              // when we first try the DEFAULT_INT_PARSER),
-              // we don't double-alloc:
-              int startBitsPerValue;
-              // Make sure than missing values (0) can be stored without resizing
-              if (currentValue < 0) {
-                minValue = currentValue;
-                startBitsPerValue = PackedInts.bitsRequired((-minValue) & 0xFFFFFFFFL);
-              } else {
-                minValue = 0;
-                startBitsPerValue = PackedInts.bitsRequired(currentValue);
-              }
-              values = new GrowableWriter(startBitsPerValue, reader.maxDoc(), PackedInts.FAST);
-              if (minValue != 0) {
-                values.fill(0, values.size(), (-minValue) & 0xFFFFFFFFL); // default value must be 0
-              }
-              valuesRef.set(new GrowableWriterAndMinValue(values, minValue));
-            }
-          }
-
-          @Override
-          public void visitDoc(int docID) {
-            values.set(docID, (currentValue - minValue) & 0xFFFFFFFFL);
-          }
-
-          @Override
-          protected TermsEnum termsEnum(Terms terms) throws IOException {
-            return parser.termsEnum(terms);
-          }
-        };
-
-      u.uninvert(reader, key.field, setDocsWithField);
-
-      if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
-      }
-      GrowableWriterAndMinValue values = valuesRef.get();
-      if (values == null) {
-        return new IntsFromArray(new PackedInts.NullReader(reader.maxDoc()), 0);
-      }
-      return new IntsFromArray(values.writer.getMutable(), (int) values.minValue);
-    }
-  }
-
   public Bits getDocsWithField(AtomicReader reader, String field) throws IOException {
     final FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
     if (fieldInfo == null) {
@@ -563,145 +438,31 @@
       return res;
     }
   }
-
-  @Override
-  public Floats getFloats (AtomicReader reader, String field, boolean setDocsWithField)
-    throws IOException {
-    return getFloats(reader, field, null, setDocsWithField);
-  }
-
-  @Override
-  public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField)
-    throws IOException {
-    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
-    if (valuesIn != null) {
-      // Not cached here by FieldCacheImpl (cached instead
-      // per-thread by SegmentReader):
-      return new Floats() {
-        @Override
-        public float get(int docID) {
-          return Float.intBitsToFloat((int) valuesIn.get(docID));
-        }
-      };
-    } else {
-      final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
-      if (info == null) {
-        return Floats.EMPTY;
-      } else if (info.hasDocValues()) {
-        throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
-      } else if (!info.isIndexed()) {
-        return Floats.EMPTY;
-      }
-      return (Floats) caches.get(Float.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
-    }
-  }
-
-  static class FloatsFromArray extends Floats {
-    private final float[] values;
-
-    public FloatsFromArray(float[] values) {
-      this.values = values;
-    }
-    
-    @Override
-    public float get(int docID) {
-      return values[docID];
-    }
-  }
-
-  static final class FloatCache extends Cache {
-    FloatCache(FieldCacheImpl wrapper) {
-      super(wrapper);
-    }
-
-    @Override
-    protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
-        throws IOException {
-
-      final FloatParser parser = (FloatParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = NUMERIC_UTILS_FLOAT_PARSER) so
-        // cache key includes NUMERIC_UTILS_FLOAT_PARSER:
-        return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
-      }
-
-      final HoldsOneThing<float[]> valuesRef = new HoldsOneThing<>();
-
-      Uninvert u = new Uninvert() {
-          private float currentValue;
-          private float[] values;
-
-          @Override
-          public void visitTerm(BytesRef term) {
-            currentValue = parser.parseFloat(term);
-            if (values == null) {
-              // Lazy alloc so for the numeric field case
-              // (which will hit a NumberFormatException
-              // when we first try the DEFAULT_INT_PARSER),
-              // we don't double-alloc:
-              values = new float[reader.maxDoc()];
-              valuesRef.set(values);
-            }
-          }
-
-          @Override
-          public void visitDoc(int docID) {
-            values[docID] = currentValue;
-          }
-          
-          @Override
-          protected TermsEnum termsEnum(Terms terms) throws IOException {
-            return parser.termsEnum(terms);
-          }
-        };
-
-      u.uninvert(reader, key.field, setDocsWithField);
-
-      if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
-      }
-
-      float[] values = valuesRef.get();
-      if (values == null) {
-        values = new float[reader.maxDoc()];
-      }
-      return new FloatsFromArray(values);
-    }
-  }
-
-  @Override
-  public Longs getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
-    return getLongs(reader, field, null, setDocsWithField);
-  }
   
   @Override
-  public Longs getLongs(AtomicReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
-      throws IOException {
+  public NumericDocValues getNumerics(AtomicReader reader, String field, Parser parser, boolean setDocsWithField) throws IOException {
+    if (parser == null) {
+      throw new NullPointerException();
+    }
     final NumericDocValues valuesIn = reader.getNumericDocValues(field);
     if (valuesIn != null) {
       // Not cached here by FieldCacheImpl (cached instead
       // per-thread by SegmentReader):
-      return new Longs() {
-        @Override
-        public long get(int docID) {
-          return valuesIn.get(docID);
-        }
-      };
+      return valuesIn;
     } else {
       final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
       if (info == null) {
-        return Longs.EMPTY;
+        return DocValues.EMPTY_NUMERIC;
       } else if (info.hasDocValues()) {
         throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
       } else if (!info.isIndexed()) {
-        return Longs.EMPTY;
+        return DocValues.EMPTY_NUMERIC;
       }
-      return (Longs) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
+      return (NumericDocValues) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
     }
   }
 
-  static class LongsFromArray extends Longs {
+  static class LongsFromArray extends NumericDocValues {
     private final PackedInts.Reader values;
     private final long minValue;
 
@@ -725,13 +486,7 @@
     protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
 
-      final LongParser parser = (LongParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = NUMERIC_UTILS_LONG_PARSER) so
-        // cache key includes NUMERIC_UTILS_LONG_PARSER:
-        return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
-      }
+      final Parser parser = (Parser) key.custom;
 
       final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<>();
 
@@ -742,7 +497,7 @@
 
           @Override
           public void visitTerm(BytesRef term) {
-            currentValue = parser.parseLong(term);
+            currentValue = parser.parseValue(term);
             if (values == null) {
               // Lazy alloc so for the numeric field case
               // (which will hit a NumberFormatException
@@ -789,111 +544,6 @@
     }
   }
 
-  @Override
-  public Doubles getDoubles(AtomicReader reader, String field, boolean setDocsWithField)
-    throws IOException {
-    return getDoubles(reader, field, null, setDocsWithField);
-  }
-
-  @Override
-  public Doubles getDoubles(AtomicReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
-      throws IOException {
-    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
-    if (valuesIn != null) {
-      // Not cached here by FieldCacheImpl (cached instead
-      // per-thread by SegmentReader):
-      return new Doubles() {
-        @Override
-        public double get(int docID) {
-          return Double.longBitsToDouble(valuesIn.get(docID));
-        }
-      };
-    } else {
-      final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
-      if (info == null) {
-        return Doubles.EMPTY;
-      } else if (info.hasDocValues()) {
-        throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
-      } else if (!info.isIndexed()) {
-        return Doubles.EMPTY;
-      }
-      return (Doubles) caches.get(Double.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
-    }
-  }
-
-  static class DoublesFromArray extends Doubles {
-    private final double[] values;
-
-    public DoublesFromArray(double[] values) {
-      this.values = values;
-    }
-    
-    @Override
-    public double get(int docID) {
-      return values[docID];
-    }
-  }
-
-  static final class DoubleCache extends Cache {
-    DoubleCache(FieldCacheImpl wrapper) {
-      super(wrapper);
-    }
-
-    @Override
-    protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
-        throws IOException {
-
-      final DoubleParser parser = (DoubleParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = NUMERIC_UTILS_DOUBLE_PARSER) so
-        // cache key includes NUMERIC_UTILS_DOUBLE_PARSER:
-        return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
-      }
-
-      final HoldsOneThing<double[]> valuesRef = new HoldsOneThing<>();
-
-      Uninvert u = new Uninvert() {
-          private double currentValue;
-          private double[] values;
-
-          @Override
-          public void visitTerm(BytesRef term) {
-            currentValue = parser.parseDouble(term);
-            if (values == null) {
-              // Lazy alloc so for the numeric field case
-              // (which will hit a NumberFormatException
-              // when we first try the DEFAULT_INT_PARSER),
-              // we don't double-alloc:
-              values = new double[reader.maxDoc()];
-              valuesRef.set(values);
-            }
-          }
-
-          @Override
-          public void visitDoc(int docID) {
-            values[docID] = currentValue;
-          }
-          
-          @Override
-          protected TermsEnum termsEnum(Terms terms) throws IOException {
-            return parser.termsEnum(terms);
-          }
-        };
-
-      u.uninvert(reader, key.field, setDocsWithField);
-
-      if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
-      }
-      double[] values = valuesRef.get();
-      if (values == null) {
-        values = new double[reader.maxDoc()];
-      }
-      return new DoublesFromArray(values);
-    }
-  }
-
   public static class SortedDocValuesImpl extends SortedDocValues {
     private final PagedBytes.Reader bytes;
     private final MonotonicAppendingLongBuffer termOrdToBytesOffset;
@@ -1190,7 +840,10 @@
 
   // TODO: this if DocTermsIndex was already created, we
   // should share it...
-  public SortedSetDocValues getDocTermOrds(AtomicReader reader, String field) throws IOException {
+  public SortedSetDocValues getDocTermOrds(AtomicReader reader, String field, BytesRef prefix) throws IOException {
+    // not a general purpose filtering mechanism...
+    assert prefix == null || prefix == INT32_TERM_PREFIX || prefix == INT64_TERM_PREFIX;
+    
     SortedSetDocValues dv = reader.getSortedSetDocValues(field);
     if (dv != null) {
       return dv;
@@ -1210,7 +863,22 @@
       return DocValues.EMPTY_SORTED_SET;
     }
     
-    DocTermOrds dto = (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new CacheKey(field, null), false);
+    // ok we need to uninvert. check if we can optimize a bit.
+    
+    Terms terms = reader.terms(field);
+    if (terms == null) {
+      return DocValues.EMPTY_SORTED_SET;
+    } else {
+      // if #postings == #docswithfield we know that the field is "single valued enough".
+      // it's possible the same term might appear twice in the same document, but SORTED_SET discards frequency.
+      // it's still ok with filtering (which we limit to numerics), it just means precisionStep = Inf
+      long numPostings = terms.getSumDocFreq();
+      if (numPostings != -1 && numPostings == terms.getDocCount()) {
+        return DocValues.singleton(getTermsIndex(reader, field));
+      }
+    }
+    
+    DocTermOrds dto = (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new CacheKey(field, prefix), false);
     return dto.iterator(reader);
   }
 
@@ -1222,7 +890,8 @@
     @Override
     protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
         throws IOException {
-      return new DocTermOrds(reader, null, key.field);
+      BytesRef prefix = (BytesRef) key.custom;
+      return new DocTermOrds(reader, null, key.field, prefix);
     }
   }
 
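The single-valued shortcut in getDocTermOrds works because Terms#getSumDocFreq counts distinct (term, document) postings while Terms#getDocCount counts documents having at least one term in the field: when the two are equal, every document with the field carries exactly one distinct term, so the cheaper SORTED cache can back the SORTED_SET view. A short sketch of observing this from caller code (package-private access assumed; the field name "foo" mirrors the tests further down):

// Sketch: DocValues.unwrapSingleton returns non-null when the single-valued
// optimization applied, as TestDocTermOrds#testActuallySingleValued asserts.
SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(reader, "foo", null);
SortedDocValues single = DocValues.unwrapSingleton(v);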
diff --git a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java
similarity index 98%
rename from lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
rename to lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java
index febc4e3..1562e42 100644
--- a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.util;
+package org.apache.lucene.uninverting;
 /**
  * Copyright 2009 The Apache Software Foundation
  *
@@ -23,12 +23,12 @@
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.CacheEntry;
 import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.uninverting.FieldCache.CacheEntry;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.MapOfSets;
 
 /** 
  * Provides methods for sanity checking that entries in the FieldCache 
@@ -52,7 +52,7 @@
  * @see FieldCacheSanityChecker.Insanity
  * @see FieldCacheSanityChecker.InsanityType
  */
-public final class FieldCacheSanityChecker {
+final class FieldCacheSanityChecker {
 
   private boolean estimateRam;
 
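With FieldCacheSanityChecker now package-private, insanity is surfaced through the FieldCache info stream: FieldCacheImpl prints a WARNING whenever a non-sane entry is created, as TestFieldCache exercises further down. A one-line sketch (package-private access assumed):

// Sketch: route insanity warnings to stderr (any PrintStream works).
FieldCache.DEFAULT.setInfoStream(System.err);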
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
new file mode 100644
index 0000000..7f5b673
--- /dev/null
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
@@ -0,0 +1,326 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+import org.apache.lucene.document.IntField; // javadocs
+import org.apache.lucene.document.LongField; // javadocs
+import org.apache.lucene.document.FloatField; // javadocs
+import org.apache.lucene.document.DoubleField; // javadocs
+import org.apache.lucene.document.BinaryDocValuesField; // javadocs
+import org.apache.lucene.document.NumericDocValuesField; // javadocs
+import org.apache.lucene.document.SortedDocValuesField; // javadocs
+import org.apache.lucene.document.SortedSetDocValuesField; // javadocs
+import org.apache.lucene.document.StringField; // javadocs
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.FilterAtomicReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.uninverting.FieldCache.CacheEntry;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * A FilterReader that exposes <i>indexed</i> values as if they also had
+ * docvalues.
+ * <p>
+ * This is accomplished by "inverting the inverted index" or "uninversion".
+ * <p>
+ * The uninversion process happens lazily: upon the first request for the 
+ * field's docvalues (e.g. via {@link AtomicReader#getNumericDocValues(String)} 
+ * or similar), it will create the docvalues on-the-fly if needed and cache them,
+ * based on the core cache key of the wrapped AtomicReader.
+ */
+public class UninvertingReader extends FilterAtomicReader {
+  
+  /**
+   * Specifies the type of uninversion to apply for the field. 
+   */
+  public static enum Type {
+    /** 
+     * Single-valued Integer (e.g. indexed with {@link IntField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link NumericDocValuesField}.
+     */
+    INTEGER,
+    /** 
+     * Single-valued Long (e.g. indexed with {@link LongField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link NumericDocValuesField}.
+     */
+    LONG,
+    /** 
+     * Single-valued Float (e.g. indexed with {@link FloatField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link NumericDocValuesField}.
+     */
+    FLOAT,
+    /** 
+     * Single-valued Double (e.g. indexed with {@link DoubleField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link NumericDocValuesField}.
+     */
+    DOUBLE,
+    /** 
+     * Single-valued Binary (e.g. indexed with {@link StringField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link BinaryDocValuesField}.
+     */
+    BINARY,
+    /** 
+     * Single-valued Binary (e.g. indexed with {@link StringField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedDocValuesField}.
+     */
+    SORTED,
+    /** 
+     * Multi-valued Binary (e.g. indexed with {@link StringField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedSetDocValuesField}.
+     */
+    SORTED_SET_BINARY,
+    /** 
+     * Multi-valued Integer (e.g. indexed with {@link IntField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedSetDocValuesField}.
+     */
+    SORTED_SET_INTEGER,
+    /** 
+     * Multi-valued Float (e.g. indexed with {@link FloatField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedSetDocValuesField}.
+     */
+    SORTED_SET_FLOAT,
+    /** 
+     * Multi-valued Long (e.g. indexed with {@link LongField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedSetDocValuesField}.
+     */
+    SORTED_SET_LONG,
+    /** 
+     * Multi-valued Double (e.g. indexed with {@link DoubleField})
+     * <p>
+     * Fields with this type act as if they were indexed with
+     * {@link SortedSetDocValuesField}.
+     */
+    SORTED_SET_DOUBLE
+  }
+  
+  /**
+   * Wraps a provided DirectoryReader. Note that for convenience, the returned reader
+   * can be used normally, e.g. passed to {@link DirectoryReader#openIfChanged(DirectoryReader)}.
+   */
+  public static DirectoryReader wrap(DirectoryReader in, final Map<String,Type> mapping) {
+    return new UninvertingDirectoryReader(in, mapping);
+  }
+  
+  static class UninvertingDirectoryReader extends FilterDirectoryReader {
+    final Map<String,Type> mapping;
+    
+    public UninvertingDirectoryReader(DirectoryReader in, final Map<String,Type> mapping) {
+      super(in, new FilterDirectoryReader.SubReaderWrapper() {
+        @Override
+        public AtomicReader wrap(AtomicReader reader) {
+          return new UninvertingReader(reader, mapping);
+        }
+      });
+      this.mapping = mapping;
+    }
+
+    @Override
+    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+      return new UninvertingDirectoryReader(in, mapping);
+    }
+  }
+  
+  final Map<String,Type> mapping;
+  final FieldInfos fieldInfos;
+  
+  /** 
+   * Create a new UninvertingReader with the specified mapping 
+   * <p>
+   * Expert: This should almost never be used. Use {@link #wrap(DirectoryReader, Map)}
+   * instead.
+   *  
+   * @lucene.internal
+   */
+  public UninvertingReader(AtomicReader in, Map<String,Type> mapping) {
+    super(in);
+    this.mapping = mapping;
+    ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
+    for (FieldInfo fi : in.getFieldInfos()) {
+      FieldInfo.DocValuesType type = fi.getDocValuesType();
+      if (fi.isIndexed() && !fi.hasDocValues()) {
+        Type t = mapping.get(fi.name);
+        if (t != null) {
+          switch(t) {
+            case INTEGER:
+            case LONG:
+            case FLOAT:
+            case DOUBLE:
+              type = FieldInfo.DocValuesType.NUMERIC;
+              break;
+            case BINARY:
+              type = FieldInfo.DocValuesType.BINARY;
+              break;
+            case SORTED:
+              type = FieldInfo.DocValuesType.SORTED;
+              break;
+            case SORTED_SET_BINARY:
+            case SORTED_SET_INTEGER:
+            case SORTED_SET_FLOAT:
+            case SORTED_SET_LONG:
+            case SORTED_SET_DOUBLE:
+              type = FieldInfo.DocValuesType.SORTED_SET;
+              break;
+            default:
+              throw new AssertionError();
+          }
+        }
+      }
+      filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
+                                      fi.hasPayloads(), fi.getIndexOptions(), type, fi.getNormType(), null));
+    }
+    fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
+  }
+
+  @Override
+  public FieldInfos getFieldInfos() {
+    return fieldInfos;
+  }
+
+  @Override
+  public NumericDocValues getNumericDocValues(String field) throws IOException {
+    Type v = mapping.get(field);
+    if (v != null) {
+      switch (v) {
+        case INTEGER: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+        case FLOAT: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+        case LONG: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+        case DOUBLE: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+        default:
+          throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + v);
+      }
+    }
+    return super.getNumericDocValues(field);
+  }
+
+  @Override
+  public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+    Type v = mapping.get(field);
+    if (v == Type.BINARY) {
+      return FieldCache.DEFAULT.getTerms(in, field, true);
+    } else if (v != null && v != Type.SORTED) {
+      throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + v);
+    } else {
+      return in.getBinaryDocValues(field);
+    }
+  }
+
+  @Override
+  public SortedDocValues getSortedDocValues(String field) throws IOException {
+    Type v = mapping.get(field);
+    if (v == Type.SORTED) {
+      return FieldCache.DEFAULT.getTermsIndex(in, field);
+    } else if (v != null) {
+      throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + v);
+    } else {
+      return in.getSortedDocValues(field);
+    }
+  }
+  
+  @Override
+  public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+    Type v = mapping.get(field);
+    if (v != null) {
+      switch (v) {
+        case SORTED_SET_INTEGER:
+        case SORTED_SET_FLOAT: 
+          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT32_TERM_PREFIX);
+        case SORTED_SET_LONG:
+        case SORTED_SET_DOUBLE:
+          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT64_TERM_PREFIX);
+        case SORTED_SET_BINARY:
+          return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
+        default:
+          if (v != Type.SORTED) {
+            throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + v);
+          }
+      }
+    }
+    return in.getSortedSetDocValues(field);
+  }
+
+  @Override
+  public Bits getDocsWithField(String field) throws IOException {
+    if (mapping.containsKey(field)) {
+      return FieldCache.DEFAULT.getDocsWithField(in, field);
+    } else {
+      return in.getDocsWithField(field);
+    }
+  }
+
+  @Override
+  public Object getCoreCacheKey() {
+    return in.getCoreCacheKey();
+  }
+
+  @Override
+  public Object getCombinedCoreAndDeletesKey() {
+    return in.getCombinedCoreAndDeletesKey();
+  }
+
+  @Override
+  public String toString() {
+    return "Uninverting(" + in.toString() + ")";
+  }
+  
+  /** 
+   * Returns information about the backing cache.
+   * @lucene.internal 
+   */
+  public static String[] getUninvertedStats() {
+    CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
+    String[] info = new String[entries.length];
+    for (int i = 0; i < entries.length; i++) {
+      info[i] = entries[i].toString();
+    }
+    return info;
+  }
+}
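A sketch of typical end-to-end usage (directory, field names, and query are hypothetical): map each indexed-only field to the Type matching how it was indexed, wrap the reader, and search as usual.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader;

static TopDocs sortByCategory(Directory dir) throws IOException {
  Map<String,UninvertingReader.Type> mapping = new HashMap<>();
  mapping.put("id", UninvertingReader.Type.INTEGER);      // indexed with IntField
  mapping.put("category", UninvertingReader.Type.SORTED); // indexed with StringField
  DirectoryReader reader = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
  try {
    IndexSearcher searcher = new IndexSearcher(reader);
    // sorting on "category" uninverts it lazily and caches it per segment core
    return searcher.search(new MatchAllDocsQuery(), 10,
        new Sort(new SortField("category", SortField.Type.STRING)));
  } finally {
    reader.close();
  }
}

Fields uninverted this way show up in getUninvertedStats(), which can help identify fields worth migrating to real docvalues at index time.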
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/package.html b/lucene/misc/src/java/org/apache/lucene/uninverting/package.html
new file mode 100644
index 0000000..bb07636
--- /dev/null
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/package.html
@@ -0,0 +1,21 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+<body>
+Support for creating docvalues on-the-fly from the inverted index at runtime.
+</body>
+</html>
\ No newline at end of file
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
similarity index 73%
rename from lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java
rename to lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
index b2d131e..785f5df 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.index;
+package org.apache.lucene.uninverting;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -31,12 +31,28 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;
 
@@ -306,7 +322,7 @@
                                             TestUtil.nextInt(random(), 2, 10));
                                             
 
-    final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);
+    final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
     /*
       for(int docID=0;docID<subR.maxDoc();docID++) {
       System.out.println("  docID=" + docID + " id=" + docIDToID[docID]);
@@ -362,7 +378,7 @@
         System.out.println("TEST: docID=" + docID + " of " + r.maxDoc() + " (id=" + docIDToID.get(docID) + ")");
       }
       iter.setDocument(docID);
-      final int[] answers = idToOrds[docIDToID.get(docID)];
+      final int[] answers = idToOrds[(int) docIDToID.get(docID)];
       int upto = 0;
       long ord;
       while ((ord = iter.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
@@ -387,6 +403,8 @@
     
     doc = new Document();
     doc.add(newStringField("foo", "baz", Field.Store.NO));
+    // we need a second value for a doc, or we don't actually test DocTermOrds!
+    doc.add(newStringField("foo", "car", Field.Store.NO));
     iw.addDocument(doc);
     
     DirectoryReader r1 = DirectoryReader.open(iw, true);
@@ -394,10 +412,10 @@
     iw.deleteDocuments(new Term("foo", "baz"));
     DirectoryReader r2 = DirectoryReader.open(iw, true);
     
-    FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r2), "foo");
+    FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r2), "foo", null);
     
-    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r1), "foo");
-    assertEquals(2, v.getValueCount());
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r1), "foo", null);
+    assertEquals(3, v.getValueCount());
     v.setDocument(1);
     assertEquals(1, v.nextOrd());
     
@@ -407,6 +425,90 @@
     dir.close();
   }
   
+  public void testNumericEncoded32() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new IntField("foo", 5, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new IntField("foo", 5, Field.Store.NO));
+    doc.add(new IntField("foo", -3, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = DirectoryReader.open(dir);
+    AtomicReader ar = getOnlySegmentReader(ir);
+    
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT32_TERM_PREFIX);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(-3, NumericUtils.prefixCodedToInt(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(5, NumericUtils.prefixCodedToInt(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
+  public void testNumericEncoded64() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new LongField("foo", 5, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LongField("foo", 5, Field.Store.NO));
+    doc.add(new LongField("foo", -3, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = DirectoryReader.open(dir);
+    AtomicReader ar = getOnlySegmentReader(ir);
+    
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT64_TERM_PREFIX);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(-3, NumericUtils.prefixCodedToLong(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(5, NumericUtils.prefixCodedToLong(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
   public void testSortedTermsEnum() throws IOException {
     Directory directory = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
@@ -420,6 +522,8 @@
     
     doc = new Document();
     doc.add(new StringField("field", "world", Field.Store.NO));
+    // we need a second value for a doc, or we don't actually test DocTermOrds!
+    doc.add(new StringField("field", "hello", Field.Store.NO));
     iwriter.addDocument(doc);
 
     doc = new Document();
@@ -431,7 +535,7 @@
     iwriter.shutdown();
 
     AtomicReader ar = getOnlySegmentReader(ireader);
-    SortedSetDocValues dv = FieldCache.DEFAULT.getDocTermOrds(ar, "field");
+    SortedSetDocValues dv = FieldCache.DEFAULT.getDocTermOrds(ar, "field", null);
     assertEquals(3, dv.getValueCount());
     
     TermsEnum termsEnum = dv.termsEnum();
@@ -478,4 +582,62 @@
     ireader.close();
     directory.close();
   }
+  
+  public void testActuallySingleValued() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwconfig =  newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    iwconfig.setMergePolicy(newLogMergePolicy());
+    IndexWriter iw = new IndexWriter(dir, iwconfig);
+    
+    Document doc = new Document();
+    doc.add(new StringField("foo", "bar", Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new StringField("foo", "baz", Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new StringField("foo", "baz", Field.Store.NO));
+    doc.add(new StringField("foo", "baz", Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = DirectoryReader.open(dir);
+    AtomicReader ar = getOnlySegmentReader(ir);
+    
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
+    assertNotNull(DocValues.unwrapSingleton(v)); // actually a single-valued field
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(0, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(2);
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(3);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals("bar", value.utf8ToString());
+    
+    v.lookupOrd(1, value);
+    assertEquals("baz", value.utf8ToString());
+    
+    ir.close();
+    dir.close();
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
similarity index 84%
rename from lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java
rename to lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
index 6ff41ed..b15e933 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.search;
+package org.apache.lucene.uninverting;
 
 /**
  * Copyright 2004 The Apache Software Foundation
@@ -43,20 +43,16 @@
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocTermOrds;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.FieldCache.Doubles;
-import org.apache.lucene.search.FieldCache.Floats;
-import org.apache.lucene.search.FieldCache.Ints;
-import org.apache.lucene.search.FieldCache.Longs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -141,15 +137,17 @@
       FieldCache cache = FieldCache.DEFAULT;
       ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
       cache.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8));
-      cache.getDoubles(reader, "theDouble", false);
-      cache.getFloats(reader, "theDouble", new FieldCache.FloatParser() {
+      cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+      cache.getNumerics(reader, "theDouble", new FieldCache.Parser() {
         @Override
         public TermsEnum termsEnum(Terms terms) throws IOException {
           return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
         }
         @Override
-        public float parseFloat(BytesRef term) {
-          return NumericUtils.sortableIntToFloat((int) NumericUtils.prefixCodedToLong(term));
+        public long parseValue(BytesRef term) {
+          int val = (int) NumericUtils.prefixCodedToLong(term);
+          if (val<0) val ^= 0x7fffffff;
+          return val;
         }
       }, false);
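+      // requesting the same field with a different parser should log a WARNING to the infostream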
       assertTrue(bos.toString(IOUtils.UTF_8).indexOf("WARNING") != -1);
@@ -161,32 +159,28 @@
 
   public void test() throws IOException {
     FieldCache cache = FieldCache.DEFAULT;
-    FieldCache.Doubles doubles = cache.getDoubles(reader, "theDouble", random().nextBoolean());
-    assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
+    NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(doubles.get(i) + " does not equal: " + (Double.MAX_VALUE - i), doubles.get(i) == (Double.MAX_VALUE - i));
+      assertEquals(Double.doubleToLongBits(Double.MAX_VALUE - i), doubles.get(i));
     }
     
-    FieldCache.Longs longs = cache.getLongs(reader, "theLong", random().nextBoolean());
-    assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
+    NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(longs.get(i) + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs.get(i) == (Long.MAX_VALUE - i));
+      assertEquals(Long.MAX_VALUE - i, longs.get(i));
     }
 
-    FieldCache.Ints ints = cache.getInts(reader, "theInt", random().nextBoolean());
-    assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
+    NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(ints.get(i) + " does not equal: " + (Integer.MAX_VALUE - i), ints.get(i) == (Integer.MAX_VALUE - i));
+      assertEquals(Integer.MAX_VALUE - i, ints.get(i));
     }
     
-    FieldCache.Floats floats = cache.getFloats(reader, "theFloat", random().nextBoolean());
-    assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
+    NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(floats.get(i) + " does not equal: " + (Float.MAX_VALUE - i), floats.get(i) == (Float.MAX_VALUE - i));
+      assertEquals(Float.floatToIntBits(Float.MAX_VALUE - i), floats.get(i));
     }
 
     Bits docsWithField = cache.getDocsWithField(reader, "theLong");
@@ -271,10 +265,10 @@
     terms = cache.getTerms(reader, "bogusfield", false);
 
     // getDocTermOrds
-    SortedSetDocValues termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField");
+    SortedSetDocValues termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField", null);
     int numEntries = cache.getCacheEntries().length;
     // ask for it again, and check that we didnt create any additional entries:
-    termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField");
+    termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField", null);
     assertEquals(numEntries, cache.getCacheEntries().length);
 
     for (int i = 0; i < NUM_DOCS; i++) {
@@ -296,7 +290,7 @@
     }
 
     // test bad field
-    termOrds = cache.getDocTermOrds(reader, "bogusfield");
+    termOrds = cache.getDocTermOrds(reader, "bogusfield", null);
     assertTrue(termOrds.getValueCount() == 0);
 
     FieldCache.DEFAULT.purgeByCacheKey(reader.getCoreCacheKey());
@@ -335,22 +329,21 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
-    cache.getDoubles(reader, "theDouble", true);
+    cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
 
-    // The double[] takes two slots (one w/ null parser, one
-    // w/ real parser), and docsWithField should also
+    // The double[] takes one slot, and docsWithField should also
     // have been populated:
-    assertEquals(3, cache.getCacheEntries().length);
+    assertEquals(2, cache.getCacheEntries().length);
     Bits bits = cache.getDocsWithField(reader, "theDouble");
 
     // No new entries should appear:
-    assertEquals(3, cache.getCacheEntries().length);
+    assertEquals(2, cache.getCacheEntries().length);
     assertTrue(bits instanceof Bits.MatchAllBits);
 
-    FieldCache.Ints ints = cache.getInts(reader, "sparse", true);
-    assertEquals(6, cache.getCacheEntries().length);
+    NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    assertEquals(4, cache.getCacheEntries().length);
     Bits docsWithField = cache.getDocsWithField(reader, "sparse");
-    assertEquals(6, cache.getCacheEntries().length);
+    assertEquals(4, cache.getCacheEntries().length);
     for (int i = 0; i < docsWithField.length(); i++) {
       if (i%2 == 0) {
         assertTrue(docsWithField.get(i));
@@ -360,7 +353,7 @@
       }
     }
 
-    FieldCache.Ints numInts = cache.getInts(reader, "numInt", random().nextBoolean());
+    NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
     docsWithField = cache.getDocsWithField(reader, "numInt");
     for (int i = 0; i < docsWithField.length(); i++) {
       if (i%2 == 0) {
@@ -410,7 +403,7 @@
                     assertEquals(i%2 == 0, docsWithField.get(i));
                   }
                 } else {
-                  FieldCache.Ints ints = cache.getInts(reader, "sparse", true);
+                  NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
                   Bits docsWithField = cache.getDocsWithField(reader, "sparse");
                   for (int i = 0; i < docsWithField.length(); i++) {
                     if (i%2 == 0) {
@@ -459,7 +452,7 @@
     
     // Binary type: can be retrieved via getTerms()
     try {
-      FieldCache.DEFAULT.getInts(ar, "binary", false);
+      FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -473,7 +466,7 @@
     } catch (IllegalStateException expected) {}
     
     try {
-      FieldCache.DEFAULT.getDocTermOrds(ar, "binary");
+      FieldCache.DEFAULT.getDocTermOrds(ar, "binary", null);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -487,7 +480,7 @@
     
     // Sorted type: can be retrieved via getTerms(), getTermsIndex(), getDocTermOrds()
     try {
-      FieldCache.DEFAULT.getInts(ar, "sorted", false);
+      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -506,7 +499,7 @@
     sorted.get(0, scratch);
     assertEquals("sorted value", scratch.utf8ToString());
     
-    SortedSetDocValues sortedSet = FieldCache.DEFAULT.getDocTermOrds(ar, "sorted");
+    SortedSetDocValues sortedSet = FieldCache.DEFAULT.getDocTermOrds(ar, "sorted", null);
     sortedSet.setDocument(0);
     assertEquals(0, sortedSet.nextOrd());
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd());
@@ -516,7 +509,7 @@
     assertTrue(bits.get(0));
     
     // Numeric type: can be retrieved via getInts() and so on
-    Ints numeric = FieldCache.DEFAULT.getInts(ar, "numeric", false);
+    NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
     assertEquals(42, numeric.get(0));
     
     try {
@@ -530,7 +523,7 @@
     } catch (IllegalStateException expected) {}
     
     try {
-      FieldCache.DEFAULT.getDocTermOrds(ar, "numeric");
+      FieldCache.DEFAULT.getDocTermOrds(ar, "numeric", null);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -545,7 +538,7 @@
     // SortedSet type: can be retrieved via getDocTermOrds() 
     if (defaultCodecSupportsSortedSet()) {
       try {
-        FieldCache.DEFAULT.getInts(ar, "sortedset", false);
+        FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
         fail();
       } catch (IllegalStateException expected) {}
     
@@ -564,7 +557,7 @@
         fail();
       } catch (IllegalStateException expected) {}
     
-      sortedSet = FieldCache.DEFAULT.getDocTermOrds(ar, "sortedset");
+      sortedSet = FieldCache.DEFAULT.getDocTermOrds(ar, "sortedset", null);
       sortedSet.setDocument(0);
       assertEquals(0, sortedSet.nextOrd());
       assertEquals(1, sortedSet.nextOrd());
@@ -593,17 +586,17 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    Ints ints = cache.getInts(ar, "bogusints", true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    Longs longs = cache.getLongs(ar, "boguslongs", true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    Floats floats = cache.getFloats(ar, "bogusfloats", true);
-    assertEquals(0, floats.get(0), 0.0f);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    assertEquals(0, floats.get(0));
     
-    Doubles doubles = cache.getDoubles(ar, "bogusdoubles", true);
-    assertEquals(0, doubles.get(0), 0.0D);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    assertEquals(0, doubles.get(0));
     
     BytesRef scratch = new BytesRef();
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -615,7 +608,7 @@
     sorted.get(0, scratch);
     assertEquals(0, scratch.length);
     
-    SortedSetDocValues sortedSet = cache.getDocTermOrds(ar, "bogusmultivalued");
+    SortedSetDocValues sortedSet = cache.getDocTermOrds(ar, "bogusmultivalued", null);
     sortedSet.setDocument(0);
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd());
     
@@ -652,17 +645,17 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    Ints ints = cache.getInts(ar, "bogusints", true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    Longs longs = cache.getLongs(ar, "boguslongs", true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    Floats floats = cache.getFloats(ar, "bogusfloats", true);
-    assertEquals(0, floats.get(0), 0.0f);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    assertEquals(0, floats.get(0));
     
-    Doubles doubles = cache.getDoubles(ar, "bogusdoubles", true);
-    assertEquals(0, doubles.get(0), 0.0D);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    assertEquals(0, doubles.get(0));
     
     BytesRef scratch = new BytesRef();
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -674,7 +667,7 @@
     sorted.get(0, scratch);
     assertEquals(0, scratch.length);
     
-    SortedSetDocValues sortedSet = cache.getDocTermOrds(ar, "bogusmultivalued");
+    SortedSetDocValues sortedSet = cache.getDocTermOrds(ar, "bogusmultivalued", null);
     sortedSet.setDocument(0);
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd());
     
@@ -724,7 +717,7 @@
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final FieldCache.Longs longs = FieldCache.DEFAULT.getLongs(getOnlySegmentReader(reader), "f", false);
+    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], longs.get(i));
     }
@@ -770,7 +763,7 @@
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(getOnlySegmentReader(reader), "f", false);
+    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], ints.get(i));
     }
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java
new file mode 100644
index 0000000..ea6a359
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java
@@ -0,0 +1,72 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestFieldCacheReopen extends LuceneTestCase {
+  
+  // TODO: make a version of this that tests the same thing with UninvertingReader.wrap()
+  
+  // LUCENE-1579: Ensure that on a reopened reader, any
+  // shared segments reuse the doc values arrays in
+  // FieldCache
+  public void testFieldCacheReuseAfterReopen() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document doc = new Document();
+    doc.add(new IntField("number", 17, Field.Store.NO));
+    writer.addDocument(doc);
+    writer.commit();
+  
+    // Open reader1
+    DirectoryReader r = DirectoryReader.open(dir);
+    AtomicReader r1 = getOnlySegmentReader(r);
+    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    assertEquals(17, ints.get(0));
+  
+    // Add new segment
+    writer.addDocument(doc);
+    writer.commit();
+  
+    // Reopen reader1 --> reader2
+    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
+    assertNotNull(r2);
+    r.close();
+    AtomicReader sub0 = r2.leaves().get(0).reader();
+    final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    r2.close();
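+    // the reopened reader shares the segment's core cache key, so FieldCache must return the identical instance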
+    assertTrue(ints == ints2);
+  
+    writer.shutdown();
+    dir.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
similarity index 86%
rename from lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
rename to lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
index 4e89a30..1dc461a 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.util;
+package org.apache.lucene.uninverting;
 
 /**
  * Copyright 2009 The Apache Software Foundation
@@ -30,10 +30,10 @@
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
-import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;
+import org.apache.lucene.uninverting.FieldCacheSanityChecker.Insanity;
+import org.apache.lucene.uninverting.FieldCacheSanityChecker.InsanityType;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class TestFieldCacheSanityChecker extends LuceneTestCase {
 
@@ -94,13 +94,11 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
 
-    cache.getDoubles(readerA, "theDouble", false);
-    cache.getDoubles(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
-    cache.getDoubles(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
-    cache.getDoubles(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getNumerics(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getNumerics(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getNumerics(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
 
-    cache.getInts(readerX, "theInt", false);
-    cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
 
     // // // 
 
@@ -119,7 +117,7 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
 
-    cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
     cache.getTerms(readerX, "theInt", false);
 
     // // // 
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java
new file mode 100644
index 0000000..c8380d9
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java
@@ -0,0 +1,1235 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.util.LuceneTestCase;
+
+/*
+ * Tests sorting (but with FieldCache instead of DocValues)
+ */
+public class TestFieldCacheSort extends LuceneTestCase {
+
+  /** Tests sorting on type string */
+  public void testString() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
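+    // wrap the reader so the indexed terms are exposed as SortedDocValues for STRING sorting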
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // 'bar' comes before 'foo'
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type string with a missing value */
+  public void testStringMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null comes first
+    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests reverse sorting on type string */
+  public void testStringReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // 'foo' comes after 'bar' in reverse order
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type string_val */
+  public void testStringVal() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.BINARY));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // 'bar' comes before 'foo'
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type string_val with a missing value */
+  public void testStringValMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.BINARY));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null comes first
+    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+
+  /** Tests sorting on type string with a missing
+   *  value sorted first */
+  public void testStringMissingSortedFirst() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sf = new SortField("value", SortField.Type.STRING);
+    Sort sort = new Sort(sf);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null comes first
+    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+
+  /** Tests reverse sorting on type string with a missing
+   *  value sorted first */
+  public void testStringMissingSortedFirstReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sf = new SortField("value", SortField.Type.STRING, true);
+    Sort sort = new Sort(sf);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    // null comes last
+    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+
+  /** Tests sorting on type string with a missing
+   *  value sorted last */
+  public void testStringValMissingSortedLast() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sf = new SortField("value", SortField.Type.STRING);
+    sf.setMissingValue(SortField.STRING_LAST);
+    Sort sort = new Sort(sf);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    // null comes last
+    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+
+  /** Tests reverse sorting on type string with a missing
+   *  value sorted last */
+  public void testStringValMissingSortedLastReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sf = new SortField("value", SortField.Type.STRING, true);
+    sf.setMissingValue(SortField.STRING_LAST);
+    Sort sort = new Sort(sf);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null comes first
+    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests reverse sorting on type string_val */
+  public void testStringValReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.BINARY));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // 'foo' comes after 'bar' in reverse order
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on internal docid order */
+  public void testFieldDoc() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.NO));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.NO));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(SortField.FIELD_DOC);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // docid 0, then docid 1
+    assertEquals(0, td.scoreDocs[0].doc);
+    assertEquals(1, td.scoreDocs[1].doc);
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on reverse internal docid order */
+  public void testFieldDocReverse() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.NO));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.NO));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField(null, SortField.Type.DOC, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // docid 1, then docid 0
+    assertEquals(1, td.scoreDocs[0].doc);
+    assertEquals(0, td.scoreDocs[1].doc);
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests default sort (by score) */
+  public void testFieldScore() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort();
+
+    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
+    assertEquals(2, actual.totalHits);
+
+    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
+    // the two topdocs should be the same
+    assertEquals(expected.totalHits, actual.totalHits);
+    for (int i = 0; i < actual.scoreDocs.length; i++) {
+      assertEquals(actual.scoreDocs[i].doc, expected.scoreDocs[i].doc);
+    }
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests default sort (by score) in reverse */
+  public void testFieldScoreReverse() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField(null, SortField.Type.SCORE, true));
+
+    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
+    assertEquals(2, actual.totalHits);
+
+    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
+    // the two topdocs should be the reverse of each other
+    assertEquals(expected.totalHits, actual.totalHits);
+    assertEquals(actual.scoreDocs[0].doc, expected.scoreDocs[1].doc);
+    assertEquals(actual.scoreDocs[1].doc, expected.scoreDocs[0].doc);
+
+    ir.close();
+    dir.close();
+  }
+
+  /** Tests sorting on type int */
+  public void testInt() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new IntField("value", 300000, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
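+    // expose the indexed IntField values as NumericDocValues so SortField.Type.INT can sort them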
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.INTEGER));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // numeric order
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type int with a missing value */
+  public void testIntMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.INTEGER));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as a 0
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type int, specifying that missing values should be treated as Integer.MAX_VALUE */
+  public void testIntMissingLast() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.INTEGER));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sortField = new SortField("value", SortField.Type.INT);
+    sortField.setMissingValue(Integer.MAX_VALUE);
+    Sort sort = new Sort(sortField);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as Integer.MAX_VALUE
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type int in reverse */
+  public void testIntReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new IntField("value", 300000, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new IntField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.INTEGER));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type long */
+  public void testLong() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.LONG));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // numeric order
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type long with a missing value */
+  public void testLongMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.LONG));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as 0
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type long, specifying that missing values should be treated as Long.MAX_VALUE */
+  public void testLongMissingLast() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.LONG));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sortField = new SortField("value", SortField.Type.LONG);
+    sortField.setMissingValue(Long.MAX_VALUE);
+    Sort sort = new Sort(sortField);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as Long.MAX_VALUE
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type long in reverse */
+  public void testLongReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", -1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new LongField("value", 4, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.LONG));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type float */
+  public void testFloat() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.FLOAT));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // numeric order
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type float with a missing value */
+  public void testFloatMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.FLOAT));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as 0
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type float, specifying that missing values should be treated as Float.MAX_VALUE */
+  public void testFloatMissingLast() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.FLOAT));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sortField = new SortField("value", SortField.Type.FLOAT);
+    sortField.setMissingValue(Float.MAX_VALUE);
+    Sort sort = new Sort(sortField);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // null is treated as Float.MAX_VALUE
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type float in reverse */
+  public void testFloatReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.FLOAT));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(3, td.totalHits);
+    // reverse numeric order
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type double */
+  public void testDouble() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.DOUBLE));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(4, td.totalHits);
+    // numeric order
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type double with +/- zero */
+  public void testDoubleSignedZero() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new DoubleField("value", +0d, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", -0d, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.DOUBLE));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // numeric order
+    double v0 = searcher.doc(td.scoreDocs[0].doc).getField("value").numericValue().doubleValue();
+    double v1 = searcher.doc(td.scoreDocs[1].doc).getField("value").numericValue().doubleValue();
+    assertEquals(0, v0, 0d);
+    assertEquals(0, v1, 0d);
+    // check sign bits
+    assertEquals(1, Double.doubleToLongBits(v0) >>> 63);
+    assertEquals(0, Double.doubleToLongBits(v1) >>> 63);
+
+    ir.close();
+    dir.close();
+  }
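+
+  /*
+   * Side note on the sign-bit assertions above (an illustrative sketch, not
+   * part of the test): bit 63 of Double.doubleToLongBits(v) is the IEEE 754
+   * sign bit, so -0d and +0d can be told apart even though they compare equal
+   * as doubles:
+   *
+   *   assert Double.doubleToLongBits(-0d) >>> 63 == 1; // sign bit set
+   *   assert Double.doubleToLongBits(+0d) >>> 63 == 0;
+   */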
+  
+  /** Tests sorting on type double with a missing value */
+  public void testDoubleMissing() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.DOUBLE));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(4, td.totalHits);
+    // null treated as a 0
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting on type double, specifying that missing values should be treated as Double.MAX_VALUE */
+  public void testDoubleMissingLast() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.DOUBLE));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    SortField sortField = new SortField("value", SortField.Type.DOUBLE);
+    sortField.setMissingValue(Double.MAX_VALUE);
+    Sort sort = new Sort(sortField);
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(4, td.totalHits);
+    // null treated as Double.MAX_VALUE
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
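+
+  /*
+   * For the opposite ordering (missing values first on an ascending sort), a
+   * sketch would presumably substitute negative infinity instead; untested
+   * here:
+   *
+   *   SortField sf = new SortField("value", SortField.Type.DOUBLE);
+   *   sf.setMissingValue(Double.NEGATIVE_INFINITY);
+   */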
+  
+  /** Tests sorting on type double in reverse */
+  public void testDoubleReverse() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
+                     Collections.singletonMap("value", Type.DOUBLE));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(4, td.totalHits);
+    // numeric order
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  public void testEmptyStringVsNullStringSort() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+                        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    Document doc = new Document();
+    doc.add(newStringField("f", "", Field.Store.NO));
+    doc.add(newStringField("t", "1", Field.Store.NO));
+    w.addDocument(doc);
+    w.commit();
+    doc = new Document();
+    doc.add(newStringField("t", "1", Field.Store.NO));
+    w.addDocument(doc);
+
+    IndexReader r = UninvertingReader.wrap(DirectoryReader.open(w, true), 
+                    Collections.singletonMap("f", Type.SORTED));
+    w.shutdown();
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new TermQuery(new Term("t", "1")), null, 10, new Sort(new SortField("f", SortField.Type.STRING)));
+    assertEquals(2, hits.totalHits);
+    // null sorts first
+    assertEquals(1, hits.scoreDocs[0].doc);
+    assertEquals(0, hits.scoreDocs[1].doc);
+    r.close();
+    dir.close();
+  }
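+
+  /*
+   * Background for the "null sorts first" assertion (a sketch, not asserted
+   * here): with the SortedDocValues API, a document without a value reports
+   * ord -1, which orders before ord 0 (the empty string in this test):
+   *
+   *   SortedDocValues dv = ...;    // illustration only
+   *   int ord = dv.getOrd(docID);  // -1 means "missing", sorts first
+   */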
+  
+  /** test that we don't throw an exception on a multi-valued field (LUCENE-2142) */
+  public void testMultiValuedField() throws IOException {
+    Directory indexStore = newDirectory();
+    IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    for(int i=0; i<5; i++) {
+        Document doc = new Document();
+        doc.add(new StringField("string", "a"+i, Field.Store.NO));
+        doc.add(new StringField("string", "b"+i, Field.Store.NO));
+        writer.addDocument(doc);
+    }
+    writer.forceMerge(1); // force a single segment, so it has a higher unique term count in all cases
+    writer.shutdown();
+    Sort sort = new Sort(
+        new SortField("string", SortField.Type.STRING),
+        SortField.FIELD_DOC);
+    // this should not throw AIOOBE or RuntimeEx
+    IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
+                         Collections.singletonMap("string", Type.SORTED));
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.search(new MatchAllDocsQuery(), null, 500, sort);
+    reader.close();
+    indexStore.close();
+  }
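+
+  /*
+   * Note: the test above only pins down that wrapping a multi-valued field as
+   * Type.SORTED does not throw; which of the per-document values wins the
+   * sort is deliberately left unasserted.
+   */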
+  
+  public void testMaxScore() throws Exception {
+    Directory d = newDirectory();
+    // Not RIW because we need exactly 2 segs:
+    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    int id = 0;
+    for(int seg=0;seg<2;seg++) {
+      for(int docIDX=0;docIDX<10;docIDX++) {
+        Document doc = new Document();
+        doc.add(new IntField("id", docIDX, Field.Store.YES));
+        StringBuilder sb = new StringBuilder();
+        for(int i=0;i<id;i++) {
+          sb.append(' ');
+          sb.append("text");
+        }
+        doc.add(newTextField("body", sb.toString(), Field.Store.NO));
+        w.addDocument(doc);
+        id++;
+      }
+      w.commit();
+    }
+
+    IndexReader r = UninvertingReader.wrap(DirectoryReader.open(w, true),
+                    Collections.singletonMap("id", Type.INTEGER));
+    w.shutdown();
+    Query q = new TermQuery(new Term("body", "text"));
+    IndexSearcher s = newSearcher(r);
+    float maxScore = s.search(q , 10).getMaxScore();
+    assertEquals(maxScore, s.search(q, null, 3, Sort.INDEXORDER, random().nextBoolean(), true).getMaxScore(), 0.0);
+    assertEquals(maxScore, s.search(q, null, 3, Sort.RELEVANCE, random().nextBoolean(), true).getMaxScore(), 0.0);
+    assertEquals(maxScore, s.search(q, null, 3, new Sort(new SortField[] {new SortField("id", SortField.Type.INT, false)}), random().nextBoolean(), true).getMaxScore(), 0.0);
+    assertEquals(maxScore, s.search(q, null, 3, new Sort(new SortField[] {new SortField("id", SortField.Type.INT, true)}), random().nextBoolean(), true).getMaxScore(), 0.0);
+    r.close();
+    d.close();
+  }
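+
+  /*
+   * For reference, the two trailing booleans in the sorted searches above are
+   * doDocScores and doMaxScore: the test randomizes whether per-hit scores
+   * are materialized, but always asks for the max score to be tracked.
+   */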
+  
+  /** test sorts when there's nothing in the index */
+  public void testEmptyIndex() throws Exception {
+    IndexSearcher empty = newSearcher(new MultiReader());
+    Query query = new TermQuery(new Term("contents", "foo"));
+  
+    Sort sort = new Sort();
+    TopDocs td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+
+    sort.setSort(SortField.FIELD_DOC);
+    td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+
+    sort.setSort(new SortField("int", SortField.Type.INT), SortField.FIELD_DOC);
+    td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+    
+    sort.setSort(new SortField("string", SortField.Type.STRING, true), SortField.FIELD_DOC);
+    td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+    
+    sort.setSort(new SortField("string_val", SortField.Type.STRING_VAL, true), SortField.FIELD_DOC);
+    td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+
+    sort.setSort(new SortField("float", SortField.Type.FLOAT), new SortField("string", SortField.Type.STRING));
+    td = empty.search(query, null, 10, sort, true, true);
+    assertEquals(0, td.totalHits);
+  }
+  
+  /** Tests sorting a single document */
+  public void testSortOneDocument() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(),
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(1, td.totalHits);
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting a single document with scores */
+  public void testSortOneDocumentWithScores() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(),
+                     Collections.singletonMap("value", Type.SORTED));
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
+
+    TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
+    assertEquals(1, expected.totalHits);
+    TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), null, 10, sort, true, true);
+    
+    assertEquals(expected.totalHits, actual.totalHits);
+    assertEquals(expected.scoreDocs[0].score, actual.scoreDocs[0].score, 0F);
+
+    ir.close();
+    dir.close();
+  }
+  
+  /** Tests sorting with two fields */
+  public void testSortTwoFields() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
+    doc.add(newStringField("value", "foo", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
+    doc.add(newStringField("value", "bar", Field.Store.YES));
+    writer.addDocument(doc);
+    Map<String,Type> mappings = new HashMap<>();
+    mappings.put("tievalue", Type.SORTED);
+    mappings.put("value", Type.SORTED);
+    
+    IndexReader ir = UninvertingReader.wrap(writer.getReader(), mappings);
+    writer.shutdown();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    // tievalue, then value
+    Sort sort = new Sort(new SortField("tievalue", SortField.Type.STRING),
+                         new SortField("value", SortField.Type.STRING));
+
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    assertEquals(2, td.totalHits);
+    // 'bar' comes before 'foo'
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+
+    ir.close();
+    dir.close();
+  }
+
+  public void testScore() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newStringField("value", "bar", Field.Store.NO));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(newStringField("value", "foo", Field.Store.NO));
+    writer.addDocument(doc);
+    IndexReader ir = writer.getReader();
+    writer.shutdown();
+
+    IndexSearcher searcher = newSearcher(ir);
+    Sort sort = new Sort(SortField.FIELD_SCORE);
+
+    final BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermQuery(new Term("value", "foo")), Occur.SHOULD);
+    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
+    TopDocs td = searcher.search(bq, 10, sort);
+    assertEquals(2, td.totalHits);
+    assertEquals(1, td.scoreDocs[0].doc);
+    assertEquals(0, td.scoreDocs[1].doc);
+
+    ir.close();
+    dir.close();
+  }
+}
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java
new file mode 100644
index 0000000..1284470
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java
@@ -0,0 +1,595 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
+import org.apache.lucene.document.BinaryDocValuesField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestFieldCacheVsDocValues extends LuceneTestCase {
+  
+  public void testByteMissingVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestMissingVsFieldCache(Byte.MIN_VALUE, Byte.MAX_VALUE);
+    }
+  }
+  
+  public void testShortMissingVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestMissingVsFieldCache(Short.MIN_VALUE, Short.MAX_VALUE);
+    }
+  }
+  
+  public void testIntMissingVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestMissingVsFieldCache(Integer.MIN_VALUE, Integer.MAX_VALUE);
+    }
+  }
+  
+  public void testLongMissingVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestMissingVsFieldCache(Long.MIN_VALUE, Long.MAX_VALUE);
+    }
+  }
+  
+  public void testSortedFixedLengthVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      int fixedLength = TestUtil.nextInt(random(), 1, 10);
+      doTestSortedVsFieldCache(fixedLength, fixedLength);
+    }
+  }
+  
+  public void testSortedVariableLengthVsFieldCache() throws Exception {
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestSortedVsFieldCache(1, 10);
+    }
+  }
+  
+  public void testSortedSetFixedLengthVsUninvertedField() throws Exception {
+    assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      int fixedLength = TestUtil.nextInt(random(), 1, 10);
+      doTestSortedSetVsUninvertedField(fixedLength, fixedLength);
+    }
+  }
+  
+  public void testSortedSetVariableLengthVsUninvertedField() throws Exception {
+    assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
+    int numIterations = atLeast(1);
+    for (int i = 0; i < numIterations; i++) {
+      doTestSortedSetVsUninvertedField(1, 10);
+    }
+  }
+  
+  // LUCENE-4853
+  public void testHugeBinaryValues() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    // FSDirectory because SimpleText will consume gobs of
+    // space when storing big binary values:
+    Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
+    boolean doFixed = random().nextBoolean();
+    int numDocs;
+    int fixedLength = 0;
+    if (doFixed) {
+      // Sometimes make all values fixed length since some
+      // codecs have different code paths for this:
+      numDocs = TestUtil.nextInt(random(), 10, 20);
+      fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024);
+    } else {
+      numDocs = TestUtil.nextInt(random(), 100, 200);
+    }
+    IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+    List<byte[]> docBytes = new ArrayList<>();
+    long totalBytes = 0;
+    for(int docID=0;docID<numDocs;docID++) {
+      // we don't use RandomIndexWriter because it might add
+      // more docvalues than we expect!
+
+      // Must be > 64KB in size to ensure more than 2 pages in
+      // PagedBytes would be needed:
+      int numBytes;
+      if (doFixed) {
+        numBytes = fixedLength;
+      } else if (docID == 0 || random().nextInt(5) == 3) {
+        numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024);
+      } else {
+        numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024);
+      }
+      totalBytes += numBytes;
+      if (totalBytes > 5 * 1024 * 1024) {
+        break;
+      }
+      byte[] bytes = new byte[numBytes];
+      random().nextBytes(bytes);
+      docBytes.add(bytes);
+      Document doc = new Document();      
+      BytesRef b = new BytesRef(bytes);
+      b.length = bytes.length;
+      doc.add(new BinaryDocValuesField("field", b));
+      doc.add(new StringField("id", ""+docID, Field.Store.YES));
+      try {
+        w.addDocument(doc);
+      } catch (IllegalArgumentException iae) {
+        if (!iae.getMessage().contains("is too large")) {
+          throw iae;
+        } else {
+          // OK: some codecs can't handle binary DV > 32K
+          assertFalse(codecAcceptsHugeBinaryValues("field"));
+          w.rollback();
+          d.close();
+          return;
+        }
+      }
+    }
+    
+    DirectoryReader r;
+    try {
+      r = DirectoryReader.open(w, true);
+    } catch (IllegalArgumentException iae) {
+      if (!iae.getMessage().contains("is too large")) {
+        throw iae;
+      } else {
+        assertFalse(codecAcceptsHugeBinaryValues("field"));
+
+        // OK: some codecs can't handle binary DV > 32K
+        w.rollback();
+        d.close();
+        return;
+      }
+    }
+    w.shutdown();
+
+    AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
+
+    BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
+    for(int docID=0;docID<docBytes.size();docID++) {
+      StoredDocument doc = ar.document(docID);
+      BytesRef bytes = new BytesRef();
+      s.get(docID, bytes);
+      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
+      assertEquals(expected.length, bytes.length);
+      assertEquals(new BytesRef(expected), bytes);
+    }
+
+    assertTrue(codecAcceptsHugeBinaryValues("field"));
+
+    ar.close();
+    d.close();
+  }
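+
+  /*
+   * The try/catch blocks above absorb the "is too large" rejection that
+   * codecs with a binary doc-values size cap may throw either at addDocument
+   * or when the near-real-time reader is opened; for such codecs the test
+   * reduces to asserting that the rejection actually happened.
+   */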
+
+  // TODO: get this out of here and into the deprecated codecs (4.0, 4.2)
+  public void testHugeBinaryValueLimit() throws Exception {
+    // We only test DVFormats that have a limit
+    assumeFalse("test requires codec with limits on max binary field length", codecAcceptsHugeBinaryValues("field"));
+    Analyzer analyzer = new MockAnalyzer(random());
+    // FSDirectory because SimpleText will consume gobs of
+    // space when storing big binary values:
+    Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
+    boolean doFixed = random().nextBoolean();
+    int numDocs;
+    int fixedLength = 0;
+    if (doFixed) {
+      // Sometimes make all values fixed length since some
+      // codecs have different code paths for this:
+      numDocs = TestUtil.nextInt(random(), 10, 20);
+      fixedLength = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
+    } else {
+      numDocs = TestUtil.nextInt(random(), 100, 200);
+    }
+    IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+    List<byte[]> docBytes = new ArrayList<>();
+    long totalBytes = 0;
+    for(int docID=0;docID<numDocs;docID++) {
+      // we don't use RandomIndexWriter because it might add
+      // more docvalues than we expect!
+
+      // Must be > 64KB in size to ensure more than 2 pages in
+      // PagedBytes would be needed:
+      int numBytes;
+      if (doFixed) {
+        numBytes = fixedLength;
+      } else if (docID == 0 || random().nextInt(5) == 3) {
+        numBytes = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
+      } else {
+        numBytes = TestUtil.nextInt(random(), 1, Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH);
+      }
+      totalBytes += numBytes;
+      if (totalBytes > 5 * 1024 * 1024) {
+        break;
+      }
+      byte[] bytes = new byte[numBytes];
+      random().nextBytes(bytes);
+      docBytes.add(bytes);
+      Document doc = new Document();      
+      BytesRef b = new BytesRef(bytes);
+      b.length = bytes.length;
+      doc.add(new BinaryDocValuesField("field", b));
+      doc.add(new StringField("id", ""+docID, Field.Store.YES));
+      w.addDocument(doc);
+    }
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    w.shutdown();
+
+    AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
+
+    BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
+    for(int docID=0;docID<docBytes.size();docID++) {
+      StoredDocument doc = ar.document(docID);
+      BytesRef bytes = new BytesRef();
+      s.get(docID, bytes);
+      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
+      assertEquals(expected.length, bytes.length);
+      assertEquals(new BytesRef(expected), bytes);
+    }
+
+    ar.close();
+    d.close();
+  }
+  
+  private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+    Document doc = new Document();
+    Field idField = new StringField("id", "", Field.Store.NO);
+    Field indexedField = new StringField("indexed", "", Field.Store.NO);
+    Field dvField = new SortedDocValuesField("dv", new BytesRef());
+    doc.add(idField);
+    doc.add(indexedField);
+    doc.add(dvField);
+    
+    // index some docs
+    int numDocs = atLeast(300);
+    for (int i = 0; i < numDocs; i++) {
+      idField.setStringValue(Integer.toString(i));
+      final int length;
+      if (minLength == maxLength) {
+        length = minLength; // fixed length
+      } else {
+        length = TestUtil.nextInt(random(), minLength, maxLength);
+      }
+      String value = TestUtil.randomSimpleString(random(), length);
+      indexedField.setStringValue(value);
+      dvField.setBytesValue(new BytesRef(value));
+      writer.addDocument(doc);
+      if (random().nextInt(31) == 0) {
+        writer.commit();
+      }
+    }
+    
+    // delete some docs
+    int numDeletions = random().nextInt(numDocs/10);
+    for (int i = 0; i < numDeletions; i++) {
+      int id = random().nextInt(numDocs);
+      writer.deleteDocuments(new Term("id", Integer.toString(id)));
+    }
+    writer.shutdown();
+    
+    // compare
+    DirectoryReader ir = DirectoryReader.open(dir);
+    for (AtomicReaderContext context : ir.leaves()) {
+      AtomicReader r = context.reader();
+      SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
+      SortedDocValues actual = r.getSortedDocValues("dv");
+      assertEquals(r.maxDoc(), expected, actual);
+    }
+    ir.close();
+    dir.close();
+  }
+  
+  private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+    
+    // index some docs
+    int numDocs = atLeast(300);
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
+      doc.add(idField);
+      final int length;
+      if (minLength == maxLength) {
+        length = minLength; // fixed length
+      } else {
+        length = TestUtil.nextInt(random(), minLength, maxLength);
+      }
+      int numValues = random().nextInt(17);
+      // create a random list of strings
+      List<String> values = new ArrayList<>();
+      for (int v = 0; v < numValues; v++) {
+        values.add(TestUtil.randomSimpleString(random(), length));
+      }
+      
+      // add in any order to the indexed field
+      ArrayList<String> unordered = new ArrayList<>(values);
+      Collections.shuffle(unordered, random());
+      for (String v : unordered) {
+        doc.add(newStringField("indexed", v, Field.Store.NO));
+      }
+
+      // add in any order to the dv field
+      ArrayList<String> unordered2 = new ArrayList<>(values);
+      Collections.shuffle(unordered2, random());
+      for (String v : unordered2) {
+        doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
+      }
+
+      writer.addDocument(doc);
+      if (random().nextInt(31) == 0) {
+        writer.commit();
+      }
+    }
+    
+    // delete some docs
+    int numDeletions = random().nextInt(numDocs/10);
+    for (int i = 0; i < numDeletions; i++) {
+      int id = random().nextInt(numDocs);
+      writer.deleteDocuments(new Term("id", Integer.toString(id)));
+    }
+    
+    // compare per-segment
+    DirectoryReader ir = writer.getReader();
+    for (AtomicReaderContext context : ir.leaves()) {
+      AtomicReader r = context.reader();
+      SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed", null);
+      SortedSetDocValues actual = r.getSortedSetDocValues("dv");
+      assertEquals(r.maxDoc(), expected, actual);
+    }
+    ir.close();
+    
+    writer.forceMerge(1);
+    
+    // now compare again after the merge
+    ir = writer.getReader();
+    AtomicReader ar = getOnlySegmentReader(ir);
+    SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed", null);
+    SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
+    assertEquals(ir.maxDoc(), expected, actual);
+    ir.close();
+    
+    writer.shutdown();
+    dir.close();
+  }
+  
+  private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
+    assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+    Field idField = new StringField("id", "", Field.Store.NO);
+    Field indexedField = newStringField("indexed", "", Field.Store.NO);
+    Field dvField = new NumericDocValuesField("dv", 0);
+
+    // index some docs
+    int numDocs = atLeast(300);
+    // numDocs should always be > 256 so that in case of a codec that optimizes
+    // for numbers of values <= 256, all storage layouts are tested
+    assert numDocs > 256;
+    for (int i = 0; i < numDocs; i++) {
+      idField.setStringValue(Integer.toString(i));
+      long value = longs.next();
+      indexedField.setStringValue(Long.toString(value));
+      dvField.setLongValue(value);
+      Document doc = new Document();
+      doc.add(idField);
+      // 1/4 of the time we neglect to add the fields
+      if (random().nextInt(4) > 0) {
+        doc.add(indexedField);
+        doc.add(dvField);
+      }
+      writer.addDocument(doc);
+      if (random().nextInt(31) == 0) {
+        writer.commit();
+      }
+    }
+    
+    // delete some docs
+    int numDeletions = random().nextInt(numDocs/10);
+    for (int i = 0; i < numDeletions; i++) {
+      int id = random().nextInt(numDocs);
+      writer.deleteDocuments(new Term("id", Integer.toString(id)));
+    }
+
+    // merge some segments and ensure that at least one of them has more than
+    // 256 values
+    writer.forceMerge(numDocs / 256);
+
+    writer.shutdown();
+    
+    // compare
+    DirectoryReader ir = DirectoryReader.open(dir);
+    for (AtomicReaderContext context : ir.leaves()) {
+      AtomicReader r = context.reader();
+      Bits expected = FieldCache.DEFAULT.getDocsWithField(r, "indexed");
+      Bits actual = FieldCache.DEFAULT.getDocsWithField(r, "dv");
+      assertEquals(expected, actual);
+    }
+    ir.close();
+    dir.close();
+  }
+  
+  private void doTestMissingVsFieldCache(final long minValue, final long maxValue) throws Exception {
+    doTestMissingVsFieldCache(new LongProducer() {
+      @Override
+      long next() {
+        return TestUtil.nextLong(random(), minValue, maxValue);
+      }
+    });
+  }
+  
+  static abstract class LongProducer {
+    abstract long next();
+  }
+
+  private void assertEquals(Bits expected, Bits actual) throws Exception {
+    assertEquals(expected.length(), actual.length());
+    for (int i = 0; i < expected.length(); i++) {
+      assertEquals(expected.get(i), actual.get(i));
+    }
+  }
+  
+  private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
+    assertEquals(maxDoc, DocValues.singleton(expected), DocValues.singleton(actual));
+  }
+  
+  private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
+    // can be null for the segment if no docs actually had any SortedSetDocValues
+    // in this case FieldCache.getDocTermOrds returns EMPTY
+    if (actual == null) {
+      assertEquals(DocValues.EMPTY_SORTED_SET, expected);
+      return;
+    }
+    assertEquals(expected.getValueCount(), actual.getValueCount());
+    // compare ord lists
+    for (int i = 0; i < maxDoc; i++) {
+      expected.setDocument(i);
+      actual.setDocument(i);
+      long expectedOrd;
+      while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
+        assertEquals(expectedOrd, actual.nextOrd());
+      }
+      assertEquals(NO_MORE_ORDS, actual.nextOrd());
+    }
+    
+    // compare ord dictionary
+    BytesRef expectedBytes = new BytesRef();
+    BytesRef actualBytes = new BytesRef();
+    for (long i = 0; i < expected.getValueCount(); i++) {
+      expected.lookupOrd(i, expectedBytes);
+      actual.lookupOrd(i, actualBytes);
+      assertEquals(expectedBytes, actualBytes);
+    }
+    
+    // compare termsenum
+    assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
+  }
+  
+  private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
+    BytesRef ref;
+    
+    // sequential next() through all terms
+    while ((ref = expected.next()) != null) {
+      assertEquals(ref, actual.next());
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    assertNull(actual.next());
+    
+    // sequential seekExact(ord) through all terms
+    for (long i = 0; i < numOrds; i++) {
+      expected.seekExact(i);
+      actual.seekExact(i);
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    
+    // sequential seekExact(BytesRef) through all terms
+    for (long i = 0; i < numOrds; i++) {
+      expected.seekExact(i);
+      assertTrue(actual.seekExact(expected.term()));
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    
+    // sequential seekCeil(BytesRef) through all terms
+    for (long i = 0; i < numOrds; i++) {
+      expected.seekExact(i);
+      assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    
+    // random seekExact(ord)
+    for (long i = 0; i < numOrds; i++) {
+      long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
+      expected.seekExact(randomOrd);
+      actual.seekExact(randomOrd);
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    
+    // random seekExact(BytesRef)
+    for (long i = 0; i < numOrds; i++) {
+      long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
+      expected.seekExact(randomOrd);
+      actual.seekExact(expected.term());
+      assertEquals(expected.ord(), actual.ord());
+      assertEquals(expected.term(), actual.term());
+    }
+    
+    // random seekCeil(BytesRef)
+    for (long i = 0; i < numOrds; i++) {
+      BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
+      SeekStatus expectedStatus = expected.seekCeil(target);
+      assertEquals(expectedStatus, actual.seekCeil(target));
+      if (expectedStatus != SeekStatus.END) {
+        assertEquals(expected.ord(), actual.ord());
+        assertEquals(expected.term(), actual.term());
+      }
+    }
+  }
+  
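+  /**
+   * Whether the default codec can store arbitrarily large binary doc values;
+   * the formats excluded below cap binary values (e.g. at
+   * Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH).
+   */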
+  protected boolean codecAcceptsHugeBinaryValues(String field) {
+    String name = Codec.getDefault().getName();
+    return !(name.equals("Lucene40") || name.equals("Lucene41") || name.equals("Lucene42") || name.equals("Memory") || name.equals("Direct"));
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
similarity index 84%
rename from lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java
rename to lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
index abc3904..7c0f689 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.index;
+package org.apache.lucene.uninverting;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -30,14 +30,20 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.TestUtil;
 
-public class TestDocValuesWithThreads extends LuceneTestCase {
+public class TestFieldCacheWithThreads extends LuceneTestCase {
 
   public void test() throws Exception {
     Directory dir = newDirectory();
@@ -62,7 +68,7 @@
     }
 
     w.forceMerge(1);
-    final IndexReader r = w.getReader();
+    final IndexReader r = DirectoryReader.open(w, true);
     w.shutdown();
 
     assertEquals(1, r.leaves().size());
@@ -78,7 +84,7 @@
           public void run() {
             try {
               //NumericDocValues ndv = ar.getNumericDocValues("number");
-              FieldCache.Longs ndv = FieldCache.DEFAULT.getLongs(ar, "number", false);
+              NumericDocValues ndv = FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
               //BinaryDocValues bdv = ar.getBinaryDocValues("bytes");
               BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes", false);
               SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
@@ -90,16 +96,16 @@
                 int docID = threadRandom.nextInt(numDocs);
                 switch(threadRandom.nextInt(4)) {
                 case 0:
-                  assertEquals((int) numbers.get(docID).longValue(), FieldCache.DEFAULT.getInts(ar, "number", false).get(docID));
+                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false).get(docID));
                   break;
                 case 1:
-                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getLongs(ar, "number", false).get(docID));
+                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false).get(docID));
                   break;
                 case 2:
-                  assertEquals(Float.intBitsToFloat((int) numbers.get(docID).longValue()), FieldCache.DEFAULT.getFloats(ar, "number", false).get(docID), 0.0f);
+                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, false).get(docID));
                   break;
                 case 3:
-                  assertEquals(Double.longBitsToDouble(numbers.get(docID).longValue()), FieldCache.DEFAULT.getDoubles(ar, "number", false).get(docID), 0.0);
+                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false).get(docID));
                   break;
                 }
                 bdv.get(docID, scratch);
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java
new file mode 100644
index 0000000..687c00d
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java
@@ -0,0 +1,156 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericTerms32 extends LuceneTestCase {
+  // distance of entries
+  private static int distance;
+  // shift the starting point of the values to the left, to also get negative values:
+  private static final int startOffset = -(1 << 15);
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1 << 30) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+    
+    final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
+    storedInt.setStored(true);
+    storedInt.freeze();
+
+    final FieldType storedInt8 = new FieldType(storedInt);
+    storedInt8.setNumericPrecisionStep(8);
+
+    final FieldType storedInt4 = new FieldType(storedInt);
+    storedInt4.setNumericPrecisionStep(4);
+
+    final FieldType storedInt2 = new FieldType(storedInt);
+    storedInt2.setNumericPrecisionStep(2);
+
+    IntField
+      field8 = new IntField("field8", 0, storedInt8),
+      field4 = new IntField("field4", 0, storedInt4),
+      field2 = new IntField("field2", 0, storedInt2);
+    
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field4); doc.add(field2);
+    
+    // Add a series of noDocs docs with increasing int values
+    for (int l=0; l<noDocs; l++) {
+      int val=distance*l+startOffset;
+      field8.setIntValue(val);
+      field4.setIntValue(val);
+      field2.setIntValue(val);
+      writer.addDocument(doc);
+    }
+  
+    Map<String,Type> map = new HashMap<>();
+    map.put("field2", Type.INTEGER);
+    map.put("field4", Type.INTEGER);
+    map.put("field8", Type.INTEGER);
+    reader = UninvertingReader.wrap(writer.getReader(), map);
+    searcher=newSearcher(reader);
+    writer.shutdown();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  private void testSorting(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    // 10 random tests, the index order is ascending,
+    // so using a reverse sort field should return documents in descending order
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int i = 0; i < num; i++) {
+      int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
+      int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
+      if (topDocs.totalHits==0) continue;
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      int last = searcher.doc(sd[0].doc).getField(field).numericValue().intValue();
+      for (int j=1; j<sd.length; j++) {
+        int act = searcher.doc(sd[j].doc).getField(field).numericValue().intValue();
+        assertTrue("Docs should be sorted backwards", last>act );
+        last=act;
+      }
+    }
+  }
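+
+  /*
+   * Note: the precision step only changes how many trie terms the
+   * NumericRangeQuery visits; the uninverted sort should be unaffected, since
+   * the underlying parser skips the lower-precision trie terms.
+   */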
+
+  @Test
+  public void testSorting_8bit() throws Exception {
+    testSorting(8);
+  }
+  
+  @Test
+  public void testSorting_4bit() throws Exception {
+    testSorting(4);
+  }
+  
+  @Test
+  public void testSorting_2bit() throws Exception {
+    testSorting(2);
+  }  
+}
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java
new file mode 100644
index 0000000..83b8353
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java
@@ -0,0 +1,166 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericTerms64 extends LuceneTestCase {
+  // distance of entries
+  private static long distance;
+  // shift the starting point of the values to the left, to also get negative values:
+  private static final long startOffset = -(1L << 31);
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1L << 60) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+
+    final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
+    storedLong.setStored(true);
+    storedLong.freeze();
+
+    final FieldType storedLong8 = new FieldType(storedLong);
+    storedLong8.setNumericPrecisionStep(8);
+
+    final FieldType storedLong4 = new FieldType(storedLong);
+    storedLong4.setNumericPrecisionStep(4);
+
+    final FieldType storedLong6 = new FieldType(storedLong);
+    storedLong6.setNumericPrecisionStep(6);
+
+    final FieldType storedLong2 = new FieldType(storedLong);
+    storedLong2.setNumericPrecisionStep(2);
+
+    LongField
+      field8 = new LongField("field8", 0L, storedLong8),
+      field6 = new LongField("field6", 0L, storedLong6),
+      field4 = new LongField("field4", 0L, storedLong4),
+      field2 = new LongField("field2", 0L, storedLong2);
+
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2);
+    
+    // Add a series of noDocs docs with increasing long values, by updating the fields
+    for (int l=0; l<noDocs; l++) {
+      long val=distance*l+startOffset;
+      field8.setLongValue(val);
+      field6.setLongValue(val);
+      field4.setLongValue(val);
+      field2.setLongValue(val);
+      writer.addDocument(doc);
+    }
+    Map<String,Type> map = new HashMap<>();
+    map.put("field2", Type.LONG);
+    map.put("field4", Type.LONG);
+    map.put("field6", Type.LONG);
+    map.put("field8", Type.LONG);
+    reader = UninvertingReader.wrap(writer.getReader(), map);
+    searcher=newSearcher(reader);
+    writer.shutdown();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  private void testSorting(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    // 10 random tests, the index order is ascending,
+    // so using a reverse sort field should return documents in descending order
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
+      long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
+      if (topDocs.totalHits==0) continue;
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      long last=searcher.doc(sd[0].doc).getField(field).numericValue().longValue();
+      for (int j=1; j<sd.length; j++) {
+        long act=searcher.doc(sd[j].doc).getField(field).numericValue().longValue();
+        assertTrue("Docs should be sorted backwards", last>act );
+        last=act;
+      }
+    }
+  }
+
+  @Test
+  public void testSorting_8bit() throws Exception {
+    testSorting(8);
+  }
+  
+  @Test
+  public void testSorting_6bit() throws Exception {
+    testSorting(6);
+  }
+  
+  @Test
+  public void testSorting_4bit() throws Exception {
+    testSorting(4);
+  }
+  
+  @Test
+  public void testSorting_2bit() throws Exception {
+    testSorting(2);
+  }
+}
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java
new file mode 100644
index 0000000..3b8253f
--- /dev/null
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java
@@ -0,0 +1,248 @@
+package org.apache.lucene.uninverting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestUninvertingReader extends LuceneTestCase {
+  
+  public void testSortedSetInteger() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new IntField("foo", 5, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new IntField("foo", 5, Field.Store.NO));
+    doc.add(new IntField("foo", -3, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), 
+                         Collections.singletonMap("foo", Type.SORTED_SET_INTEGER));
+    AtomicReader ar = ir.leaves().get(0).reader();
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(-3, NumericUtils.prefixCodedToInt(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(5, NumericUtils.prefixCodedToInt(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
+  public void testSortedSetFloat() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
+    doc.add(new IntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), 
+                         Collections.singletonMap("foo", Type.SORTED_SET_FLOAT));
+    AtomicReader ar = ir.leaves().get(0).reader();
+    
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(Float.floatToRawIntBits(-3f), NumericUtils.prefixCodedToInt(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(Float.floatToRawIntBits(5f), NumericUtils.prefixCodedToInt(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
+  public void testSortedSetLong() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new LongField("foo", 5, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LongField("foo", 5, Field.Store.NO));
+    doc.add(new LongField("foo", -3, Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), 
+        Collections.singletonMap("foo", Type.SORTED_SET_LONG));
+    AtomicReader ar = ir.leaves().get(0).reader();
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(-3, NumericUtils.prefixCodedToLong(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(5, NumericUtils.prefixCodedToLong(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
+  public void testSortedSetDouble() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    
+    Document doc = new Document();
+    doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
+    iw.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
+    doc.add(new LongField("foo", Double.doubleToRawLongBits(-3d), Field.Store.NO));
+    iw.addDocument(doc);
+    
+    iw.forceMerge(1);
+    iw.shutdown();
+    
+    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), 
+        Collections.singletonMap("foo", Type.SORTED_SET_DOUBLE));
+    AtomicReader ar = ir.leaves().get(0).reader();
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
+    assertNoSilentInsanity(ar, "foo", DocValuesType.SORTED_SET);
+    assertEquals(2, v.getValueCount());
+    
+    v.setDocument(0);
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    v.setDocument(1);
+    assertEquals(0, v.nextOrd());
+    assertEquals(1, v.nextOrd());
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
+    
+    BytesRef value = new BytesRef();
+    v.lookupOrd(0, value);
+    assertEquals(Double.doubleToRawLongBits(-3d), NumericUtils.prefixCodedToLong(value));
+    
+    v.lookupOrd(1, value);
+    assertEquals(Double.doubleToRawLongBits(5d), NumericUtils.prefixCodedToLong(value));
+    
+    ir.close();
+    dir.close();
+  }
+  
+  private void assertNoSilentInsanity(AtomicReader reader, String field, DocValuesType type) throws IOException {
+    Set<DocValuesType> insaneTypes = EnumSet.allOf(DocValuesType.class);
+    insaneTypes.remove(type);
+    
+    for (DocValuesType t : insaneTypes) {
+      tryToBeInsane(reader, field, type, t);
+    }
+  }
+  
+  private void tryToBeInsane(AtomicReader reader, String field, DocValuesType actualType, DocValuesType insaneType) throws IOException {
+    try {
+      switch(insaneType) {
+        case NUMERIC:
+          reader.getNumericDocValues(field);
+          break;
+        case SORTED:
+          reader.getSortedDocValues(field);
+          break;
+        case BINARY:
+          reader.getBinaryDocValues(field);
+          break;
+        case SORTED_SET:
+          reader.getSortedSetDocValues(field);
+          break;
+        default:
+          throw new AssertionError();
+      }
+      fail("didn't get expected exception: actual=" + actualType + ",insane=" + insaneType);
+    } catch (IllegalStateException expected) {}
+  }
+}
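For callers migrating off FieldCache, a minimal usage sketch of the wrapper exercised above (the "price" field, directory, and query are illustrative assumptions, not part of this patch):

    // Uninvert the indexed-but-not-docvalues "price" field so the DocValues
    // API (and therefore sorting) works against an older index.
    Map<String,UninvertingReader.Type> mapping =
        Collections.singletonMap("price", UninvertingReader.Type.INTEGER);
    DirectoryReader reader = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10,
                                 new Sort(new SortField("price", SortField.Type.INT)));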
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
index 1c7b32a..aef52ea 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreProvider.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.index.IndexReader; // for javadocs
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FieldCache; // for javadocs
 
 /**
  * An instance of this subclass should be returned by
@@ -32,7 +31,7 @@
  * <p>Since Lucene 2.9, queries operate on each segment of an index separately,
  * so the protected {@link #context} field can be used to resolve doc IDs,
  * as the supplied <code>doc</code> ID is per-segment and without knowledge
- * of the IndexReader you cannot access the document or {@link FieldCache}.
+ * of the IndexReader you cannot access the document or DocValues.
  * 
  * @lucene.experimental
  * @since 2.9.2
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
index 950b07d..ffbef5f 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
@@ -20,12 +20,12 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.UnicodeUtil;
@@ -44,12 +44,12 @@
   protected final CharsRef spareChars = new CharsRef();
 
   public DocTermsIndexDocValues(ValueSource vs, AtomicReaderContext context, String field) throws IOException {
-    try {
-      termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
-    } catch (RuntimeException e) {
-      throw new DocTermsIndexException(field, e);
-    }
+    this(vs, open(context, field));
+  }
+  
+  protected DocTermsIndexDocValues(ValueSource vs, SortedDocValues termsIndex) {
     this.vs = vs;
+    this.termsIndex = termsIndex;
   }
 
   protected abstract String toTerm(String readableValue);
@@ -162,6 +162,15 @@
     };
   }
 
+  // TODO: why?
+  static SortedDocValues open(AtomicReaderContext context, String field) throws IOException {
+    try {
+      return DocValues.getSorted(context.reader(), field);
+    } catch (RuntimeException e) {
+      throw new DocTermsIndexException(field, e);
+    }
+  }
+  
   /**
    * Custom Exception to be thrown when the DocTermsIndex for a field cannot be generated
    */
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/BytesRefFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/BytesRefFieldSource.java
index 5d33ef3..9d00ff3 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/BytesRefFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/BytesRefFieldSource.java
@@ -22,13 +22,15 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.DocTermsIndexDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.mutable.MutableValue;
+import org.apache.lucene.util.mutable.MutableValueStr;
 
 /**
  * An implementation for retrieving {@link FunctionValues} instances for string based fields.
@@ -42,11 +44,12 @@
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     final FieldInfo fieldInfo = readerContext.reader().getFieldInfos().fieldInfo(field);
+
     // To be sorted or not to be sorted, that is the question
     // TODO: do it cleaner?
     if (fieldInfo != null && fieldInfo.getDocValuesType() == DocValuesType.BINARY) {
-      final BinaryDocValues binaryValues = FieldCache.DEFAULT.getTerms(readerContext.reader(), field, true);
-      final Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(readerContext.reader(), field);
+      final BinaryDocValues binaryValues = DocValues.getBinary(readerContext.reader(), field);
+      final Bits docsWithField = DocValues.getDocsWithField(readerContext.reader(), field);
       return new FunctionValues() {
 
         @Override
@@ -76,6 +79,31 @@
         public String toString(int doc) {
           return description() + '=' + strVal(doc);
         }
+
+        @Override
+        public ValueFiller getValueFiller() {
+          return new ValueFiller() {
+            private final MutableValueStr mval = new MutableValueStr();
+
+            @Override
+            public MutableValue getValue() {
+              return mval;
+            }
+
+            @Override
+            public void fillValue(int doc) {
+              mval.exists = docsWithField.get(doc);
+              if (mval.exists) {
+                binaryValues.get(doc, mval.value);
+              } else {
+                mval.value.bytes = BytesRef.EMPTY_BYTES;
+                mval.value.offset = 0;
+                mval.value.length = 0;
+              }
+            }
+          };
+        }
+
       };
     } else {
       return new DocTermsIndexDocValues(this, readerContext, field) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
index 89d4a69..1d5fd53 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
@@ -20,31 +20,24 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDouble;
 
 /**
- * Obtains double field values from {@link FieldCache#getDoubles} and makes
+ * Obtains double field values from {@link AtomicReader#getNumericDocValues} and makes
  * those values available as other numeric types, casting as needed.
  */
 public class DoubleFieldSource extends FieldCacheSource {
 
-  protected final FieldCache.DoubleParser parser;
-
   public DoubleFieldSource(String field) {
-    this(field, null);
-  }
-
-  public DoubleFieldSource(String field, FieldCache.DoubleParser parser) {
     super(field);
-    this.parser = parser;
   }
 
   @Override
@@ -54,12 +47,12 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Doubles arr = cache.getDoubles(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
     return new DoubleDocValues(this) {
       @Override
       public double doubleVal(int doc) {
-        return arr.get(doc);
+        return Double.longBitsToDouble(arr.get(doc));
       }
 
       @Override
@@ -79,29 +72,24 @@
 
           @Override
           public void fillValue(int doc) {
-            mval.value = arr.get(doc);
+            mval.value = doubleVal(doc);
             mval.exists = mval.value != 0 || valid.get(doc);
           }
         };
       }
-
-
-      };
-
+    };
   }
 
   @Override
   public boolean equals(Object o) {
     if (o.getClass() != DoubleFieldSource.class) return false;
     DoubleFieldSource other = (DoubleFieldSource) o;
-    return super.equals(other)
-      && (this.parser == null ? other.parser == null :
-          this.parser.getClass() == other.parser.getClass());
+    return super.equals(other);
   }
 
   @Override
   public int hashCode() {
-    int h = parser == null ? Double.class.hashCode() : parser.getClass().hashCode();
+    int h = Double.class.hashCode();
     h += super.hashCode();
     return h;
   }
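Note that the read path above decodes raw long bits via Double.longBitsToDouble, so the value source assumes the matching encoding at index time; a hedged sketch (the field name "double" is illustrative):

    // Store the raw long bits so DoubleFieldSource can decode them back:
    doc.add(new NumericDocValuesField("double", Double.doubleToRawLongBits(3.14)));
    // ...or, equivalently, the convenience field that applies the same encoding:
    // doc.add(new DoubleDocValuesField("double", 3.14));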
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
index cd83c94..bd8f7fd 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
@@ -20,31 +20,31 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 
 /**
- * Obtains int field values from {@link FieldCache#getInts} and makes
+ * Obtains int field values from {@link AtomicReader#getNumericDocValues} and makes
  * those values available as other numeric types, casting as needed.
  * strVal of the value is not the int value, but its string (displayed) value
  */
 public class EnumFieldSource extends FieldCacheSource {
   static final Integer DEFAULT_VALUE = -1;
 
-  final FieldCache.IntParser parser;
   final Map<Integer, String> enumIntToStringMap;
   final Map<String, Integer> enumStringToIntMap;
 
-  public EnumFieldSource(String field, FieldCache.IntParser parser, Map<Integer, String> enumIntToStringMap, Map<String, Integer> enumStringToIntMap) {
+  public EnumFieldSource(String field, Map<Integer, String> enumIntToStringMap, Map<String, Integer> enumStringToIntMap) {
     super(field);
-    this.parser = parser;
     this.enumIntToStringMap = enumIntToStringMap;
     this.enumStringToIntMap = enumStringToIntMap;
   }
@@ -98,55 +98,29 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Ints arr = cache.getInts(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
 
     return new IntDocValues(this) {
       final MutableValueInt val = new MutableValueInt();
 
       @Override
-      public float floatVal(int doc) {
-        return (float) arr.get(doc);
-      }
-
-      @Override
       public int intVal(int doc) {
-        return arr.get(doc);
-      }
-
-      @Override
-      public long longVal(int doc) {
-        return (long) arr.get(doc);
-      }
-
-      @Override
-      public double doubleVal(int doc) {
-        return (double) arr.get(doc);
+        return (int) arr.get(doc);
       }
 
       @Override
       public String strVal(int doc) {
-        Integer intValue = arr.get(doc);
+        Integer intValue = intVal(doc);
         return intValueToStringValue(intValue);
       }
 
       @Override
-      public Object objectVal(int doc) {
-        return valid.get(doc) ? arr.get(doc) : null;
-      }
-
-      @Override
       public boolean exists(int doc) {
         return valid.get(doc);
       }
 
       @Override
-      public String toString(int doc) {
-        return description() + '=' + strVal(doc);
-      }
-
-
-      @Override
       public ValueSourceScorer getRangeScorer(IndexReader reader, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
         Integer lower = stringValueToIntValue(lowerVal);
         Integer upper = stringValueToIntValue(upperVal);
@@ -171,7 +145,7 @@
         return new ValueSourceScorer(reader, this) {
           @Override
           public boolean matchesValue(int doc) {
-            int val = arr.get(doc);
+            int val = intVal(doc);
             // only check for deleted if it's the default value
             // if (val==0 && reader.isDeleted(doc)) return false;
             return val >= ll && val <= uu;
@@ -191,13 +165,11 @@
 
           @Override
           public void fillValue(int doc) {
-            mval.value = arr.get(doc);
+            mval.value = intVal(doc);
             mval.exists = valid.get(doc);
           }
         };
       }
-
-
     };
   }
 
@@ -211,7 +183,6 @@
 
     if (!enumIntToStringMap.equals(that.enumIntToStringMap)) return false;
     if (!enumStringToIntMap.equals(that.enumStringToIntMap)) return false;
-    if (!parser.equals(that.parser)) return false;
 
     return true;
   }
@@ -219,7 +190,6 @@
   @Override
   public int hashCode() {
     int result = super.hashCode();
-    result = 31 * result + parser.hashCode();
     result = 31 * result + enumIntToStringMap.hashCode();
     result = 31 * result + enumStringToIntMap.hashCode();
     return result;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java
index ebdb72c..0e0ee80 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FieldCacheSource.java
@@ -18,26 +18,20 @@
 package org.apache.lucene.queries.function.valuesource;
 
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldCache;
 
 /**
  * A base class for ValueSource implementations that retrieve values for
- * a single field from the {@link org.apache.lucene.search.FieldCache}.
+ * a single field from DocValues.
  *
  *
  */
 public abstract class FieldCacheSource extends ValueSource {
   protected final String field;
-  protected final FieldCache cache = FieldCache.DEFAULT;
 
   public FieldCacheSource(String field) {
     this.field=field;
   }
 
-  public FieldCache getFieldCache() {
-    return cache;
-  }
-
   public String getField() {
     return field;
   }
@@ -51,13 +45,12 @@
   public boolean equals(Object o) {
     if (!(o instanceof FieldCacheSource)) return false;
     FieldCacheSource other = (FieldCacheSource)o;
-    return this.field.equals(other.field)
-           && this.cache == other.cache;
+    return this.field.equals(other.field);
   }
 
   @Override
   public int hashCode() {
-    return cache.hashCode() + field.hashCode();
+    return field.hashCode();
   }
 
 }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
index 3c7f3b1..36d578b 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
@@ -20,29 +20,24 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;
 
 /**
- * Obtains float field values from {@link FieldCache#getFloats} and makes those
+ * Obtains float field values from {@link AtomicReader#getNumericDocValues} and makes those
  * values available as other numeric types, casting as needed.
  */
 public class FloatFieldSource extends FieldCacheSource {
 
-  protected final FieldCache.FloatParser parser;
-
   public FloatFieldSource(String field) {
-    this(field, null);
-  }
-
-  public FloatFieldSource(String field, FieldCache.FloatParser parser) {
     super(field);
-    this.parser = parser;
   }
 
   @Override
@@ -52,18 +47,13 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Floats arr = cache.getFloats(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
 
     return new FloatDocValues(this) {
       @Override
       public float floatVal(int doc) {
-        return arr.get(doc);
-      }
-
-      @Override
-      public Object objectVal(int doc) {
-        return valid.get(doc) ? arr.get(doc) : null;
+        return Float.intBitsToFloat((int)arr.get(doc));
       }
 
       @Override
@@ -83,7 +73,7 @@
 
           @Override
           public void fillValue(int doc) {
-            mval.value = arr.get(doc);
+            mval.value = floatVal(doc);
             mval.exists = mval.value != 0 || valid.get(doc);
           }
         };
@@ -96,14 +86,12 @@
   public boolean equals(Object o) {
     if (o.getClass() !=  FloatFieldSource.class) return false;
     FloatFieldSource other = (FloatFieldSource)o;
-    return super.equals(other)
-      && (this.parser==null ? other.parser==null :
-          this.parser.getClass() == other.parser.getClass());
+    return super.equals(other);
   }
 
   @Override
   public int hashCode() {
-    int h = parser==null ? Float.class.hashCode() : parser.getClass().hashCode();
+    int h = Float.class.hashCode();
     h += super.hashCode();
     return h;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java
index a6ca74e..9659cc2 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/IntFieldSource.java
@@ -20,30 +20,26 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 
 /**
- * Obtains int field values from {@link FieldCache#getInts} and makes those
+ * Obtains int field values from {@link AtomicReader#getNumericDocValues} and makes those
  * values available as other numeric types, casting as needed.
  */
 public class IntFieldSource extends FieldCacheSource {
-  final FieldCache.IntParser parser;
 
   public IntFieldSource(String field) {
-    this(field, null);
-  }
-
-  public IntFieldSource(String field, FieldCache.IntParser parser) {
     super(field);
-    this.parser = parser;
   }
 
   @Override
@@ -54,40 +50,20 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Ints arr = cache.getInts(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
     
     return new IntDocValues(this) {
       final MutableValueInt val = new MutableValueInt();
-      
-      @Override
-      public float floatVal(int doc) {
-        return (float)arr.get(doc);
-      }
 
       @Override
       public int intVal(int doc) {
-        return arr.get(doc);
-      }
-
-      @Override
-      public long longVal(int doc) {
-        return (long)arr.get(doc);
-      }
-
-      @Override
-      public double doubleVal(int doc) {
-        return (double)arr.get(doc);
+        return (int) arr.get(doc);
       }
 
       @Override
       public String strVal(int doc) {
-        return Integer.toString(arr.get(doc));
-      }
-
-      @Override
-      public Object objectVal(int doc) {
-        return valid.get(doc) ? arr.get(doc) : null;
+        return Integer.toString(intVal(doc));
       }
 
       @Override
@@ -96,11 +72,6 @@
       }
 
       @Override
-      public String toString(int doc) {
-        return description() + '=' + intVal(doc);
-      }
-
-      @Override
       public ValueFiller getValueFiller() {
         return new ValueFiller() {
           private final MutableValueInt mval = new MutableValueInt();
@@ -112,13 +83,11 @@
 
           @Override
           public void fillValue(int doc) {
-            mval.value = arr.get(doc);
+            mval.value = intVal(doc);
             mval.exists = mval.value != 0 || valid.get(doc);
           }
         };
       }
-
-      
     };
   }
 
@@ -126,14 +95,12 @@
   public boolean equals(Object o) {
     if (o.getClass() !=  IntFieldSource.class) return false;
     IntFieldSource other = (IntFieldSource)o;
-    return super.equals(other)
-      && (this.parser==null ? other.parser==null :
-          this.parser.getClass() == other.parser.getClass());
+    return super.equals(other);
   }
 
   @Override
   public int hashCode() {
-    int h = parser==null ? Integer.class.hashCode() : parser.getClass().hashCode();
+    int h = Integer.class.hashCode();
     h += super.hashCode();
     return h;
   }
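NumericDocValues exposes every value as a long, which is why the int source above narrows with a cast; a minimal read-path sketch, assuming the reader and field name:

    NumericDocValues ndv = DocValues.getNumeric(atomicReader, "int");
    int v = (int) ndv.get(docId); // values were indexed as ints, so the narrowing cast is lossless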
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
index 173ca57..172892b 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
@@ -22,6 +22,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.ReaderUtil;
@@ -30,7 +31,6 @@
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.packed.PackedInts;
 
 /**
  * Use a field value and find the Document Frequency within another field.
@@ -56,7 +56,7 @@
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException
   {
-    final BinaryDocValues terms = cache.getTerms(readerContext.reader(), field, false, PackedInts.FAST);
+    final BinaryDocValues terms = DocValues.getBinary(readerContext.reader(), field);
     final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
     Terms t = MultiFields.getTerms(top, qfield);
     final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator(null);
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java
index 6270531..c63867e 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/LongFieldSource.java
@@ -20,31 +20,24 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueLong;
 
 /**
- * Obtains long field values from {@link FieldCache#getLongs} and makes those
+ * Obtains long field values from {@link AtomicReader#getNumericDocValues} and makes those
  * values available as other numeric types, casting as needed.
  */
 public class LongFieldSource extends FieldCacheSource {
 
-  protected final FieldCache.LongParser parser;
-
   public LongFieldSource(String field) {
-    this(field, null);
-  }
-
-  public LongFieldSource(String field, FieldCache.LongParser parser) {
     super(field);
-    this.parser = parser;
   }
 
   @Override
@@ -66,8 +59,8 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Longs arr = cache.getLongs(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
     
     return new LongDocValues(this) {
       @Override
@@ -124,14 +117,12 @@
   public boolean equals(Object o) {
     if (o.getClass() != this.getClass()) return false;
     LongFieldSource other = (LongFieldSource) o;
-    return super.equals(other)
-      && (this.parser == null ? other.parser == null :
-          this.parser.getClass() == other.parser.getClass());
+    return super.equals(other);
   }
 
   @Override
   public int hashCode() {
-    int h = parser == null ? this.getClass().hashCode() : parser.getClass().hashCode();
+    int h = getClass().hashCode();
     h += super.hashCode();
     return h;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SortedSetFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SortedSetFieldSource.java
new file mode 100644
index 0000000..eda61c5
--- /dev/null
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SortedSetFieldSource.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.queries.function.valuesource;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.queries.function.FunctionValues;
+import org.apache.lucene.queries.function.docvalues.DocTermsIndexDocValues;
+import org.apache.lucene.search.SortedSetSelector;
+
+/**
+ * Retrieves {@link FunctionValues} instances for multi-valued string-based fields.
+ * <p>
+ * A SortedSetDocValues contains multiple values for a field, so this 
+ * technique "selects" a value as the representative value for the document.
+ * 
+ * @see SortedSetSelector
+ */
+public class SortedSetFieldSource extends FieldCacheSource {
+  protected final SortedSetSelector.Type selector;
+  
+  public SortedSetFieldSource(String field) {
+    this(field, SortedSetSelector.Type.MIN);
+  }
+  
+  public SortedSetFieldSource(String field, SortedSetSelector.Type selector) {
+    super(field);
+    this.selector = selector;
+  }
+
+  @Override
+  public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
+    SortedSetDocValues sortedSet = DocValues.getSortedSet(readerContext.reader(), field);
+    SortedDocValues view = SortedSetSelector.wrap(sortedSet, selector);
+    return new DocTermsIndexDocValues(this, view) {
+      @Override
+      protected String toTerm(String readableValue) {
+        return readableValue;
+      }
+
+      @Override
+      public Object objectVal(int doc) {
+        return strVal(doc);
+      }
+    };
+  }
+  
+  @Override
+  public String description() {
+    return "sortedset(" + field + ",selector=" + selector + ')';
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((selector == null) ? 0 : selector.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!super.equals(obj)) return false;
+    if (getClass() != obj.getClass()) return false;
+    SortedSetFieldSource other = (SortedSetFieldSource) obj;
+    if (selector != other.selector) return false;
+    return true;
+  }
+}
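A brief usage sketch of the new source (field name and documents assumed, mirroring the test added further below):

    // Under Type.MIN each document is represented by its smallest term.
    ValueSource vs = new SortedSetFieldSource("value", SortedSetSelector.Type.MIN);
    FunctionValues fv = vs.getValues(Collections.emptyMap(), readerContext);
    String representative = fv.strVal(doc); // e.g. "bar" for a doc holding {"foo","bar"}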
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
index 7210104..4f32e58 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.CheckHits;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
@@ -39,7 +38,9 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.Term;
 
 /**
@@ -66,11 +67,6 @@
    */
   @Test
   public void testCustomScoreFloat() throws Exception {
-    // INT field can be parsed as float
-    doTestCustomScore(INT_AS_FLOAT_VALUESOURCE, 1.0);
-    doTestCustomScore(INT_AS_FLOAT_VALUESOURCE, 5.0);
-
-    // same values, but in float format
     doTestCustomScore(FLOAT_VALUESOURCE, 1.0);
     doTestCustomScore(FLOAT_VALUESOURCE, 6.0);
   }
@@ -164,7 +160,7 @@
 
     @Override
     protected CustomScoreProvider getCustomScoreProvider(AtomicReaderContext context) throws IOException {
-      final FieldCache.Ints values = FieldCache.DEFAULT.getInts(context.reader(), INT_FIELD, false);
+      final NumericDocValues values = DocValues.getNumeric(context.reader(), INT_FIELD);
       return new CustomScoreProvider(context) {
         @Override
         public float customScore(int doc, float subScore, float valSrcScore) {
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
index 3dccd7f..0a8d50d 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
@@ -1,7 +1,5 @@
 package org.apache.lucene.queries.function;
 
-import java.io.IOException;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -9,15 +7,14 @@
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FloatField;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -60,21 +57,7 @@
   protected static final String INT_FIELD = "iii";
   protected static final String FLOAT_FIELD = "fff";
 
-  private static final FieldCache.FloatParser CUSTOM_FLOAT_PARSER = new FieldCache.FloatParser() {
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return FieldCache.NUMERIC_UTILS_INT_PARSER.termsEnum(terms);
-    }
-    
-    @Override
-    public float parseFloat(BytesRef term) {
-      return (float) FieldCache.NUMERIC_UTILS_INT_PARSER.parseInt(term);
-    }
-  };
-
   protected ValueSource INT_VALUESOURCE = new IntFieldSource(INT_FIELD);
-  protected ValueSource INT_AS_FLOAT_VALUESOURCE = new FloatFieldSource(INT_FIELD, CUSTOM_FLOAT_PARSER);
   protected ValueSource FLOAT_VALUESOURCE = new FloatFieldSource(FLOAT_FIELD);
 
   private static final String DOC_TEXT_LINES[] = {
@@ -152,6 +135,7 @@
     
     f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes
     d.add(f);
+    d.add(new SortedDocValuesField(ID_FIELD, new BytesRef(id2String(scoreAndID))));
 
     FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
     customType2.setOmitNorms(true);
@@ -160,9 +144,11 @@
 
     f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
     d.add(f);
+    d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));
 
     f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
     d.add(f);
+    d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));
 
     iw.addDocument(d);
     log("added: " + d);
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
index 7aab17e..f350358 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
@@ -53,8 +53,6 @@
   /** Test that FieldScoreQuery of Type.FLOAT returns docs in expected order. */
   @Test
   public void testRankFloat () throws Exception {
-    // INT field can be parsed as float
-    doTestRank(INT_AS_FLOAT_VALUESOURCE);
     // same values, but in float format
     doTestRank(FLOAT_VALUESOURCE);
   }
@@ -88,8 +86,6 @@
   /** Test that FieldScoreQuery of Type.FLOAT returns the expected scores. */
   @Test
   public void testExactScoreFloat () throws  Exception {
-    // INT field can be parsed as float
-    doTestExactScore(INT_AS_FLOAT_VALUESOURCE);
     // same values, but in float format
     doTestExactScore(FLOAT_VALUESOURCE);
   }
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
index 0fb7f19..e5cbc51 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -48,12 +49,15 @@
 
     Document doc = new Document();
     Field field = new IntField("value", 0, Field.Store.YES);
+    Field dvField = new NumericDocValuesField("value", 0);
     doc.add(field);
+    doc.add(dvField);
 
     // Save docs unsorted (decreasing value n, n-1, ...)
     final int NUM_VALS = 5;
     for (int val = NUM_VALS; val > 0; val--) {
       field.setIntValue(val);
+      dvField.setLongValue(val);
       writer.addDocument(doc);
     }
 
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java
deleted file mode 100644
index 745652a..0000000
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java
+++ /dev/null
@@ -1,155 +0,0 @@
-package org.apache.lucene.queries.function;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queries.function.valuesource.OrdFieldSource;
-import org.apache.lucene.queries.function.valuesource.ReverseOrdFieldSource;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test search based on OrdFieldSource and ReverseOrdFieldSource.
- * <p/>
- * Tests here create an index with a few documents, each having
- * an indexed "id" field.
- * The ord values of this field are later used for scoring.
- * <p/>
- * The order tests use Hits to verify that docs are ordered as expected.
- * <p/>
- * The exact score tests use TopDocs top to verify the exact score.
- */
-public class TestOrdValues extends FunctionTestSetup {
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    createIndex(false);
-  }
-
-  /**
-   * Test OrdFieldSource
-   */
-  @Test
-  public void testOrdFieldRank() throws Exception {
-    doTestRank(ID_FIELD, true);
-  }
-
-  /**
-   * Test ReverseOrdFieldSource
-   */
-  @Test
-  public void testReverseOrdFieldRank() throws Exception {
-    doTestRank(ID_FIELD, false);
-  }
-
-  // Test that queries based on reverse/ordFieldScore scores correctly
-  private void doTestRank(String field, boolean inOrder) throws Exception {
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    ValueSource vs;
-    if (inOrder) {
-      vs = new OrdFieldSource(field);
-    } else {
-      vs = new ReverseOrdFieldSource(field);
-    }
-
-    Query q = new FunctionQuery(vs);
-    log("test: " + q);
-    QueryUtils.check(random(), q, s);
-    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-    assertEquals("All docs should be matched!", N_DOCS, h.length);
-    String prevID = inOrder
-            ? "IE"   // greater than all ids of docs in this test ("ID0001", etc.)
-            : "IC";  // smaller than all ids of docs in this test ("ID0001", etc.)
-
-    for (int i = 0; i < h.length; i++) {
-      String resID = s.doc(h[i].doc).get(ID_FIELD);
-      log(i + ".   score=" + h[i].score + "  -  " + resID);
-      log(s.explain(q, h[i].doc));
-      if (inOrder) {
-        assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.compareTo(prevID) < 0);
-      } else {
-        assertTrue("res id " + resID + " should be > prev res id " + prevID, resID.compareTo(prevID) > 0);
-      }
-      prevID = resID;
-    }
-    r.close();
-  }
-
-  /**
-   * Test exact score for OrdFieldSource
-   */
-  @Test
-  public void testOrdFieldExactScore() throws Exception {
-    doTestExactScore(ID_FIELD, true);
-  }
-
-  /**
-   * Test exact score for ReverseOrdFieldSource
-   */
-  @Test
-  public void testReverseOrdFieldExactScore() throws Exception {
-    doTestExactScore(ID_FIELD, false);
-  }
-
-
-  // Test that queries based on reverse/ordFieldScore returns docs with expected score.
-  private void doTestExactScore(String field, boolean inOrder) throws Exception {
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    ValueSource vs;
-    if (inOrder) {
-      vs = new OrdFieldSource(field);
-    } else {
-      vs = new ReverseOrdFieldSource(field);
-    }
-    Query q = new FunctionQuery(vs);
-    TopDocs td = s.search(q, null, 1000);
-    assertEquals("All docs should be matched!", N_DOCS, td.totalHits);
-    ScoreDoc sd[] = td.scoreDocs;
-    for (int i = 0; i < sd.length; i++) {
-      float score = sd[i].score;
-      String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD);
-      log("-------- " + i + ". Explain doc " + id);
-      log(s.explain(q, sd[i].doc));
-      float expectedScore = N_DOCS - i - 1;
-      assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
-      String expectedId = inOrder
-              ? id2String(N_DOCS - i) // in-order ==> larger  values first
-              : id2String(i + 1);     // reverse  ==> smaller values first
-      assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
-    }
-    r.close();
-  }
-  
-  // LUCENE-1250
-  public void testEqualsNull() throws Exception {
-    OrdFieldSource ofs = new OrdFieldSource("f");
-    assertFalse(ofs.equals(null));
-    
-    ReverseOrdFieldSource rofs = new ReverseOrdFieldSource("f");
-    assertFalse(rofs.equals(null));
-  }
-
-}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java
new file mode 100644
index 0000000..d45594c
--- /dev/null
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java
@@ -0,0 +1,61 @@
+package org.apache.lucene.queries.function;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Collections;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+
+@SuppressCodecs({"Lucene40", "Lucene41"}) // avoid codecs that don't support sortedset
+public class TestSortedSetFieldSource extends LuceneTestCase {
+  public void testSimple() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
+    Document doc = new Document();
+    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
+    doc.add(newStringField("id", "2", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
+    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
+    doc.add(newStringField("id", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.forceMerge(1);
+    writer.shutdown();
+
+    DirectoryReader ir = DirectoryReader.open(dir);
+    AtomicReader ar = getOnlySegmentReader(ir);
+    
+    ValueSource vs = new SortedSetFieldSource("value");
+    FunctionValues values = vs.getValues(Collections.emptyMap(), ar.getContext());
+    assertEquals("baz", values.strVal(0));
+    assertEquals("bar", values.strVal(1)); 
+    ir.close();
+    dir.close();
+  }
+}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
index c319636..90927d3 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
@@ -27,6 +27,8 @@
 import org.apache.lucene.document.FloatField;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
@@ -101,26 +103,44 @@
     Document document = new Document();
     Field idField = new StringField("id", "", Field.Store.NO);
     document.add(idField);
+    Field idDVField = new SortedDocValuesField("id", new BytesRef());
+    document.add(idDVField);
     Field doubleField = new DoubleField("double", 0d, Field.Store.NO);
     document.add(doubleField);
+    Field doubleDVField = new NumericDocValuesField("double", 0);
+    document.add(doubleDVField);
     Field floatField = new FloatField("float", 0f, Field.Store.NO);
     document.add(floatField);
+    Field floatDVField = new NumericDocValuesField("float", 0);
+    document.add(floatDVField);
     Field intField = new IntField("int", 0, Field.Store.NO);
     document.add(intField);
+    Field intDVField = new NumericDocValuesField("int", 0);
+    document.add(intDVField);
     Field longField = new LongField("long", 0L, Field.Store.NO);
     document.add(longField);
+    Field longDVField = new NumericDocValuesField("long", 0);
+    document.add(longDVField);
     Field stringField = new StringField("string", "", Field.Store.NO);
     document.add(stringField);
+    Field stringDVField = new SortedDocValuesField("string", new BytesRef());
+    document.add(stringDVField);
     Field textField = new TextField("text", "", Field.Store.NO);
     document.add(textField);
     
     for (String [] doc : documents) {
       idField.setStringValue(doc[0]);
+      idDVField.setBytesValue(new BytesRef(doc[0]));
       doubleField.setDoubleValue(Double.valueOf(doc[1]));
+      doubleDVField.setLongValue(Double.doubleToRawLongBits(Double.valueOf(doc[1])));
       floatField.setFloatValue(Float.valueOf(doc[2]));
+      floatDVField.setLongValue(Float.floatToRawIntBits(Float.valueOf(doc[2])));
       intField.setIntValue(Integer.valueOf(doc[3]));
+      intDVField.setLongValue(Integer.valueOf(doc[3]));
       longField.setLongValue(Long.valueOf(doc[4]));
+      longDVField.setLongValue(Long.valueOf(doc[4]));
       stringField.setStringValue(doc[5]);
+      stringDVField.setBytesValue(new BytesRef(doc[5]));
       textField.setStringValue(doc[6]);
       iw.addDocument(document);
     }
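
The new doc-values mirrors above must encode floating-point values as raw bits,
because NumericDocValues stores only longs. A hedged sketch of the round trip,
using only standard java.lang conversions, nothing patch-specific:

  // Index time: pack the IEEE-754 bits into the long that NumericDocValues stores.
  long dBits = Double.doubleToRawLongBits(3.14);
  int  fBits = Float.floatToRawIntBits(2.5f);

  // Search time: unpack the same bits to recover the original values.
  double d = Double.longBitsToDouble(dBits);      // 3.14
  float  f = Float.intBitsToFloat(fBits);         // 2.5f
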
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java
index e46ec4e..5224c1d 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedStringComparator.java
@@ -22,7 +22,7 @@
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -95,8 +95,8 @@
 
   @Override
   public FieldComparator<String> setNextReader(AtomicReaderContext context) throws IOException {
-    currentDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, true);
-    docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+    currentDocTerms = DocValues.getBinary(context.reader(), field);
+    docsWithField = DocValues.getDocsWithField(context.reader(), field);
     return this;
   }
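
The two replacement calls above are the per-segment pattern this patch applies
wherever a comparator needs field values plus a missing-value test. A hedged
sketch of how the pair is typically consumed, assuming (as the surrounding code
does) that both helpers return usable empty instances rather than null:

  // Per-segment setup: values plus a parallel "does this doc have a value" bitset.
  BinaryDocValues terms = DocValues.getBinary(context.reader(), field);
  Bits docsWithField = DocValues.getDocsWithField(context.reader(), field);

  // In the comparator body, missing docs are detected via the bitset,
  // not via null checks on the reader:
  boolean hasValue = docsWithField.get(docID);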
   
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
index d511c89..ecf4c55 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
@@ -21,7 +21,7 @@
 
 import org.apache.lucene.search.MultiTermQueryWrapperFilter;
 import org.apache.lucene.search.NumericRangeFilter; // javadoc
-import org.apache.lucene.search.FieldCacheRangeFilter; // javadoc
+import org.apache.lucene.search.DocValuesRangeFilter; // javadoc
 
 /**
  * A Filter that restricts search results to a range of term
@@ -33,7 +33,7 @@
  * for numerical ranges; use {@link NumericRangeFilter} instead.
  *
  * <p>If you construct a large number of range filters with different ranges but on the 
- * same field, {@link FieldCacheRangeFilter} may have significantly better performance. 
+ * same field, {@link DocValuesRangeFilter} may have significantly better performance. 
  * @deprecated Index collation keys with CollationKeyAnalyzer or ICUCollationKeyAnalyzer instead.
  * This class will be removed in Lucene 5.0
  */
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SortedSetSortField.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SortedSetSortField.java
deleted file mode 100644
index e6b2933..0000000
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SortedSetSortField.java
+++ /dev/null
@@ -1,328 +0,0 @@
-package org.apache.lucene.sandbox.queries;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.RandomAccessOrds;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.util.BytesRef;
-
-/** 
- * SortField for {@link SortedSetDocValues}.
- * <p>
- * A SortedSetDocValues contains multiple values for a field, so sorting with
- * this technique "selects" a value as the representative sort value for the document.
- * <p>
- * By default, the minimum value in the set is selected as the sort value, but
- * this can be customized. Selectors other than the default do have some limitations
- * (see below) to ensure that all selections happen in constant-time for performance.
- * <p>
- * Like sorting by string, this also supports sorting missing values as first or last,
- * via {@link #setMissingValue(Object)}.
- * <p>
- * Limitations:
- * <ul>
- *   <li>Fields containing {@link Integer#MAX_VALUE} or more unique values
- *       are unsupported.
- *   <li>Selectors other than the default ({@link Selector#MIN}) require 
- *       optional codec support. However several codecs provided by Lucene, 
- *       including the current default codec, support this.
- * </ul>
- */
-public class SortedSetSortField extends SortField {
-  
-  /** Selects a value from the document's set to use as the sort value */
-  public static enum Selector {
-    /** 
-     * Selects the minimum value in the set 
-     */
-    MIN,
-    /** 
-     * Selects the maximum value in the set 
-     */
-    MAX,
-    /** 
-     * Selects the middle value in the set.
-     * <p>
-     * If the set has an even number of values, the lower of the middle two is chosen.
-     */
-    MIDDLE_MIN,
-    /** 
-     * Selects the middle value in the set.
-     * <p>
-     * If the set has an even number of values, the higher of the middle two is chosen
-     */
-    MIDDLE_MAX
-  }
-  
-  private final Selector selector;
-  
-  /**
-   * Creates a sort, possibly in reverse, by the minimum value in the set 
-   * for the document.
-   * @param field Name of field to sort by.  Must not be null.
-   * @param reverse True if natural order should be reversed.
-   */
-  public SortedSetSortField(String field, boolean reverse) {
-    this(field, reverse, Selector.MIN);
-  }
-
-  /**
-   * Creates a sort, possibly in reverse, specifying how the sort value from 
-   * the document's set is selected.
-   * @param field Name of field to sort by.  Must not be null.
-   * @param reverse True if natural order should be reversed.
-   * @param selector custom selector for choosing the sort value from the set.
-   * <p>
-   * NOTE: selectors other than {@link Selector#MIN} require optional codec support.
-   */
-  public SortedSetSortField(String field, boolean reverse, Selector selector) {
-    super(field, SortField.Type.CUSTOM, reverse);
-    if (selector == null) {
-      throw new NullPointerException();
-    }
-    this.selector = selector;
-  }
-  
-  /** Returns the selector in use for this sort */
-  public Selector getSelector() {
-    return selector;
-  }
-
-  @Override
-  public int hashCode() {
-    return 31 * super.hashCode() + selector.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (!super.equals(obj)) return false;
-    if (getClass() != obj.getClass()) return false;
-    SortedSetSortField other = (SortedSetSortField) obj;
-    if (selector != other.selector) return false;
-    return true;
-  }
-  
-  @Override
-  public String toString() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("<sortedset" + ": \"").append(getField()).append("\">");
-    if (getReverse()) buffer.append('!');
-    if (missingValue != null) {
-      buffer.append(" missingValue=");
-      buffer.append(missingValue);
-    }
-    buffer.append(" selector=");
-    buffer.append(selector);
-
-    return buffer.toString();
-  }
-
-  /**
-   * Set how missing values (the empty set) are sorted.
-   * <p>
-   * Note that this must be {@link #STRING_FIRST} or {@link #STRING_LAST}.
-   */
-  @Override
-  public void setMissingValue(Object missingValue) {
-    if (missingValue != STRING_FIRST && missingValue != STRING_LAST) {
-      throw new IllegalArgumentException("For SORTED_SET type, missing value must be either STRING_FIRST or STRING_LAST");
-    }
-    this.missingValue = missingValue;
-  }
-  
-  @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
-    return new FieldComparator.TermOrdValComparator(numHits, getField(), missingValue == STRING_LAST) {
-      @Override
-      protected SortedDocValues getSortedDocValues(AtomicReaderContext context, String field) throws IOException {
-        SortedSetDocValues sortedSet = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
-        
-        if (sortedSet.getValueCount() >= Integer.MAX_VALUE) {
-          throw new UnsupportedOperationException("fields containing more than " + (Integer.MAX_VALUE-1) + " unique terms are unsupported");
-        }
-        
-        SortedDocValues singleton = DocValues.unwrapSingleton(sortedSet);
-        if (singleton != null) {
-          // it's actually single-valued in practice, but indexed as multi-valued,
-          // so just sort on the underlying single-valued dv directly.
-          // regardless of selector type, this optimization is safe!
-          return singleton;
-        } else if (selector == Selector.MIN) {
-          return new MinValue(sortedSet);
-        } else {
-          if (sortedSet instanceof RandomAccessOrds == false) {
-            throw new UnsupportedOperationException("codec does not support random access ordinals, cannot use selector: " + selector);
-          }
-          RandomAccessOrds randomOrds = (RandomAccessOrds) sortedSet;
-          switch(selector) {
-            case MAX: return new MaxValue(randomOrds);
-            case MIDDLE_MIN: return new MiddleMinValue(randomOrds);
-            case MIDDLE_MAX: return new MiddleMaxValue(randomOrds);
-            case MIN: 
-            default: 
-              throw new AssertionError();
-          }
-        }
-      }
-    };
-  }
-  
-  /** Wraps a SortedSetDocValues and returns the first ordinal (min) */
-  static class MinValue extends SortedDocValues {
-    final SortedSetDocValues in;
-    
-    MinValue(SortedSetDocValues in) {
-      this.in = in;
-    }
-
-    @Override
-    public int getOrd(int docID) {
-      in.setDocument(docID);
-      return (int) in.nextOrd();
-    }
-
-    @Override
-    public void lookupOrd(int ord, BytesRef result) {
-      in.lookupOrd(ord, result);
-    }
-
-    @Override
-    public int getValueCount() {
-      return (int) in.getValueCount();
-    }
-
-    @Override
-    public int lookupTerm(BytesRef key) {
-      return (int) in.lookupTerm(key);
-    }
-  }
-  
-  /** Wraps a SortedSetDocValues and returns the last ordinal (max) */
-  static class MaxValue extends SortedDocValues {
-    final RandomAccessOrds in;
-    
-    MaxValue(RandomAccessOrds in) {
-      this.in = in;
-    }
-
-    @Override
-    public int getOrd(int docID) {
-      in.setDocument(docID);
-      final int count = in.cardinality();
-      if (count == 0) {
-        return -1;
-      } else {
-        return (int) in.ordAt(count-1);
-      }
-    }
-
-    @Override
-    public void lookupOrd(int ord, BytesRef result) {
-      in.lookupOrd(ord, result);
-    }
-
-    @Override
-    public int getValueCount() {
-      return (int) in.getValueCount();
-    }
-    
-    @Override
-    public int lookupTerm(BytesRef key) {
-      return (int) in.lookupTerm(key);
-    }
-  }
-  
-  /** Wraps a SortedSetDocValues and returns the middle ordinal (or min of the two) */
-  static class MiddleMinValue extends SortedDocValues {
-    final RandomAccessOrds in;
-    
-    MiddleMinValue(RandomAccessOrds in) {
-      this.in = in;
-    }
-
-    @Override
-    public int getOrd(int docID) {
-      in.setDocument(docID);
-      final int count = in.cardinality();
-      if (count == 0) {
-        return -1;
-      } else {
-        return (int) in.ordAt((count-1) >>> 1);
-      }
-    }
-
-    @Override
-    public void lookupOrd(int ord, BytesRef result) {
-      in.lookupOrd(ord, result);
-    }
-
-    @Override
-    public int getValueCount() {
-      return (int) in.getValueCount();
-    }
-    
-    @Override
-    public int lookupTerm(BytesRef key) {
-      return (int) in.lookupTerm(key);
-    }
-  }
-  
-  /** Wraps a SortedSetDocValues and returns the middle ordinal (or max of the two) */
-  static class MiddleMaxValue extends SortedDocValues {
-    final RandomAccessOrds in;
-    
-    MiddleMaxValue(RandomAccessOrds in) {
-      this.in = in;
-    }
-
-    @Override
-    public int getOrd(int docID) {
-      in.setDocument(docID);
-      final int count = in.cardinality();
-      if (count == 0) {
-        return -1;
-      } else {
-        return (int) in.ordAt(count >>> 1);
-      }
-    }
-
-    @Override
-    public void lookupOrd(int ord, BytesRef result) {
-      in.lookupOrd(ord, result);
-    }
-
-    @Override
-    public int getValueCount() {
-      return (int) in.getValueCount();
-    }
-    
-    @Override
-    public int lookupTerm(BytesRef key) {
-      return (int) in.lookupTerm(key);
-    }
-  }
-}
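
For reference, the four selector wrappers deleted above reduce to simple index
arithmetic over a document's sorted ordinal list of length count: MIN picks
index 0, MAX picks count-1, MIDDLE_MIN picks (count-1)>>>1, and MIDDLE_MAX picks
count>>>1. A hedged sketch with a hypothetical helper, mirroring the getOrd
implementations above:

  // For count == 4 the selected indices are 0, 3, 1 and 2 respectively;
  // for count == 3 both middle selectors agree on index 1.
  static int selectedIndex(SortedSetSortField.Selector selector, int count) {
    switch (selector) {
      case MIN:        return 0;                  // smallest ordinal
      case MAX:        return count - 1;          // largest ordinal
      case MIDDLE_MIN: return (count - 1) >>> 1;  // lower middle on even counts
      case MIDDLE_MAX: return count >>> 1;        // upper middle on even counts
      default: throw new AssertionError();
    }
  }
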
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
index d3a331a..ade74c2 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
@@ -5,11 +5,13 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -58,6 +60,8 @@
       String value = TestUtil.randomUnicodeString(random());
       Field field = newStringField("field", value, Field.Store.YES);
       doc.add(field);
+      Field dvField = new SortedDocValuesField("field", new BytesRef(value));
+      doc.add(dvField);
       iw.addDocument(doc);
     }
     splitDoc = TestUtil.randomUnicodeString(random());
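
The added dvField is the indexing-side half of the migration: any field that
will be sorted or compared per-document now carries an explicit doc-values
sibling under the same name. A minimal hedged sketch outside the test harness:

  // Index both an inverted field (for matching/storage) and a doc-values
  // field (for sorting) under the same field name.
  Document doc = new Document();
  doc.add(new StringField("field", value, Field.Store.YES));
  doc.add(new SortedDocValuesField("field", new BytesRef(value)));
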
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortField.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortField.java
deleted file mode 100644
index 22df798..0000000
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSortedSetSortField.java
+++ /dev/null
@@ -1,225 +0,0 @@
-package org.apache.lucene.sandbox.queries;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-
-/** Simple tests for SortedSetSortField */
-public class TestSortedSetSortField extends LuceneTestCase {
-  
-  public void testForward() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "baz", Field.Store.NO));
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedSetSortField("value", false));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'baz'
-    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
-    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    
-    ir.close();
-    dir.close();
-  }
-  
-  public void testReverse() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "baz", Field.Store.NO));
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    writer.addDocument(doc);
-
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedSetSortField("value", true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'baz'
-    assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
-    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    
-    ir.close();
-    dir.close();
-  }
-  
-  public void testMissingFirst() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "baz", Field.Store.NO));
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedSetSortField("value", false);
-    sortField.setMissingValue(SortField.STRING_FIRST);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // 'bar' comes before 'baz'
-    // null comes first
-    assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id"));
-    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    
-    ir.close();
-    dir.close();
-  }
-  
-  public void testMissingLast() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "baz", Field.Store.NO));
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedSetSortField("value", false);
-    sortField.setMissingValue(SortField.STRING_LAST);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // 'bar' comes before 'baz'
-    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
-    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    // null comes last
-    assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
-    
-    ir.close();
-    dir.close();
-  }
-  
-  public void testSingleton() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "baz", Field.Store.NO));
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.shutdown();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedSetSortField("value", false));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(2, td.totalHits);
-    // 'bar' comes before 'baz'
-    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
-    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
-    
-    ir.close();
-    dir.close();
-  }
-  
-  public void testEmptyIndex() throws Exception {
-    IndexSearcher empty = newSearcher(new MultiReader());
-    Query query = new TermQuery(new Term("contents", "foo"));
-  
-    Sort sort = new Sort();
-    sort.setSort(new SortedSetSortField("sortedset", false));
-    TopDocs td = empty.search(query, null, 10, sort, true, true);
-    assertEquals(0, td.totalHits);
-    
-    // for an empty index, any selector should work
-    for (SortedSetSortField.Selector v : SortedSetSortField.Selector.values()) {
-      sort.setSort(new SortedSetSortField("sortedset", false, v));
-      td = empty.search(query, null, 10, sort, true, true);
-      assertEquals(0, td.totalHits);
-    }
-  }
-  
-  public void testEquals() throws Exception {
-    SortField sf = new SortedSetSortField("a", false);
-    assertFalse(sf.equals(null));
-    
-    assertEquals(sf, sf);
-    
-    SortField sf2 = new SortedSetSortField("a", false);
-    assertEquals(sf, sf2);
-    assertEquals(sf.hashCode(), sf2.hashCode());
-    
-    assertFalse(sf.equals(new SortedSetSortField("a", true)));
-    assertFalse(sf.equals(new SortedSetSortField("b", false)));
-    assertFalse(sf.equals(new SortedSetSortField("a", false, SortedSetSortField.Selector.MAX)));
-    assertFalse(sf.equals("foo"));
-  }
-}
diff --git a/lucene/spatial/build.xml b/lucene/spatial/build.xml
index 463ae43..c590bee 100644
--- a/lucene/spatial/build.xml
+++ b/lucene/spatial/build.xml
@@ -32,6 +32,7 @@
     <path refid="base.classpath"/>
     <path refid="spatialjar"/>
     <pathelement path="${queries.jar}" />
+    <pathelement path="${misc.jar}" />
   </path>
 
   <path id="test.classpath">
@@ -40,12 +41,13 @@
     <pathelement path="src/test-files" />
   </path>
 
-  <target name="compile-core" depends="jar-queries,common.compile-core" />
+  <target name="compile-core" depends="jar-queries,jar-misc,common.compile-core" />
 
-  <target name="javadocs" depends="javadocs-queries,compile-core">
+  <target name="javadocs" depends="javadocs-queries,javadocs-misc,compile-core">
     <invoke-module-javadoc>
       <links>
         <link href="../queries"/>
+        <link href="../misc"/>
       </links>
     </invoke-module-javadoc>
   </target>
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/DisjointSpatialFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/DisjointSpatialFilter.java
index 5109c0f..6dc8e7a 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/DisjointSpatialFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/DisjointSpatialFilter.java
@@ -17,11 +17,12 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.queries.ChainedFilter;
 import org.apache.lucene.search.BitsFilteredDocIdSet;
 import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
@@ -48,7 +49,7 @@
    * @param strategy Needed to compute intersects
    * @param args Used in spatial intersection
    * @param field This field is used to determine which docs have spatial data via
-   *               {@link org.apache.lucene.search.FieldCache#getDocsWithField(org.apache.lucene.index.AtomicReader, String)}.
+   *               {@link AtomicReader#getDocsWithField(String)}.
    *              Passing null will assume all docs have spatial data.
    */
   public DisjointSpatialFilter(SpatialStrategy strategy, SpatialArgs args, String field) {
@@ -92,7 +93,7 @@
       // which is nice but loading it in this way might be slower than say using an
       // intersects filter against the world bounds. So do we add a method to the
       // strategy, perhaps?  But the strategy can't cache it.
-      docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+      docsWithField = DocValues.getDocsWithField(context.reader(), field);
 
       final int maxDoc = context.reader().maxDoc();
       if (docsWithField.length() != maxDoc )
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
index f229c87..267fb0e 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
@@ -41,8 +41,7 @@
  *   <li>What types of query shapes can be used?</li>
  *   <li>What types of query operations are supported?
  *   This might vary per shape.</li>
- *   <li>Does it use the {@link org.apache.lucene.search.FieldCache},
- *   or some other type of cache?  When?
+ *   <li>Does it use some type of cache?  When?</li>
  * </ul>
  * If a strategy only supports certain shapes at index or query time, then in
  * general it will throw an exception if given an incompatible one.  It will not
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java
index 2a3b2a7..68f2fbe 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java
@@ -20,10 +20,11 @@
 import com.spatial4j.core.shape.Rectangle;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -64,13 +65,13 @@
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     AtomicReader reader = readerContext.reader();
-    final FieldCache.Doubles minX = FieldCache.DEFAULT.getDoubles(reader, strategy.field_minX, true);
-    final FieldCache.Doubles minY = FieldCache.DEFAULT.getDoubles(reader, strategy.field_minY, true);
-    final FieldCache.Doubles maxX = FieldCache.DEFAULT.getDoubles(reader, strategy.field_maxX, true);
-    final FieldCache.Doubles maxY = FieldCache.DEFAULT.getDoubles(reader, strategy.field_maxY, true);
+    final NumericDocValues minX = DocValues.getNumeric(reader, strategy.field_minX);
+    final NumericDocValues minY = DocValues.getNumeric(reader, strategy.field_minY);
+    final NumericDocValues maxX = DocValues.getNumeric(reader, strategy.field_maxX);
+    final NumericDocValues maxY = DocValues.getNumeric(reader, strategy.field_maxY);
 
-    final Bits validMinX = FieldCache.DEFAULT.getDocsWithField(reader, strategy.field_minX);
-    final Bits validMaxX = FieldCache.DEFAULT.getDocsWithField(reader, strategy.field_maxX);
+    final Bits validMinX = DocValues.getDocsWithField(reader, strategy.field_minX);
+    final Bits validMaxX = DocValues.getDocsWithField(reader, strategy.field_maxX);
 
     return new FunctionValues() {
       //reused
@@ -78,13 +79,13 @@
 
       @Override
       public float floatVal(int doc) {
-        double minXVal = minX.get(doc);
-        double maxXVal = maxX.get(doc);
+        double minXVal = Double.longBitsToDouble(minX.get(doc));
+        double maxXVal = Double.longBitsToDouble(maxX.get(doc));
         // make sure it has minX and area
         if ((minXVal != 0 || validMinX.get(doc)) && (maxXVal != 0 || validMaxX.get(doc))) {
           rect.reset(
               minXVal, maxXVal,
-              minY.get(doc), maxY.get(doc));
+              Double.longBitsToDouble(minY.get(doc)), Double.longBitsToDouble(maxY.get(doc)));
           return (float) similarity.score(rect, null);
         } else {
           return (float) similarity.score(null, null);
@@ -96,8 +97,8 @@
         // make sure it has minX and area
         if (validMinX.get(doc) && validMaxX.get(doc)) {
           rect.reset(
-              minX.get(doc), maxX.get(doc),
-              minY.get(doc), maxY.get(doc));
+              Double.longBitsToDouble(minX.get(doc)), Double.longBitsToDouble(maxX.get(doc)),
+              Double.longBitsToDouble(minY.get(doc)), Double.longBitsToDouble(maxY.get(doc)));
           Explanation exp = new Explanation();
           similarity.score(rect, exp);
           return exp;
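
The guard in floatVal() above is subtle: NumericDocValues reports 0 for a
document that has no value at all, so a coordinate of 0.0 can only be trusted
when the matching docsWithField bit confirms the field is present. A hedged
sketch of that disambiguation:

  // Distinguish "stored 0.0" from "field absent": a non-zero value is
  // necessarily real; a zero value needs the docsWithField bit to confirm it.
  double minXVal = Double.longBitsToDouble(minX.get(doc));
  boolean hasMinX = (minXVal != 0) || validMinX.get(doc);
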
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
index ced3f65..0e0171f 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
@@ -25,6 +25,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
@@ -64,7 +65,7 @@
  * The {@link #makeBBoxAreaSimilarityValueSource(com.spatial4j.core.shape.Rectangle)}
  * works by calculating the query bbox overlap percentage against the indexed
  * shape overlap percentage. The indexed shape's coordinates are retrieved from
- * the {@link org.apache.lucene.search.FieldCache}.
+ * {@link AtomicReader#getNumericDocValues}.
  *
  * @lucene.experimental
  */
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
index ba9621d..30bde03 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
@@ -21,9 +21,10 @@
 import com.spatial4j.core.shape.Point;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -65,10 +66,10 @@
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     AtomicReader reader = readerContext.reader();
 
-    final FieldCache.Doubles ptX = FieldCache.DEFAULT.getDoubles(reader, strategy.getFieldNameX(), true);
-    final FieldCache.Doubles ptY = FieldCache.DEFAULT.getDoubles(reader, strategy.getFieldNameY(), true);
-    final Bits validX =  FieldCache.DEFAULT.getDocsWithField(reader, strategy.getFieldNameX());
-    final Bits validY =  FieldCache.DEFAULT.getDocsWithField(reader, strategy.getFieldNameY());
+    final NumericDocValues ptX = DocValues.getNumeric(reader, strategy.getFieldNameX());
+    final NumericDocValues ptY = DocValues.getNumeric(reader, strategy.getFieldNameY());
+    final Bits validX =  DocValues.getDocsWithField(reader, strategy.getFieldNameX());
+    final Bits validY =  DocValues.getDocsWithField(reader, strategy.getFieldNameY());
 
     return new FunctionValues() {
 
@@ -87,7 +88,7 @@
         // make sure it has minX and area
         if (validX.get(doc)) {
           assert validY.get(doc);
-          return calculator.distance(from, ptX.get(doc), ptY.get(doc)) * multiplier;
+          return calculator.distance(from, Double.longBitsToDouble(ptX.get(doc)), Double.longBitsToDouble(ptY.get(doc))) * multiplier;
         }
         return nullValue;
       }
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
index 618645b..d01d782 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
@@ -32,6 +32,8 @@
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -42,7 +44,9 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomGaussian;
@@ -59,15 +63,26 @@
 
   protected SpatialContext ctx;//subclass must initialize
 
+  Map<String,Type> uninvertMap = new HashMap<>();
+  
   @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();
-
+    // TODO: change this module to index docvalues instead of uninverting
+    uninvertMap.clear();
+    uninvertMap.put("bbox__minX", Type.DOUBLE);
+    uninvertMap.put("bbox__maxX", Type.DOUBLE);
+    uninvertMap.put("bbox__minY", Type.DOUBLE);
+    uninvertMap.put("bbox__maxY", Type.DOUBLE);
+    uninvertMap.put("pointvector__x", Type.DOUBLE);
+    uninvertMap.put("pointvector__y", Type.DOUBLE);
+    uninvertMap.put("SpatialOpRecursivePrefixTreeTest", Type.SORTED);
+    
     directory = newDirectory();
     final Random random = random();
     indexWriter = new RandomIndexWriter(random,directory, newIndexWriterConfig(random));
-    indexReader = indexWriter.getReader();
+    indexReader = UninvertingReader.wrap(indexWriter.getReader(), uninvertMap);
     indexSearcher = newSearcher(indexReader);
   }
 
@@ -110,8 +125,11 @@
 
   protected void commit() throws IOException {
     indexWriter.commit();
-    IOUtils.close(indexReader);
-    indexReader = indexWriter.getReader();
+    DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
+    if (newReader != null) {
+      IOUtils.close(indexReader);
+      indexReader = newReader;
+    }
     indexSearcher = newSearcher(indexReader);
   }
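
UninvertingReader.wrap is the bridge this patch offers for indexes that never
indexed doc values: each mapped field is uninverted on demand and exposed
through the DocValues API under the requested Type. A minimal hedged sketch
outside the test harness ("price" and "category" are hypothetical fields):

  // Expose FieldCache-style uninverted values through the DocValues API.
  Map<String,UninvertingReader.Type> mapping = new HashMap<>();
  mapping.put("price", UninvertingReader.Type.DOUBLE);
  mapping.put("category", UninvertingReader.Type.SORTED);
  DirectoryReader reader = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);

Because the wrapper is itself a DirectoryReader, the revised commit() above can
refresh it with DirectoryReader.openIfChanged() like any other reader.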
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
index 2cb7cab..d4030b4 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
@@ -154,83 +154,6 @@
     farsiIndex.close();
   }
   
-  // Test using various international locales with accented characters (which
-  // sort differently depending on locale)
-  //
-  // Copied (and slightly modified) from 
-  // org.apache.lucene.search.TestSort.testInternationalSort()
-  //  
-  // TODO: this test is really fragile. there are already 3 different cases,
-  // depending upon unicode version.
-  public void testCollationKeySort(Analyzer usAnalyzer,
-                                   Analyzer franceAnalyzer,
-                                   Analyzer swedenAnalyzer,
-                                   Analyzer denmarkAnalyzer,
-                                   String usResult,
-                                   String frResult,
-                                   String svResult,
-                                   String dkResult) throws Exception {
-    Directory indexStore = newDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
-
-    // document data:
-    // the tracer field is used to determine which document was hit
-    String[][] sortData = new String[][] {
-      // tracer contents US                 France             Sweden (sv_SE)     Denmark (da_DK)
-      {  "A",   "x",     "p\u00EAche",      "p\u00EAche",      "p\u00EAche",      "p\u00EAche"      },
-      {  "B",   "y",     "HAT",             "HAT",             "HAT",             "HAT"             },
-      {  "C",   "x",     "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9" },
-      {  "D",   "y",     "HUT",             "HUT",             "HUT",             "HUT"             },
-      {  "E",   "x",     "peach",           "peach",           "peach",           "peach"           },
-      {  "F",   "y",     "H\u00C5T",        "H\u00C5T",        "H\u00C5T",        "H\u00C5T"        },
-      {  "G",   "x",     "sin",             "sin",             "sin",             "sin"             },
-      {  "H",   "y",     "H\u00D8T",        "H\u00D8T",        "H\u00D8T",        "H\u00D8T"        },
-      {  "I",   "x",     "s\u00EDn",        "s\u00EDn",        "s\u00EDn",        "s\u00EDn"        },
-      {  "J",   "y",     "HOT",             "HOT",             "HOT",             "HOT"             },
-    };
-
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-    
-    for (int i = 0 ; i < sortData.length ; ++i) {
-      Document doc = new Document();
-      doc.add(new Field("tracer", sortData[i][0], customType));
-      doc.add(new TextField("contents", sortData[i][1], Field.Store.NO));
-      if (sortData[i][2] != null) 
-        doc.add(new TextField("US", usAnalyzer.tokenStream("US", sortData[i][2])));
-      if (sortData[i][3] != null) 
-        doc.add(new TextField("France", franceAnalyzer.tokenStream("France", sortData[i][3])));
-      if (sortData[i][4] != null)
-        doc.add(new TextField("Sweden", swedenAnalyzer.tokenStream("Sweden", sortData[i][4])));
-      if (sortData[i][5] != null) 
-        doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", sortData[i][5])));
-      writer.addDocument(doc);
-    }
-    writer.forceMerge(1);
-    writer.shutdown();
-    IndexReader reader = DirectoryReader.open(indexStore);
-    IndexSearcher searcher = new IndexSearcher(reader);
-
-    Sort sort = new Sort();
-    Query queryX = new TermQuery(new Term ("contents", "x"));
-    Query queryY = new TermQuery(new Term ("contents", "y"));
-    
-    sort.setSort(new SortField("US", SortField.Type.STRING));
-    assertMatches(searcher, queryY, sort, usResult);
-
-    sort.setSort(new SortField("France", SortField.Type.STRING));
-    assertMatches(searcher, queryX, sort, frResult);
-
-    sort.setSort(new SortField("Sweden", SortField.Type.STRING));
-    assertMatches(searcher, queryY, sort, svResult);
-
-    sort.setSort(new SortField("Denmark", SortField.Type.STRING));
-    assertMatches(searcher, queryY, sort, dkResult);
-    reader.close();
-    indexStore.close();
-  }
-    
   // Make sure the documents returned by the search match the expected list
   // Copied from TestSort.java
   private void assertMatches(IndexSearcher searcher, Query query, Sort sort, 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index 0681f29..f61ad31 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -23,7 +23,6 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -33,7 +32,6 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -47,7 +45,6 @@
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -1278,73 +1275,6 @@
     dir.close();
   }
   
-  private void doTestMissingVsFieldCache(final long minValue, final long maxValue) throws Exception {
-    doTestMissingVsFieldCache(new LongProducer() {
-      @Override
-      long next() {
-        return TestUtil.nextLong(random(), minValue, maxValue);
-      }
-    });
-  }
-  
-  private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
-    assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field indexedField = newStringField("indexed", "", Field.Store.NO);
-    Field dvField = new NumericDocValuesField("dv", 0);
-
-    
-    // index some docs
-    int numDocs = atLeast(300);
-    // numDocs should be always > 256 so that in case of a codec that optimizes
-    // for numbers of values <= 256, all storage layouts are tested
-    assert numDocs > 256;
-    for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
-      long value = longs.next();
-      indexedField.setStringValue(Long.toString(value));
-      dvField.setLongValue(value);
-      Document doc = new Document();
-      doc.add(idField);
-      // 1/4 of the time we neglect to add the fields
-      if (random().nextInt(4) > 0) {
-        doc.add(indexedField);
-        doc.add(dvField);
-      }
-      writer.addDocument(doc);
-      if (random().nextInt(31) == 0) {
-        writer.commit();
-      }
-    }
-    
-    // delete some docs
-    int numDeletions = random().nextInt(numDocs/10);
-    for (int i = 0; i < numDeletions; i++) {
-      int id = random().nextInt(numDocs);
-      writer.deleteDocuments(new Term("id", Integer.toString(id)));
-    }
-
-    // merge some segments and ensure that at least one of them has more than
-    // 256 values
-    writer.forceMerge(numDocs / 256);
-
-    writer.shutdown();
-    
-    // compare
-    DirectoryReader ir = DirectoryReader.open(dir);
-    for (AtomicReaderContext context : ir.leaves()) {
-      AtomicReader r = context.reader();
-      Bits expected = FieldCache.DEFAULT.getDocsWithField(r, "indexed");
-      Bits actual = FieldCache.DEFAULT.getDocsWithField(r, "dv");
-      assertEquals(expected, actual);
-    }
-    ir.close();
-    dir.close();
-  }
-  
   public void testBooleanNumericsVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -1359,13 +1289,6 @@
     }
   }
   
-  public void testByteMissingVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestMissingVsFieldCache(Byte.MIN_VALUE, Byte.MAX_VALUE);
-    }
-  }
-  
   public void testShortNumericsVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -1373,13 +1296,6 @@
     }
   }
   
-  public void testShortMissingVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestMissingVsFieldCache(Short.MIN_VALUE, Short.MAX_VALUE);
-    }
-  }
-  
   public void testIntNumericsVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -1387,13 +1303,6 @@
     }
   }
   
-  public void testIntMissingVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestMissingVsFieldCache(Integer.MIN_VALUE, Integer.MAX_VALUE);
-    }
-  }
-  
   public void testLongNumericsVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -1401,13 +1310,6 @@
     }
   }
   
-  public void testLongMissingVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestMissingVsFieldCache(Long.MIN_VALUE, Long.MAX_VALUE);
-    }
-  }
-  
   private void doTestBinaryVsStoredFields(int minLength, int maxLength) throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
@@ -1535,57 +1437,6 @@
     dir.close();
   }
   
-  private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field indexedField = new StringField("indexed", "", Field.Store.NO);
-    Field dvField = new SortedDocValuesField("dv", new BytesRef());
-    doc.add(idField);
-    doc.add(indexedField);
-    doc.add(dvField);
-    
-    // index some docs
-    int numDocs = atLeast(300);
-    for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
-      final int length;
-      if (minLength == maxLength) {
-        length = minLength; // fixed length
-      } else {
-        length = TestUtil.nextInt(random(), minLength, maxLength);
-      }
-      String value = TestUtil.randomSimpleString(random(), length);
-      indexedField.setStringValue(value);
-      dvField.setBytesValue(new BytesRef(value));
-      writer.addDocument(doc);
-      if (random().nextInt(31) == 0) {
-        writer.commit();
-      }
-    }
-    
-    // delete some docs
-    int numDeletions = random().nextInt(numDocs/10);
-    for (int i = 0; i < numDeletions; i++) {
-      int id = random().nextInt(numDocs);
-      writer.deleteDocuments(new Term("id", Integer.toString(id)));
-    }
-    writer.shutdown();
-    
-    // compare
-    DirectoryReader ir = DirectoryReader.open(dir);
-    for (AtomicReaderContext context : ir.leaves()) {
-      AtomicReader r = context.reader();
-      SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
-      SortedDocValues actual = r.getSortedDocValues("dv");
-      assertEquals(r.maxDoc(), expected, actual);
-    }
-    ir.close();
-    dir.close();
-  }
-  
   public void testSortedFixedLengthVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -1594,21 +1445,6 @@
     }
   }
   
-  public void testSortedFixedLengthVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      int fixedLength = TestUtil.nextInt(random(), 1, 10);
-      doTestSortedVsFieldCache(fixedLength, fixedLength);
-    }
-  }
-  
-  public void testSortedVariableLengthVsFieldCache() throws Exception {
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestSortedVsFieldCache(1, 10);
-    }
-  }
-  
   public void testSortedVariableLengthVsStoredFields() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -2173,206 +2009,6 @@
     }
   }
 
-  private void assertEquals(Bits expected, Bits actual) throws Exception {
-    assertEquals(expected.length(), actual.length());
-    for (int i = 0; i < expected.length(); i++) {
-      assertEquals(expected.get(i), actual.get(i));
-    }
-  }
-  
-  private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
-    assertEquals(maxDoc, new SingletonSortedSetDocValues(expected), new SingletonSortedSetDocValues(actual));
-  }
-  
-  private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
-    // can be null for the segment if no docs actually had any SortedDocValues
-    // in this case FC.getDocTermsOrds returns EMPTY
-    if (actual == null) {
-      assertEquals(DocValues.EMPTY_SORTED_SET, expected);
-      return;
-    }
-    assertEquals(expected.getValueCount(), actual.getValueCount());
-    // compare ord lists
-    for (int i = 0; i < maxDoc; i++) {
-      expected.setDocument(i);
-      actual.setDocument(i);
-      long expectedOrd;
-      while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
-        assertEquals(expectedOrd, actual.nextOrd());
-      }
-      assertEquals(NO_MORE_ORDS, actual.nextOrd());
-    }
-    
-    // compare ord dictionary
-    BytesRef expectedBytes = new BytesRef();
-    BytesRef actualBytes = new BytesRef();
-    for (long i = 0; i < expected.getValueCount(); i++) {
-      expected.lookupTerm(expectedBytes);
-      actual.lookupTerm(actualBytes);
-      assertEquals(expectedBytes, actualBytes);
-    }
-    
-    // compare termsenum
-    assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
-  }
-  
-  private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
-    BytesRef ref;
-    
-    // sequential next() through all terms
-    while ((ref = expected.next()) != null) {
-      assertEquals(ref, actual.next());
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    assertNull(actual.next());
-    
-    // sequential seekExact(ord) through all terms
-    for (long i = 0; i < numOrds; i++) {
-      expected.seekExact(i);
-      actual.seekExact(i);
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // sequential seekExact(BytesRef) through all terms
-    for (long i = 0; i < numOrds; i++) {
-      expected.seekExact(i);
-      assertTrue(actual.seekExact(expected.term()));
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // sequential seekCeil(BytesRef) through all terms
-    for (long i = 0; i < numOrds; i++) {
-      expected.seekExact(i);
-      assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // random seekExact(ord)
-    for (long i = 0; i < numOrds; i++) {
-      long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
-      expected.seekExact(randomOrd);
-      actual.seekExact(randomOrd);
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // random seekExact(BytesRef)
-    for (long i = 0; i < numOrds; i++) {
-      long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
-      expected.seekExact(randomOrd);
-      actual.seekExact(expected.term());
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // random seekCeil(BytesRef)
-    for (long i = 0; i < numOrds; i++) {
-      BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
-      SeekStatus expectedStatus = expected.seekCeil(target);
-      assertEquals(expectedStatus, actual.seekCeil(target));
-      if (expectedStatus != SeekStatus.END) {
-        assertEquals(expected.ord(), actual.ord());
-        assertEquals(expected.term(), actual.term());
-      }
-    }
-  }
-  
-  private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    
-    // index some docs
-    int numDocs = atLeast(300);
-    for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
-      doc.add(idField);
-      final int length;
-      if (minLength == maxLength) {
-        length = minLength; // fixed length
-      } else {
-        length = TestUtil.nextInt(random(), minLength, maxLength);
-      }
-      int numValues = random().nextInt(17);
-      // create a random list of strings
-      List<String> values = new ArrayList<>();
-      for (int v = 0; v < numValues; v++) {
-        values.add(TestUtil.randomSimpleString(random(), length));
-      }
-      
-      // add in any order to the indexed field
-      ArrayList<String> unordered = new ArrayList<>(values);
-      Collections.shuffle(unordered, random());
-      for (String v : values) {
-        doc.add(newStringField("indexed", v, Field.Store.NO));
-      }
-
-      // add in any order to the dv field
-      ArrayList<String> unordered2 = new ArrayList<>(values);
-      Collections.shuffle(unordered2, random());
-      for (String v : unordered2) {
-        doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
-      }
-
-      writer.addDocument(doc);
-      if (random().nextInt(31) == 0) {
-        writer.commit();
-      }
-    }
-    
-    // delete some docs
-    int numDeletions = random().nextInt(numDocs/10);
-    for (int i = 0; i < numDeletions; i++) {
-      int id = random().nextInt(numDocs);
-      writer.deleteDocuments(new Term("id", Integer.toString(id)));
-    }
-    
-    // compare per-segment
-    DirectoryReader ir = writer.getReader();
-    for (AtomicReaderContext context : ir.leaves()) {
-      AtomicReader r = context.reader();
-      SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed");
-      SortedSetDocValues actual = r.getSortedSetDocValues("dv");
-      assertEquals(r.maxDoc(), expected, actual);
-    }
-    ir.close();
-    
-    writer.forceMerge(1);
-    
-    // now compare again after the merge
-    ir = writer.getReader();
-    AtomicReader ar = getOnlySegmentReader(ir);
-    SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed");
-    SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
-    assertEquals(ir.maxDoc(), expected, actual);
-    ir.close();
-    
-    writer.shutdown();
-    dir.close();
-  }
-  
-  public void testSortedSetFixedLengthVsUninvertedField() throws Exception {
-    assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      int fixedLength = TestUtil.nextInt(random(), 1, 10);
-      doTestSortedSetVsUninvertedField(fixedLength, fixedLength);
-    }
-  }
-  
-  public void testSortedSetVariableLengthVsUninvertedField() throws Exception {
-    assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
-    int numIterations = atLeast(1);
-    for (int i = 0; i < numIterations; i++) {
-      doTestSortedSetVsUninvertedField(1, 10);
-    }
-  }
-
   public void testGCDCompression() throws Exception {
     int numIterations = atLeast(1);
     for (int i = 0; i < numIterations; i++) {
@@ -2606,172 +2242,6 @@
     ir.close();
     directory.close();
   }
-
-  // LUCENE-4853
-  public void testHugeBinaryValues() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    // FSDirectory because SimpleText will consume gobbs of
-    // space when storing big binary values:
-    Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
-    boolean doFixed = random().nextBoolean();
-    int numDocs;
-    int fixedLength = 0;
-    if (doFixed) {
-      // Sometimes make all values fixed length since some
-      // codecs have different code paths for this:
-      numDocs = TestUtil.nextInt(random(), 10, 20);
-      fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024);
-    } else {
-      numDocs = TestUtil.nextInt(random(), 100, 200);
-    }
-    IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
-    List<byte[]> docBytes = new ArrayList<>();
-    long totalBytes = 0;
-    for(int docID=0;docID<numDocs;docID++) {
-      // we don't use RandomIndexWriter because it might add
-      // more docvalues than we expect !!!!
-
-      // Must be > 64KB in size to ensure more than 2 pages in
-      // PagedBytes would be needed:
-      int numBytes;
-      if (doFixed) {
-        numBytes = fixedLength;
-      } else if (docID == 0 || random().nextInt(5) == 3) {
-        numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024);
-      } else {
-        numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024);
-      }
-      totalBytes += numBytes;
-      if (totalBytes > 5 * 1024*1024) {
-        break;
-      }
-      byte[] bytes = new byte[numBytes];
-      random().nextBytes(bytes);
-      docBytes.add(bytes);
-      Document doc = new Document();      
-      BytesRef b = new BytesRef(bytes);
-      b.length = bytes.length;
-      doc.add(new BinaryDocValuesField("field", b));
-      doc.add(new StringField("id", ""+docID, Field.Store.YES));
-      try {
-        w.addDocument(doc);
-      } catch (IllegalArgumentException iae) {
-        if (iae.getMessage().indexOf("is too large") == -1) {
-          throw iae;
-        } else {
-          // OK: some codecs can't handle binary DV > 32K
-          assertFalse(codecAcceptsHugeBinaryValues("field"));
-          w.rollback();
-          d.close();
-          return;
-        }
-      }
-    }
-    
-    DirectoryReader r;
-    try {
-      r = w.getReader();
-    } catch (IllegalArgumentException iae) {
-      if (iae.getMessage().indexOf("is too large") == -1) {
-        throw iae;
-      } else {
-        assertFalse(codecAcceptsHugeBinaryValues("field"));
-
-        // OK: some codecs can't handle binary DV > 32K
-        w.rollback();
-        d.close();
-        return;
-      }
-    }
-    w.shutdown();
-
-    AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
-
-    BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
-    for(int docID=0;docID<docBytes.size();docID++) {
-      StoredDocument doc = ar.document(docID);
-      BytesRef bytes = new BytesRef();
-      s.get(docID, bytes);
-      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
-      assertEquals(expected.length, bytes.length);
-      assertEquals(new BytesRef(expected), bytes);
-    }
-
-    assertTrue(codecAcceptsHugeBinaryValues("field"));
-
-    ar.close();
-    d.close();
-  }
-
-  // TODO: get this out of here and into the deprecated codecs (4.0, 4.2)
-  public void testHugeBinaryValueLimit() throws Exception {
-    // We only test DVFormats that have a limit
-    assumeFalse("test requires codec with limits on max binary field length", codecAcceptsHugeBinaryValues("field"));
-    Analyzer analyzer = new MockAnalyzer(random());
-    // FSDirectory because SimpleText will consume gobbs of
-    // space when storing big binary values:
-    Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
-    boolean doFixed = random().nextBoolean();
-    int numDocs;
-    int fixedLength = 0;
-    if (doFixed) {
-      // Sometimes make all values fixed length since some
-      // codecs have different code paths for this:
-      numDocs = TestUtil.nextInt(random(), 10, 20);
-      fixedLength = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
-    } else {
-      numDocs = TestUtil.nextInt(random(), 100, 200);
-    }
-    IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
-    List<byte[]> docBytes = new ArrayList<>();
-    long totalBytes = 0;
-    for(int docID=0;docID<numDocs;docID++) {
-      // we don't use RandomIndexWriter because it might add
-      // more docvalues than we expect !!!!
-
-      // Must be > 64KB in size to ensure more than 2 pages in
-      // PagedBytes would be needed:
-      int numBytes;
-      if (doFixed) {
-        numBytes = fixedLength;
-      } else if (docID == 0 || random().nextInt(5) == 3) {
-        numBytes = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
-      } else {
-        numBytes = TestUtil.nextInt(random(), 1, Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH);
-      }
-      totalBytes += numBytes;
-      if (totalBytes > 5 * 1024*1024) {
-        break;
-      }
-      byte[] bytes = new byte[numBytes];
-      random().nextBytes(bytes);
-      docBytes.add(bytes);
-      Document doc = new Document();      
-      BytesRef b = new BytesRef(bytes);
-      b.length = bytes.length;
-      doc.add(new BinaryDocValuesField("field", b));
-      doc.add(new StringField("id", ""+docID, Field.Store.YES));
-      w.addDocument(doc);
-    }
-    
-    DirectoryReader r = w.getReader();
-    w.shutdown();
-
-    AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
-
-    BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
-    for(int docID=0;docID<docBytes.size();docID++) {
-      StoredDocument doc = ar.document(docID);
-      BytesRef bytes = new BytesRef();
-      s.get(docID, bytes);
-      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
-      assertEquals(expected.length, bytes.length);
-      assertEquals(new BytesRef(expected), bytes);
-    }
-
-    ar.close();
-    d.close();
-  }
   
   /** Tests dv against stored fields with threads (binary/numeric/sorted, no missing) */
   public void testThreads() throws Exception {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index 853eea2..7e11a39 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -43,10 +43,10 @@
 import org.apache.lucene.document.FloatField;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
@@ -289,6 +289,7 @@
       FieldType ft = new FieldType(IntField.TYPE_STORED);
       ft.setNumericPrecisionStep(Integer.MAX_VALUE);
       doc.add(new IntField("id", id, ft));
+      doc.add(new NumericDocValuesField("id", id));
       w.addDocument(doc);
     }
     final DirectoryReader r = w.getReader();
@@ -298,12 +299,12 @@
 
     for(AtomicReaderContext ctx : r.leaves()) {
       final AtomicReader sub = ctx.reader();
-      final FieldCache.Ints ids = FieldCache.DEFAULT.getInts(sub, "id", false);
+      final NumericDocValues ids = DocValues.getNumeric(sub, "id");
       for(int docID=0;docID<sub.numDocs();docID++) {
         final StoredDocument doc = sub.document(docID);
         final Field f = (Field) doc.getField("nf");
         assertTrue("got f=" + f, f instanceof StoredField);
-        assertEquals(answers[ids.get(docID)], f.numericValue());
+        assertEquals(answers[(int) ids.get(docID)], f.numericValue());
       }
     }
     r.close();
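
The hunk above is the core migration pattern this change applies throughout: per-segment numeric access moves from FieldCache.DEFAULT.getInts(...) to DocValues.getNumeric(...), which exposes every value as a long (hence the new (int) cast at the lookup). A minimal sketch of the new read path, assuming an index whose "id" field was written with NumericDocValuesField as in the test above:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;

    class NumericDocValuesSketch {
      // Prints every document's "id" value, segment by segment.
      static void printIds(DirectoryReader reader) throws IOException {
        for (AtomicReaderContext ctx : reader.leaves()) {
          // Per the CHANGES entry, the helper never returns null.
          NumericDocValues ids = DocValues.getNumeric(ctx.reader(), "id");
          for (int doc = 0; doc < ctx.reader().maxDoc(); doc++) {
            System.out.println((ctx.docBase + doc) + " -> " + ids.get(doc));
          }
        }
      }
    }
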
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index 4e7a11a..eb1fa1c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -130,11 +130,6 @@
     }
   }
   
-  public static void purgeFieldCache(IndexReader r) throws IOException {
-    // this is just a hack, to get an atomic reader that contains all subreaders for insanity checks
-    FieldCache.DEFAULT.purgeByCacheKey(SlowCompositeReaderWrapper.wrap(r).getCoreCacheKey());
-  }
-  
   /** This is a MultiReader that can be used for randomly wrapping other readers
    * without creating FieldCache insanity.
    * The trick is to use an opaque/fake cache key. */
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index c87b922..d6b8e9c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -105,8 +105,6 @@
 import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.search.AssertingIndexSearcher;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache.CacheEntry;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -121,7 +119,6 @@
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NRTCachingDirectory;
 import org.apache.lucene.store.RateLimitedDirectoryWrapper;
-import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
 import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
@@ -630,7 +627,6 @@
     .around(threadAndTestNameRule)
     .around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
     .around(new TestRuleSetupAndRestoreInstanceEnv())
-    .around(new TestRuleFieldCacheSanity())
     .around(parentChainCallRule);
 
   private static final Map<String,FieldType> fieldToType = new HashMap<String,FieldType>();
@@ -742,48 +738,6 @@
   }
 
   /**
-   * Asserts that FieldCacheSanityChecker does not detect any
-   * problems with FieldCache.DEFAULT.
-   * <p>
-   * If any problems are found, they are logged to System.err
-   * (allong with the msg) when the Assertion is thrown.
-   * </p>
-   * <p>
-   * This method is called by tearDown after every test method,
-   * however IndexReaders scoped inside test methods may be garbage
-   * collected prior to this method being called, causing errors to
-   * be overlooked. Tests are encouraged to keep their IndexReaders
-   * scoped at the class level, or to explicitly call this method
-   * directly in the same scope as the IndexReader.
-   * </p>
-   *
-   * @see org.apache.lucene.util.FieldCacheSanityChecker
-   */
-  protected static void assertSaneFieldCaches(final String msg) {
-    final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
-    Insanity[] insanity = null;
-    try {
-      try {
-        insanity = FieldCacheSanityChecker.checkSanity(entries);
-      } catch (RuntimeException e) {
-        dumpArray(msg + ": FieldCache", entries, System.err);
-        throw e;
-      }
-
-      assertEquals(msg + ": Insane FieldCache usage(s) found",
-                   0, insanity.length);
-      insanity = null;
-    } finally {
-
-      // report this in the event of any exception/failure
-      // if no failure, then insanity will be null anyway
-      if (null != insanity) {
-        dumpArray(msg + ": Insane FieldCache usage(s)", insanity, System.err);
-      }
-    }
-  }
-
-  /**
    * Returns a number of at least <code>i</code>
    * <p>
    * The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleFieldCacheSanity.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleFieldCacheSanity.java
deleted file mode 100644
index 7ad81a5..0000000
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleFieldCacheSanity.java
+++ /dev/null
@@ -1,68 +0,0 @@
-package org.apache.lucene.util;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.util.FieldCacheSanityChecker; // javadocs
-import org.junit.rules.TestRule;
-import org.junit.runner.Description;
-import org.junit.runners.model.Statement;
-
-/**
- * This rule will fail the test if it has insane field caches.
- * <p>
- * calling assertSaneFieldCaches here isn't as useful as having test
- * classes call it directly from the scope where the index readers
- * are used, because they could be gc'ed just before this tearDown
- * method is called.
- * <p>
- * But it's better then nothing.
- * <p>
- * If you are testing functionality that you know for a fact
- * "violates" FieldCache sanity, then you should either explicitly
- * call purgeFieldCache at the end of your test method, or refactor
- * your Test class so that the inconsistent FieldCache usages are
- * isolated in distinct test methods
- * 
- * @see FieldCacheSanityChecker
- */
-public class TestRuleFieldCacheSanity implements TestRule {
-  
-  @Override
-  public Statement apply(final Statement s, final Description d) {
-    return new Statement() {
-      @Override
-      public void evaluate() throws Throwable {
-        s.evaluate();
-
-        Throwable problem = null;
-        try {
-          LuceneTestCase.assertSaneFieldCaches(d.getDisplayName());
-        } catch (Throwable t) {
-          problem = t;
-        }
-
-        FieldCache.DEFAULT.purgeAllCaches();
-
-        if (problem != null) {
-          Rethrow.rethrow(problem);
-        }
-      }
-    };
-  }  
-}
diff --git a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
index a04e979..4291917 100644
--- a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
+++ b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
@@ -34,10 +34,11 @@
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocTermOrdsRangeFilter;
-import org.apache.lucene.search.FieldCacheRangeFilter;
+import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.util.ResourceLoader;
@@ -224,6 +225,15 @@
   public SortField getSortField(SchemaField field, boolean top) {
     return getStringSort(field, top);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY; 
+    } else {
+      return Type.SORTED;
+    }
+  }
 
   @Override
   public Analyzer getIndexAnalyzer() {
@@ -270,7 +280,7 @@
           return new ConstantScoreQuery(DocTermOrdsRangeFilter.newBytesRefRange(
               field.getName(), low, high, minInclusive, maxInclusive));
         } else {
-          return new ConstantScoreQuery(FieldCacheRangeFilter.newBytesRefRange(
+          return new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange(
               field.getName(), low, high, minInclusive, maxInclusive));
         } 
     } else {
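
The new getUninversionType() override is how a Solr field type now declares which FieldCache-style view should be synthesized for it when no real docvalues exist: single-valued collated fields map to Type.SORTED, multi-valued ones to Type.SORTED_SET_BINARY. A hedged sketch of how such a mapping is consumed, assuming the 4.9-era UninvertingReader.wrap(DirectoryReader, Map) signature and hypothetical field names:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.uninverting.UninvertingReader;
    import org.apache.lucene.uninverting.UninvertingReader.Type;

    class UninvertingSketch {
      // Wraps a reader so sorting/faceting on indexed-only fields sees docvalues,
      // built lazily by uninverting the postings.
      static DirectoryReader uninvert(DirectoryReader in) throws IOException {
        Map<String,Type> mapping = new HashMap<>();
        mapping.put("collated", Type.SORTED);               // single-valued field
        mapping.put("collated_mv", Type.SORTED_SET_BINARY); // multi-valued field
        return UninvertingReader.wrap(in, mapping);
      }
    }
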
diff --git a/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java b/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
index eac8664..35b5d58 100644
--- a/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
+++ b/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
@@ -33,7 +33,6 @@
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.solr.analytics.expression.ExpressionFactory;
 import org.apache.solr.analytics.request.ExpressionRequest;
 import org.apache.solr.analytics.util.AnalyticsParams;
@@ -357,7 +356,7 @@
       if (sourceType!=NUMBER_TYPE&&sourceType!=FIELD_TYPE) {
         return null;
       }
-      return new IntFieldSource(expressionString, FieldCache.NUMERIC_UTILS_INT_PARSER) {
+      return new IntFieldSource(expressionString) {
         public String description() {
           return field;
         }
@@ -366,7 +365,7 @@
       if (sourceType!=NUMBER_TYPE&&sourceType!=FIELD_TYPE) {
         return null;
       }
-      return new LongFieldSource(expressionString, FieldCache.NUMERIC_UTILS_LONG_PARSER) {
+      return new LongFieldSource(expressionString) {
         public String description() {
           return field;
         }
@@ -375,7 +374,7 @@
       if (sourceType!=NUMBER_TYPE&&sourceType!=FIELD_TYPE) {
         return null;
       }
-      return new FloatFieldSource(expressionString, FieldCache.NUMERIC_UTILS_FLOAT_PARSER) {
+      return new FloatFieldSource(expressionString) {
         public String description() {
           return field;
         }
@@ -384,7 +383,7 @@
       if (sourceType!=NUMBER_TYPE&&sourceType!=FIELD_TYPE) {
         return null;
       }
-      return new DoubleFieldSource(expressionString, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER) {
+      return new DoubleFieldSource(expressionString) {
         public String description() {
           return field;
         }
@@ -393,7 +392,7 @@
       if (sourceType!=DATE_TYPE&&sourceType!=FIELD_TYPE) {
         return null;
       }
-      return new DateFieldSource(expressionString, AnalyticsParsers.DEFAULT_DATE_PARSER) {
+      return new DateFieldSource(expressionString) {
         public String description() {
           return field;
         }
diff --git a/solr/core/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java b/solr/core/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
index b27fe70..0e4eceb 100644
--- a/solr/core/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
+++ b/solr/core/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
@@ -18,14 +18,9 @@
 package org.apache.solr.analytics.util;
 
 import java.io.IOException;
-import java.text.ParseException;
 import java.util.Arrays;
 import java.util.Date;
 
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.LongParser;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.solr.schema.FieldType;
@@ -61,31 +56,7 @@
       return AnalyticsParsers.STRING_PARSER;
     }
   }
-  
-  /** Long Parser that takes in String representations of dates and
-   *  converts them into longs
-   */
-  public final static LongParser DEFAULT_DATE_PARSER = new LongParser() {
-    @SuppressWarnings("deprecation")
-    @Override
-    public long parseLong(BytesRef term) {
-      try {
-        return TrieDateField.parseDate(term.utf8ToString()).getTime();
-      } catch (ParseException e) {
-        System.err.println("Cannot parse date "+term.utf8ToString());
-        return 0;
-      }
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_DATE_PARSER"; 
-    }
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-  };
-  
+
   /**
    * For use in classes that grab values by docValue.
    * Converts a BytesRef object into the correct readable text.
diff --git a/solr/core/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java b/solr/core/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
index 2b0dbae..c002e35 100644
--- a/solr/core/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
+++ b/solr/core/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
@@ -18,17 +18,18 @@
 package org.apache.solr.analytics.util.valuesource;
 
 import java.io.IOException;
-import java.text.ParseException;
 import java.util.Date;
 import java.util.Map;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDate;
 import org.apache.solr.schema.TrieDateField;
@@ -39,16 +40,12 @@
  */
 public class DateFieldSource extends LongFieldSource {
 
-  public DateFieldSource(String field) throws ParseException {
-    super(field, null);
-  }
-
-  public DateFieldSource(String field, FieldCache.LongParser parser) {
-    super(field, parser);
+  public DateFieldSource(String field) {
+    super(field);
   }
 
   public long externalToLong(String extVal) {
-    return parser.parseLong(new BytesRef(extVal));
+    return NumericUtils.prefixCodedToLong(new BytesRef(extVal));
   }
 
   public Object longToObject(long val) {
@@ -62,8 +59,8 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Longs arr = cache.getLongs(readerContext.reader(), field, parser, true);
-    final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
+    final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
+    final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
     return new LongDocValues(this) {
       @Override
       public long longVal(int doc) {
@@ -110,16 +107,12 @@
   public boolean equals(Object o) {
     if (o.getClass() != this.getClass()) return false;
     DateFieldSource other = (DateFieldSource) o;
-    if (parser==null) {
-      return field.equals(other.field);
-    } else {
-      return field.equals(other.field) && parser.equals(other.parser);
-    }
+    return field.equals(other.field);
   }
 
   @Override
   public int hashCode() {
-    int h = parser == null ? this.getClass().hashCode() : parser.getClass().hashCode();
+    int h = this.getClass().hashCode();
     h += super.hashCode();
     return h;
   }
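
With the parser removed, getValues() above pairs DocValues.getNumeric with DocValues.getDocsWithField: NumericDocValues reports 0 for documents that have no value, so the Bits are what distinguish a stored zero from a missing value. A minimal sketch of that pattern (the field name is hypothetical):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.util.Bits;

    class MissingValueSketch {
      // Returns the doc's value, or null when the document has no value;
      // a raw 0 from NumericDocValues is ambiguous on its own.
      static Long valueOrNull(AtomicReader reader, int doc) throws IOException {
        NumericDocValues values = DocValues.getNumeric(reader, "date");
        Bits docsWithField = DocValues.getDocsWithField(reader, "date");
        if (!docsWithField.get(doc)) {
          return null;
        }
        return values.get(doc);
      }
    }
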
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 567fc30..9d51278 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -1461,7 +1461,7 @@
       if (newestSearcher != null && (nrt || indexDirFile.equals(newIndexDirFile))) {
 
         DirectoryReader newReader;
-        DirectoryReader currentReader = newestSearcher.get().getIndexReader();
+        DirectoryReader currentReader = newestSearcher.get().getRawReader();
 
         // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
         
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 64b5690..a8d9a7d 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -27,7 +28,6 @@
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.TopDocsCollector;
@@ -188,7 +188,7 @@
 
     SolrIndexSearcher searcher = req.getSearcher();
     AtomicReader reader = searcher.getAtomicReader();
-    SortedDocValues values = FieldCache.DEFAULT.getTermsIndex(reader, field);
+    SortedDocValues values = DocValues.getSorted(reader, field);
     FixedBitSet groupBits = new FixedBitSet(values.getValueCount());
     DocList docList = rb.getResults().docList;
     IntOpenHashSet collapsedSet = new IntOpenHashSet(docList.size()*2);
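
DocValues.getSorted(reader, field) replaces FieldCache.DEFAULT.getTermsIndex but keeps the same ordinal-oriented shape: getValueCount() sizes structures such as the FixedBitSet above, getOrd(doc) yields a per-document ordinal (-1 when the document has no value), and lookupOrd resolves an ordinal back to term bytes. A small sketch using the 4.x lookupOrd(int, BytesRef) signature seen elsewhere in this patch:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.util.BytesRef;

    class SortedDocValuesSketch {
      // Resolves the term stored for a document, or null if the doc has no value.
      static String termForDoc(AtomicReader reader, String field, int doc) throws IOException {
        SortedDocValues values = DocValues.getSorted(reader, field);
        int ord = values.getOrd(doc);
        if (ord == -1) {
          return null;
        }
        BytesRef term = new BytesRef();
        values.lookupOrd(ord, term); // fills the caller-supplied BytesRef
        return term.utf8ToString();
      }
    }
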
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java b/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
index 70d0f38..28020bc 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
@@ -25,10 +25,10 @@
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -100,7 +100,7 @@
   // Currently only used by UnInvertedField stats
   public boolean facetTermNum(int docID, int statsTermNum) throws IOException {
     if (topLevelSortedValues == null) {
-      topLevelSortedValues = FieldCache.DEFAULT.getTermsIndex(topLevelReader, name);
+      topLevelSortedValues = DocValues.getSorted(topLevelReader, name);
     }
     
     int term = topLevelSortedValues.getOrd(docID);
diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
index 3075a06..b36481d 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
@@ -37,6 +37,7 @@
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.request.DocValuesStats;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.UnInvertedField;
 import org.apache.solr.schema.FieldType;
@@ -315,9 +316,8 @@
         NamedList<?> stv;
 
         if (sf.multiValued() || ft.multiValuedFieldCache()) {
-          //use UnInvertedField for multivalued fields
-          UnInvertedField uif = UnInvertedField.getUnInvertedField(statsField, searcher);
-          stv = uif.getStats(searcher, docs, calcDistinct, facets).getStatsValues();
+          // TODO: should this also be used for single-valued string fields? (should work fine)
+          stv = DocValuesStats.getCounts(searcher, sf.getName(), docs, calcDistinct, facets).getStatsValues();
         } else {
           stv = getFieldCacheStats(statsField, calcDistinct, facets);
         }
diff --git a/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java b/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
index c931c59..6fe4584 100644
--- a/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
@@ -60,10 +60,13 @@
     SchemaField schemaField = searcher.getSchema().getField(fieldName);
     FieldType ft = schemaField.getType();
     NamedList<Integer> res = new NamedList<>();
+    
+    // TODO: remove multiValuedFieldCache(), check dv type / uninversion type?
+    final boolean multiValued = schemaField.multiValued() || ft.multiValuedFieldCache();
 
     final SortedSetDocValues si; // for term lookups only
     OrdinalMap ordinalMap = null; // for mapping per-segment ords to global ones
-    if (schemaField.multiValued()) {
+    if (multiValued) {
       si = searcher.getAtomicReader().getSortedSetDocValues(fieldName);
       if (si instanceof MultiSortedSetDocValues) {
         ordinalMap = ((MultiSortedSetDocValues)si).mapping;
@@ -126,7 +129,7 @@
           disi = dis.iterator();
         }
         if (disi != null) {
-          if (schemaField.multiValued()) {
+          if (multiValued) {
             SortedSetDocValues sub = leaf.reader().getSortedSetDocValues(fieldName);
             if (sub == null) {
               sub = DocValues.EMPTY_SORTED_SET;
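
The ordinalMap captured above is what lets faceting collect per segment yet still report terms from the merged (global) term space: each segment-local ordinal is translated through MultiDocValues' OrdinalMap. A minimal sketch of the translation step, using the getGlobalOrd call this patch also uses in DocValuesStats:

    import org.apache.lucene.index.MultiDocValues.OrdinalMap;

    class GlobalOrdSketch {
      // Maps a segment-local ordinal to its global ordinal; subIndex is the
      // segment's position in reader.leaves(). With a single segment there is
      // no map and local ordinals are already global.
      static long globalOrd(OrdinalMap map, int subIndex, int segmentOrd) {
        return map == null ? segmentOrd : map.getGlobalOrd(subIndex, segmentOrd);
      }
    }
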
diff --git a/solr/core/src/java/org/apache/solr/request/DocValuesStats.java b/solr/core/src/java/org/apache/solr/request/DocValuesStats.java
new file mode 100644
index 0000000..1e9898f
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/request/DocValuesStats.java
@@ -0,0 +1,198 @@
+package org.apache.solr.request;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.MultiDocValues.MultiSortedDocValues;
+import org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues;
+import org.apache.lucene.index.MultiDocValues.OrdinalMap;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.handler.component.FieldFacetStats;
+import org.apache.solr.handler.component.StatsValues;
+import org.apache.solr.handler.component.StatsValuesFactory;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.DocSet;
+import org.apache.solr.search.SolrIndexSearcher;
+
+/**
+ * Computes term stats for a docvalues field (single- or multi-valued).
+ * <p>
+ * Instead of working on a top-level reader view (binary-search per docid),
+ * it collects per-segment, but maps ordinals to the global ordinal space using
+ * MultiDocValues' OrdinalMap.
+ */
+public class DocValuesStats {
+  private DocValuesStats() {}
+  
+  public static StatsValues getCounts(SolrIndexSearcher searcher, String fieldName, DocSet docs, boolean calcDistinct, String[] facet) throws IOException {
+    SchemaField schemaField = searcher.getSchema().getField(fieldName);
+    FieldType ft = schemaField.getType();
+    StatsValues res = StatsValuesFactory.createStatsValues(schemaField, calcDistinct);
+    
+    // Initialize facet stats, if facets have been passed in
+    final FieldFacetStats[] facetStats = new FieldFacetStats[facet.length];
+    int upto = 0;
+    for (String facetField : facet) {
+      SchemaField facetSchemaField = searcher.getSchema().getField(facetField);
+      facetStats[upto++] = new FieldFacetStats(searcher, facetField, schemaField, facetSchemaField, calcDistinct);
+    }
+    
+    // TODO: remove multiValuedFieldCache(), check dv type / uninversion type?
+    final boolean multiValued = schemaField.multiValued() || ft.multiValuedFieldCache();
+
+    SortedSetDocValues si; // for term lookups only
+    OrdinalMap ordinalMap = null; // for mapping per-segment ords to global ones
+    if (multiValued) {
+      si = searcher.getAtomicReader().getSortedSetDocValues(fieldName);
+      if (si instanceof MultiSortedSetDocValues) {
+        ordinalMap = ((MultiSortedSetDocValues)si).mapping;
+      }
+    } else {
+      SortedDocValues single = searcher.getAtomicReader().getSortedDocValues(fieldName);
+      si = single == null ? null : DocValues.singleton(single);
+      if (single instanceof MultiSortedDocValues) {
+        ordinalMap = ((MultiSortedDocValues)single).mapping;
+      }
+    }
+    if (si == null) {
+      si = DocValues.EMPTY_SORTED_SET;
+    }
+    if (si.getValueCount() >= Integer.MAX_VALUE) {
+      throw new UnsupportedOperationException("Currently this stats method is limited to " + Integer.MAX_VALUE + " unique terms");
+    }
+
+    DocSet missing = docs.andNot(searcher.getDocSet(new TermRangeQuery(fieldName, null, null, false, false)));
+
+    final int nTerms = (int) si.getValueCount();   
+    
+    // count collection array only needs to be as big as the number of terms we are
+    // going to collect counts for.
+    final int[] counts = new int[nTerms];
+    
+    Filter filter = docs.getTopFilter();
+    List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves();
+    for (int subIndex = 0; subIndex < leaves.size(); subIndex++) {
+      AtomicReaderContext leaf = leaves.get(subIndex);
+      DocIdSet dis = filter.getDocIdSet(leaf, null); // solr docsets already exclude any deleted docs
+      DocIdSetIterator disi = null;
+      if (dis != null) {
+        disi = dis.iterator();
+      }
+      if (disi != null) {
+        int docBase = leaf.docBase;
+        if (multiValued) {
+          SortedSetDocValues sub = leaf.reader().getSortedSetDocValues(fieldName);
+          if (sub == null) {
+            sub = DocValues.EMPTY_SORTED_SET;
+          }
+          final SortedDocValues singleton = DocValues.unwrapSingleton(sub);
+          if (singleton != null) {
+            // some codecs may optimize SORTED_SET storage for single-valued fields
+            accumSingle(counts, docBase, facetStats, singleton, disi, subIndex, ordinalMap);
+          } else {
+            accumMulti(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
+          }
+        } else {
+          SortedDocValues sub = leaf.reader().getSortedDocValues(fieldName);
+          if (sub == null) {
+            sub = DocValues.EMPTY_SORTED;
+          }
+          accumSingle(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
+        }
+      }
+    }
+    
+    // add results in index order
+    BytesRef value = new BytesRef();
+    for (int ord = 0; ord < counts.length; ord++) {
+      int count = counts[ord];
+      if (count > 0) {
+        si.lookupOrd(ord, value);
+        res.accumulate(value, count);
+        for (FieldFacetStats f : facetStats) {
+          f.accumulateTermNum(ord, value);
+        }
+      }
+    }
+
+    res.addMissing(missing.size());
+    if (facetStats.length > 0) {
+      for (FieldFacetStats f : facetStats) {
+        Map<String, StatsValues> facetStatsValues = f.facetStatsValues;
+        FieldType facetType = searcher.getSchema().getFieldType(f.name);
+        for (Map.Entry<String,StatsValues> entry : facetStatsValues.entrySet()) {
+          String termLabel = entry.getKey();
+          int missingCount = searcher.numDocs(new TermQuery(new Term(f.name, facetType.toInternal(termLabel))), missing);
+          entry.getValue().addMissing(missingCount);
+        }
+        res.addFacet(f.name, facetStatsValues);
+      }
+    }
+    return res;
+  }
+
+  /** accumulates per-segment single-valued stats */
+  static void accumSingle(int counts[], int docBase, FieldFacetStats[] facetStats, SortedDocValues si, DocIdSetIterator disi, int subIndex, OrdinalMap map) throws IOException {
+    int doc;
+    while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      int term = si.getOrd(doc);
+      if (term >= 0) {
+        if (map != null) {
+          term = (int) map.getGlobalOrd(subIndex, term);
+        }
+        counts[term]++;
+        for (FieldFacetStats f : facetStats) {
+          f.facetTermNum(docBase + doc, term);
+        }
+      }
+    }
+  }
+  
+  /** accumulates per-segment multi-valued stats */
+  static void accumMulti(int counts[], int docBase, FieldFacetStats[] facetStats, SortedSetDocValues si, DocIdSetIterator disi, int subIndex, OrdinalMap map) throws IOException {
+    int doc;
+    while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      si.setDocument(doc);
+      long ord;
+      while ((ord = si.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+        int term = (int) ord;
+        if (map != null) {
+          term = (int) map.getGlobalOrd(subIndex, term);
+        }
+        counts[term]++;
+        for (FieldFacetStats f : facetStats) {
+          f.facetTermNum(docBase + doc, term);
+        }
+      }
+    }
+  }
+}
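
A usage sketch for the new class, mirroring the StatsComponent call site earlier in this patch; the field names are hypothetical, and the searcher and DocSet are assumed to come from the enclosing request:

    import java.io.IOException;
    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.handler.component.StatsValues;
    import org.apache.solr.request.DocValuesStats;
    import org.apache.solr.search.DocSet;
    import org.apache.solr.search.SolrIndexSearcher;

    class DocValuesStatsSketch {
      // Computes stats for "category" over the matching docs, faceting on "store".
      static NamedList<?> categoryStats(SolrIndexSearcher searcher, DocSet docs) throws IOException {
        StatsValues stats = DocValuesStats.getCounts(
            searcher, "category", docs, /*calcDistinct*/ false, new String[] {"store"});
        return stats.getStatsValues();
      }
    }
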
diff --git a/solr/core/src/java/org/apache/solr/request/NumericFacets.java b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
index d88fecf..2f2dd7d 100644
--- a/solr/core/src/java/org/apache/solr/request/NumericFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
@@ -30,16 +30,16 @@
 
 import org.apache.lucene.document.FieldType.NumericType;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
-import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.PriorityQueue;
 import org.apache.lucene.util.StringHelper;
 import org.apache.solr.common.params.FacetParams;
@@ -144,7 +144,7 @@
     final HashTable hashTable = new HashTable();
     final Iterator<AtomicReaderContext> ctxIt = leaves.iterator();
     AtomicReaderContext ctx = null;
-    FieldCache.Longs longs = null;
+    NumericDocValues longs = null;
     Bits docsWithField = null;
     int missingCount = 0;
     for (DocIterator docsIt = docs.iterator(); docsIt.hasNext(); ) {
@@ -156,39 +156,39 @@
         assert doc >= ctx.docBase;
         switch (numericType) {
           case LONG:
-            longs = FieldCache.DEFAULT.getLongs(ctx.reader(), fieldName, true);
+            longs = DocValues.getNumeric(ctx.reader(), fieldName);
             break;
           case INT:
-            final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(ctx.reader(), fieldName, true);
-            longs = new FieldCache.Longs() {
-              @Override
-              public long get(int docID) {
-                return ints.get(docID);
-              }
-            };
+            longs = DocValues.getNumeric(ctx.reader(), fieldName);
             break;
           case FLOAT:
-            final FieldCache.Floats floats = FieldCache.DEFAULT.getFloats(ctx.reader(), fieldName, true);
-            longs = new FieldCache.Longs() {
+            final NumericDocValues floats = DocValues.getNumeric(ctx.reader(), fieldName);
+            // TODO: this bit flipping should probably be moved to tie-break in the PQ comparator
+            longs = new NumericDocValues() {
               @Override
               public long get(int docID) {
-                return NumericUtils.floatToSortableInt(floats.get(docID));
+                long bits = floats.get(docID);
+                if (bits<0) bits ^= 0x7fffffffffffffffL;
+                return bits;
               }
             };
             break;
           case DOUBLE:
-            final FieldCache.Doubles doubles = FieldCache.DEFAULT.getDoubles(ctx.reader(), fieldName, true);
-            longs = new FieldCache.Longs() {
+            final NumericDocValues doubles = DocValues.getNumeric(ctx.reader(), fieldName);
+            // TODO: this bit flipping should probably be moved to tie-break in the PQ comparator
+            longs = new NumericDocValues() {
               @Override
               public long get(int docID) {
-                return NumericUtils.doubleToSortableLong(doubles.get(docID));
+                long bits = doubles.get(docID);
+                if (bits<0) bits ^= 0x7fffffffffffffffL;
+                return bits;
               }
             };
             break;
           default:
             throw new AssertionError();
         }
-        docsWithField = FieldCache.DEFAULT.getDocsWithField(ctx.reader(), fieldName);
+        docsWithField = DocValues.getDocsWithField(ctx.reader(), fieldName);
       }
       long v = longs.get(doc - ctx.docBase);
       if (v != 0 || docsWithField.get(doc - ctx.docBase)) {
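
The XOR in the float/double wrappers above is the standard sortable-bits trick: under signed comparison, raw IEEE 754 bit patterns put negatives below positives but order the negatives among themselves in reverse, and flipping every bit except the sign bit of a negative pattern repairs that. A self-contained check (run with -ea), assuming float values are stored as floatToIntBits widened to long, as FloatDocValuesField does:

    class SortableBitsSketch {
      // Mirrors the wrapper above: flip all non-sign bits when the sign bit is set,
      // so plain signed-long comparison matches numeric order of the encoded floats.
      static long sortable(long bits) {
        return bits < 0 ? bits ^ 0x7fffffffffffffffL : bits;
      }

      public static void main(String[] args) {
        long neg25 = Float.floatToIntBits(-2.5f); // int widens with sign extension
        long neg15 = Float.floatToIntBits(-1.5f);
        long pos10 = Float.floatToIntBits(1.0f);
        assert neg15 < neg25;                     // raw bits: negatives reversed
        assert sortable(neg25) < sortable(neg15); // flipped: -2.5 < -1.5
        assert sortable(neg15) < sortable(pos10); // negatives still below positives
      }
    }
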
diff --git a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
index 6ad399c..5caf34b 100644
--- a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
+++ b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
@@ -22,11 +22,11 @@
 import java.util.concurrent.*;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -236,7 +236,7 @@
     BytesRef tempBR = new BytesRef();
 
     void countTerms() throws IOException {
-      si = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldName);
+      si = DocValues.getSorted(context.reader(), fieldName);
       // SolrCore.log.info("reader= " + reader + "  FC=" + System.identityHashCode(si));
 
       if (prefix!=null) {
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index 38aeb0c..c9e686f 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -38,6 +38,8 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.MultiDocsEnum;
@@ -46,8 +48,9 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -83,6 +86,7 @@
 import org.apache.solr.search.DocSet;
 import org.apache.solr.search.Grouping;
 import org.apache.solr.search.HashDocSet;
+import org.apache.solr.search.Insanity;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.QueryParsing;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -378,18 +382,13 @@
 
     final boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
     
-    if (method == null && ft.getNumericType() != null && !sf.multiValued()) {
+    if (ft.getNumericType() != null && !sf.multiValued()) {
       // the per-segment approach is optimal for numeric field types since there
       // are no global ords to merge and no need to create an expensive
       // top-level reader
       method = FacetMethod.FCS;
     }
 
-    if (ft.getNumericType() != null && sf.hasDocValues()) {
-      // only fcs is able to leverage the numeric field caches
-      method = FacetMethod.FCS;
-    }
-
     if (method == null) {
       // TODO: default to per-segment or not?
       method = FacetMethod.FC;
@@ -430,14 +429,7 @@
           }
           break;
         case FC:
-          if (sf.hasDocValues()) {
-            counts = DocValuesFacets.getCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
-          } else if (multiToken || TrieField.getMainValuePrefix(ft) != null) {
-            UnInvertedField uif = UnInvertedField.getUnInvertedField(field, searcher);
-            counts = uif.getCounts(searcher, base, offset, limit, mincount,missing,sort,prefix);
-          } else {
-            counts = getFieldCacheCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
-          }
+          counts = DocValuesFacets.getCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
           break;
         default:
           throw new AssertionError();
@@ -458,7 +450,7 @@
                                              String sort,
                                              String prefix) throws IOException {
     GroupingSpecification groupingSpecification = rb.getGroupingSpec();
-    String groupField  = groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
+    final String groupField  = groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
     if (groupField == null) {
       throw new SolrException (
           SolrException.ErrorCode.BAD_REQUEST,
@@ -467,8 +459,24 @@
     }
 
     BytesRef prefixBR = prefix != null ? new BytesRef(prefix) : null;
-    TermGroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(groupField, field, multiToken, prefixBR, 128);
-    searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), collector);
+    final TermGroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(groupField, field, multiToken, prefixBR, 128);
+    
+    SchemaField sf = searcher.getSchema().getFieldOrNull(groupField);
+    
+    if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+      // it's a single-valued numeric field: we must currently create insanity :(
+      // there isn't a GroupedFacetCollector that works on numerics right now...
+      searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), new FilterCollector(collector) {
+        @Override
+        public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+          AtomicReader insane = Insanity.wrapInsanity(context.reader(), groupField);
+          return in.getLeafCollector(insane.getContext());
+        }
+      });
+    } else {
+      searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), collector);
+    }
+    
     boolean orderByCount = sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY);
     TermGroupFacetCollector.GroupedFacetResult result 
       = collector.mergeSegmentResults(limit < 0 ? Integer.MAX_VALUE : 
@@ -622,152 +630,6 @@
     return docs.andNotSize(hasVal);
   }
 
-
-  /**
-   * Use the Lucene FieldCache to get counts for each unique field value in <code>docs</code>.
-   * The field must have at most one indexed token per document.
-   */
-  public static NamedList<Integer> getFieldCacheCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort, String prefix) throws IOException {
-    // TODO: If the number of terms is high compared to docs.size(), and zeros==false,
-    //  we should use an alternate strategy to avoid
-    //  1) creating another huge int[] for the counts
-    //  2) looping over that huge int[] looking for the rare non-zeros.
-    //
-    // Yet another variation: if docs.size() is small and termvectors are stored,
-    // then use them instead of the FieldCache.
-    //
-
-    // TODO: this function is too big and could use some refactoring, but
-    // we also need a facet cache, and refactoring of SimpleFacets instead of
-    // trying to pass all the various params around.
-
-    FieldType ft = searcher.getSchema().getFieldType(fieldName);
-    NamedList<Integer> res = new NamedList<>();
-
-    SortedDocValues si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), fieldName);
-
-    final BytesRef br = new BytesRef();
-
-    final BytesRef prefixRef;
-    if (prefix == null) {
-      prefixRef = null;
-    } else if (prefix.length()==0) {
-      prefix = null;
-      prefixRef = null;
-    } else {
-      prefixRef = new BytesRef(prefix);
-    }
-
-    int startTermIndex, endTermIndex;
-    if (prefix!=null) {
-      startTermIndex = si.lookupTerm(prefixRef);
-      if (startTermIndex<0) startTermIndex=-startTermIndex-1;
-      prefixRef.append(UnicodeUtil.BIG_TERM);
-      endTermIndex = si.lookupTerm(prefixRef);
-      assert endTermIndex < 0;
-      endTermIndex = -endTermIndex-1;
-    } else {
-      startTermIndex=-1;
-      endTermIndex=si.getValueCount();
-    }
-
-    final int nTerms=endTermIndex-startTermIndex;
-    int missingCount = -1; 
-    final CharsRef charsRef = new CharsRef(10);
-    if (nTerms>0 && docs.size() >= mincount) {
-
-      // count collection array only needs to be as big as the number of terms we are
-      // going to collect counts for.
-      final int[] counts = new int[nTerms];
-
-      DocIterator iter = docs.iterator();
-
-      while (iter.hasNext()) {
-        int term = si.getOrd(iter.nextDoc());
-        int arrIdx = term-startTermIndex;
-        if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
-      }
-
-      if (startTermIndex == -1) {
-        missingCount = counts[0];
-      }
-
-      // IDEA: we could also maintain a count of "other"... everything that fell outside
-      // of the top 'N'
-
-      int off=offset;
-      int lim=limit>=0 ? limit : Integer.MAX_VALUE;
-
-      if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
-        int maxsize = limit>0 ? offset+limit : Integer.MAX_VALUE-1;
-        maxsize = Math.min(maxsize, nTerms);
-        LongPriorityQueue queue = new LongPriorityQueue(Math.min(maxsize,1000), maxsize, Long.MIN_VALUE);
-
-        int min=mincount-1;  // the smallest value in the top 'N' values
-        for (int i=(startTermIndex==-1)?1:0; i<nTerms; i++) {
-          int c = counts[i];
-          if (c>min) {
-            // NOTE: we use c>min rather than c>=min as an optimization because we are going in
-            // index order, so we already know that the keys are ordered.  This can be very
-            // important if a lot of the counts are repeated (like zero counts would be).
-
-            // smaller term numbers sort higher, so subtract the term number instead
-            long pair = (((long)c)<<32) + (Integer.MAX_VALUE - i);
-            boolean displaced = queue.insert(pair);
-            if (displaced) min=(int)(queue.top() >>> 32);
-          }
-        }
-
-        // if we are deep paging, we don't have to order the highest "offset" counts.
-        int collectCount = Math.max(0, queue.size() - off);
-        assert collectCount <= lim;
-
-        // the start and end indexes of our list "sorted" (starting with the highest value)
-        int sortedIdxStart = queue.size() - (collectCount - 1);
-        int sortedIdxEnd = queue.size() + 1;
-        final long[] sorted = queue.sort(collectCount);
-
-        for (int i=sortedIdxStart; i<sortedIdxEnd; i++) {
-          long pair = sorted[i];
-          int c = (int)(pair >>> 32);
-          int tnum = Integer.MAX_VALUE - (int)pair;
-          si.lookupOrd(startTermIndex+tnum, br);
-          ft.indexedToReadable(br, charsRef);
-          res.add(charsRef.toString(), c);
-        }
-      
-      } else {
-        // add results in index order
-        int i=(startTermIndex==-1)?1:0;
-        if (mincount<=0) {
-          // if mincount<=0, then we won't discard any terms and we know exactly
-          // where to start.
-          i+=off;
-          off=0;
-        }
-
-        for (; i<nTerms; i++) {          
-          int c = counts[i];
-          if (c<mincount || --off>=0) continue;
-          if (--lim<0) break;
-          si.lookupOrd(startTermIndex+i, br);
-          ft.indexedToReadable(br, charsRef);
-          res.add(charsRef.toString(), c);
-        }
-      }
-    }
-
-    if (missing) {
-      if (missingCount < 0) {
-        missingCount = getFieldMissingCount(searcher,docs,fieldName);
-      }
-      res.add(null, missingCount);
-    }
-    
-    return res;
-  }
-
-
   /**
    * Returns a list of terms in the specified field along with the 
    * corresponding count of documents in the set that match that constraint.
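
For context on the facet loop removed above: it ranks terms by packing each (count, term-ordinal) pair into a single long, so one LongPriorityQueue can order by count with ties broken toward smaller ordinals. A minimal sketch of that packing (helper names are illustrative, not part of the patch):

    // count in the high 32 bits; ordinal stored as (Integer.MAX_VALUE - ord) in the
    // low 32 bits, so among equal counts the smaller ordinal yields the larger long
    static long pack(int count, int ord) {
      return (((long) count) << 32) + (Integer.MAX_VALUE - ord);
    }
    static int count(long pair) { return (int) (pair >>> 32); }
    static int ord(long pair) { return Integer.MAX_VALUE - (int) pair; }
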
diff --git a/solr/core/src/java/org/apache/solr/request/UnInvertedField.java b/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
index 4e591ba..9ab7d84 100644
--- a/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
+++ b/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
@@ -23,12 +23,12 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.lucene.index.AtomicReader;
-import org.apache.lucene.index.DocTermOrds;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.uninverting.DocTermOrds;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.FixedBitSet;
diff --git a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
index 3b1ddd1..df9cf7d 100644
--- a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
@@ -40,6 +40,7 @@
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialArgsParser;
 import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.response.TextResponseWriter;
@@ -129,6 +130,11 @@
   }
 
   @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
+
+  @Override
   public List<StorableField> createFields(SchemaField field, Object val, float boost) {
     String shapeStr = null;
     Shape shape = null;
diff --git a/solr/core/src/java/org/apache/solr/schema/BinaryField.java b/solr/core/src/java/org/apache/solr/schema/BinaryField.java
index ad22555..20c03ac 100644
--- a/solr/core/src/java/org/apache/solr/schema/BinaryField.java
+++ b/solr/core/src/java/org/apache/solr/schema/BinaryField.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.common.util.Base64;
 import org.apache.solr.response.TextResponseWriter;
@@ -44,6 +45,15 @@
     throw new RuntimeException("Cannot sort on a Binary field");
   }
 
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    // TODO: maybe just return null?
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY;
+    } else {
+      return Type.BINARY;
+    }
+  }
 
   @Override
   public String toExternal(StorableField f) {
diff --git a/solr/core/src/java/org/apache/solr/schema/BoolField.java b/solr/core/src/java/org/apache/solr/schema/BoolField.java
index c6376cb..bc45d0a 100644
--- a/solr/core/src/java/org/apache/solr/schema/BoolField.java
+++ b/solr/core/src/java/org/apache/solr/schema/BoolField.java
@@ -25,15 +25,15 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.GeneralField;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.BoolDocValues;
-import org.apache.lucene.queries.function.valuesource.OrdFieldSource;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.mutable.MutableValue;
@@ -41,6 +41,7 @@
 import org.apache.solr.analysis.SolrAnalyzer;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
+import org.apache.solr.search.function.OrdFieldSource;
 /**
  *
  */
@@ -52,6 +53,15 @@
   }
 
   @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY;
+    } else {
+      return Type.SORTED;
+    }
+  }
+
+  @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
     return new BoolFieldSource(field.name);
@@ -179,7 +189,7 @@
 
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), field);
+    final SortedDocValues sindex = DocValues.getSorted(readerContext.reader(), field);
 
     // figure out what ord maps to true
     int nord = sindex.getValueCount();
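
BoolField reads booleans straight from the sorted ords; the hunk above swaps FieldCache.DEFAULT.getTermsIndex for DocValues.getSorted and then determines which ord represents true. A sketch of the same idea using lookupTerm (assuming BoolField's internal terms "T"/"F"; the actual patch code scans getValueCount() instead):

    SortedDocValues sindex = DocValues.getSorted(readerContext.reader(), field);
    int trueOrd = sindex.lookupTerm(new BytesRef("T")); // negative if no document is true
    boolean value = sindex.getOrd(doc) == trueOrd;
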
diff --git a/solr/core/src/java/org/apache/solr/schema/CollationField.java b/solr/core/src/java/org/apache/solr/schema/CollationField.java
index 37797b9..ec98bc4 100644
--- a/solr/core/src/java/org/apache/solr/schema/CollationField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CollationField.java
@@ -38,10 +38,11 @@
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocTermOrdsRangeFilter;
-import org.apache.lucene.search.FieldCacheRangeFilter;
+import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.util.ResourceLoader;
@@ -199,6 +200,15 @@
   public SortField getSortField(SchemaField field, boolean top) {
     return getStringSort(field, top);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY;
+    } else {
+      return Type.SORTED;
+    }
+  }
 
   @Override
   public Analyzer getIndexAnalyzer() {
@@ -245,7 +255,7 @@
           return new ConstantScoreQuery(DocTermOrdsRangeFilter.newBytesRefRange(
               field.getName(), low, high, minInclusive, maxInclusive));
         } else {
-          return new ConstantScoreQuery(FieldCacheRangeFilter.newBytesRefRange(
+          return new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange(
               field.getName(), low, high, minInclusive, maxInclusive));
         } 
     } else {
diff --git a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
index 7379e0f..ca0c785 100644
--- a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FieldValueFilter;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.queries.ChainedFilter;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -343,6 +344,11 @@
     // Convert all values to default currency for sorting.
     return (new RawCurrencyValueSource(field, defaultCurrency, null)).getSortField(reverse);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
 
   @Override
   public void write(TextResponseWriter writer, String name, StorableField field) throws IOException {
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumField.java b/solr/core/src/java/org/apache/solr/schema/EnumField.java
index 3a9e121..d46d1fa 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumField.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.EnumFieldSource;
 import org.apache.lucene.search.*;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.NumericUtils;
@@ -178,10 +179,19 @@
   public SortField getSortField(SchemaField field, boolean top) {
     field.checkSortability();
     final Object missingValue = Integer.MIN_VALUE;
-    SortField sf = new SortField(field.getName(), FieldCache.NUMERIC_UTILS_INT_PARSER, top);
+    SortField sf = new SortField(field.getName(), SortField.Type.INT, top);
     sf.setMissingValue(missingValue);
     return sf;
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_INTEGER;
+    } else {
+      return Type.INTEGER;
+    }
+  }
 
   /**
    * {@inheritDoc}
@@ -189,7 +199,7 @@
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
-    return new EnumFieldSource(field.getName(), FieldCache.NUMERIC_UTILS_INT_PARSER, enumIntToStringMap, enumStringToIntMap);
+    return new EnumFieldSource(field.getName(), enumIntToStringMap, enumStringToIntMap);
   }
 
   /**
@@ -230,7 +240,7 @@
     Query query = null;
     final boolean matchOnly = field.hasDocValues() && !field.indexed();
     if (matchOnly) {
-      query = new ConstantScoreQuery(FieldCacheRangeFilter.newIntRange(field.getName(),
+      query = new ConstantScoreQuery(DocValuesRangeFilter.newIntRange(field.getName(),
               min == null ? null : minValue,
               max == null ? null : maxValue,
               minInclusive, maxInclusive));
diff --git a/solr/core/src/java/org/apache/solr/schema/ExternalFileField.java b/solr/core/src/java/org/apache/solr/schema/ExternalFileField.java
index e445c60..98e76af 100644
--- a/solr/core/src/java/org/apache/solr/schema/ExternalFileField.java
+++ b/solr/core/src/java/org/apache/solr/schema/ExternalFileField.java
@@ -19,6 +19,7 @@
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
@@ -90,6 +91,11 @@
     FileFloatSource source = getFileFloatSource(field);
     return source.getSortField(reverse);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser parser) {
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index 1f62f8d..1927e38 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -32,8 +32,8 @@
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocTermOrdsRangeFilter;
 import org.apache.lucene.search.DocTermOrdsRewriteMethod;
-import org.apache.lucene.search.FieldCacheRangeFilter;
-import org.apache.lucene.search.FieldCacheRewriteMethod;
+import org.apache.lucene.search.DocValuesRangeFilter;
+import org.apache.lucene.search.DocValuesRewriteMethod;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
@@ -41,6 +41,7 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.UnicodeUtil;
@@ -446,6 +447,16 @@
     query.setRewriteMethod(sf.getType().getRewriteMethod(parser, sf));
     return query;
   }
+  
+  /**
+   * If DocValues is not enabled for a field, but the field is indexed, DocValues
+   * can be constructed on the fly (uninverted, aka FieldCache) on the first request
+   * to sort, facet, etc. This specifies the structure to use.
+   * 
+   * @param sf field instance
+   * @return type to uninvert, or {@code null} (to disallow uninversion for the field)
+   */
+  public abstract UninvertingReader.Type getUninversionType(SchemaField sf);
 
   /**
    * Default analyzer for types that only produce 1 verbatim token...
@@ -687,7 +698,7 @@
             part2 == null ? null : new BytesRef(toInternal(part2)),
             minInclusive, maxInclusive));
       } else {
-        return new ConstantScoreQuery(FieldCacheRangeFilter.newStringRange(
+        return new ConstantScoreQuery(DocValuesRangeFilter.newStringRange(
             field.getName(), 
             part1 == null ? null : toInternal(part1),
             part2 == null ? null : toInternal(part2),
@@ -731,7 +742,7 @@
    */
   public MultiTermQuery.RewriteMethod getRewriteMethod(QParser parser, SchemaField field) {
     if (!field.indexed() && field.hasDocValues()) {
-      return field.multiValued() ? new DocTermOrdsRewriteMethod() : new FieldCacheRewriteMethod();
+      return field.multiValued() ? new DocTermOrdsRewriteMethod() : new DocValuesRewriteMethod();
     } else {
       return MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
     }
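
Every FieldType subclass now has to implement the new abstract getUninversionType(SchemaField). A representative implementation for a hypothetical string-like type (mirroring StrField further down; Type is org.apache.lucene.uninverting.UninvertingReader.Type):

    // returning null disallows uninversion entirely: the field can then only be
    // sorted/faceted on when it carries real docValues
    @Override
    public Type getUninversionType(SchemaField sf) {
      if (sf.multiValued()) {
        return Type.SORTED_SET_BINARY; // multi-valued ords
      } else {
        return Type.SORTED;            // single-valued ords
      }
    }
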
diff --git a/solr/core/src/java/org/apache/solr/schema/GeoHashField.java b/solr/core/src/java/org/apache/solr/schema/GeoHashField.java
index e8d7b95..bbe7a72 100644
--- a/solr/core/src/java/org/apache/solr/schema/GeoHashField.java
+++ b/solr/core/src/java/org/apache/solr/schema/GeoHashField.java
@@ -22,6 +22,8 @@
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+
 import com.spatial4j.core.context.SpatialContext;
 import com.spatial4j.core.io.GeohashUtils;
 import com.spatial4j.core.shape.Point;
@@ -47,6 +49,15 @@
   public SortField getSortField(SchemaField field, boolean top) {
     return getStringSort(field, top);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY;
+    } else {
+      return Type.SORTED;
+    }
+  }
 
     //QUESTION: Should we do a fast and crude one?  Or actually check distances
   //Fast and crude could use EdgeNGrams, but that would require a different
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index d7f3124..387486c 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -19,10 +19,15 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.lucene.util.Version;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -357,6 +362,22 @@
     indexAnalyzer = new SolrIndexAnalyzer();
     queryAnalyzer = new SolrQueryAnalyzer();
   }
+  
+  public Map<String,UninvertingReader.Type> getUninversionMap(IndexReader reader) {
+    Map<String,UninvertingReader.Type> map = new HashMap<>();
+    for (FieldInfo f : MultiFields.getMergedFieldInfos(reader)) {
+      if (f.hasDocValues() == false && f.isIndexed()) {
+        SchemaField sf = getFieldOrNull(f.name);
+        if (sf != null) {
+          UninvertingReader.Type type = sf.getType().getUninversionType(sf);
+          if (type != null) {
+            map.put(f.name, type);
+          }
+        }
+      }
+    }
+    return map;
+  }
 
   /**
    * Writes the schema in schema.xml format to the given writer 
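
getUninversionMap(reader) feeds UninvertingReader.wrap, as SolrIndexSearcher.wrapReader does later in this patch. In isolation the flow is (a sketch; dir and schema assumed in scope):

    DirectoryReader raw = DirectoryReader.open(dir);
    Map<String,UninvertingReader.Type> mapping = schema.getUninversionMap(raw);
    // fields in the mapping are lazily uninverted on first access; all other
    // fields pass through unchanged
    DirectoryReader reader = UninvertingReader.wrap(raw, mapping);
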
diff --git a/solr/core/src/java/org/apache/solr/schema/LatLonType.java b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
index 2763c84..de5bc61 100644
--- a/solr/core/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
@@ -41,6 +41,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.Bits;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
@@ -241,6 +242,11 @@
   public SortField getSortField(SchemaField field, boolean top) {
     throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Sorting not supported on LatLonType " + field.getName());
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
 
 
 
diff --git a/solr/core/src/java/org/apache/solr/schema/PointType.java b/solr/core/src/java/org/apache/solr/schema/PointType.java
index 3f6ffa5..6cba8b4 100644
--- a/solr/core/src/java/org/apache/solr/schema/PointType.java
+++ b/solr/core/src/java/org/apache/solr/schema/PointType.java
@@ -25,6 +25,7 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.SolrParams;
@@ -119,6 +120,11 @@
   public SortField getSortField(SchemaField field, boolean top) {
     throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Sorting not supported on PointType " + field.getName());
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
 
   @Override
   /**
diff --git a/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java b/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
index 11ffc92..1079402 100644
--- a/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
+++ b/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
@@ -30,12 +30,18 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.AttributeFactory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.AttributeSource.State;
 import org.apache.solr.analysis.SolrAnalyzer;
 import org.apache.solr.response.TextResponseWriter;
+import org.apache.solr.search.QParser;
+import org.apache.solr.search.Sorting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -112,10 +118,21 @@
     }
     return f;
   }
-
+  
   @Override
   public SortField getSortField(SchemaField field, boolean top) {
-    return getStringSort(field, top);
+    field.checkSortability();
+    return Sorting.getTextSortField(field.getName(), top, field.sortMissingLast(), field.sortMissingFirst());
+  }
+  
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser parser) {
+    return new SortedSetFieldSource(field.getName());
+  }
+
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return Type.SORTED_SET_BINARY;
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
index c0d0ec0..aa516fd 100644
--- a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
+++ b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
@@ -30,6 +30,7 @@
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
 import org.apache.lucene.search.*;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
 
@@ -92,6 +93,11 @@
   public SortField getSortField(SchemaField field, boolean reverse) {
     return new SortField(field.getName(), randomComparatorSource, reverse);
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return null;
+  }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
diff --git a/solr/core/src/java/org/apache/solr/schema/StrField.java b/solr/core/src/java/org/apache/solr/schema/StrField.java
index 9fc4320..7e6a913 100644
--- a/solr/core/src/java/org/apache/solr/schema/StrField.java
+++ b/solr/core/src/java/org/apache/solr/schema/StrField.java
@@ -28,6 +28,7 @@
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
@@ -63,6 +64,15 @@
   }
 
   @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      return Type.SORTED_SET_BINARY;
+    } else {
+      return Type.SORTED;
+    }
+  }
+
+  @Override
   public void write(TextResponseWriter writer, String name, StorableField f) throws IOException {
     writer.writeStr(name, f.stringValue(), true);
   }
diff --git a/solr/core/src/java/org/apache/solr/schema/TextField.java b/solr/core/src/java/org/apache/solr/schema/TextField.java
index 669dea6..cd7b708 100644
--- a/solr/core/src/java/org/apache/solr/schema/TextField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TextField.java
@@ -18,15 +18,19 @@
 package org.apache.solr.schema;
 
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.*;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.QueryBuilder;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
+import org.apache.solr.search.Sorting;
 
 import java.util.Map;
 import java.io.IOException;
@@ -93,7 +97,18 @@
   @Override
   public SortField getSortField(SchemaField field, boolean reverse) {
     /* :TODO: maybe warn if isTokenized(), but doesn't use LimitTokenCountFilter in it's chain? */
-    return getStringSort(field, reverse);
+    field.checkSortability();
+    return Sorting.getTextSortField(field.getName(), reverse, field.sortMissingLast(), field.sortMissingFirst());
+  }
+  
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser parser) {
+    return new SortedSetFieldSource(field.getName());
+  }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    return Type.SORTED_SET_BINARY;
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
index 967522b..aaa1c52 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
@@ -30,6 +30,7 @@
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 
@@ -365,6 +366,11 @@
   }
 
   @Override
+  public Type getUninversionType(SchemaField sf) {
+    return wrappedField.getUninversionType(sf);
+  }
+
+  @Override
   public Object marshalSortValue(Object value) {
     return value;
   }
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index ea4a558..37c2e70 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -39,11 +39,11 @@
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCacheRangeFilter;
+import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.NumericUtils;
@@ -153,7 +153,7 @@
         else if( sortMissingFirst ) {
           missingValue = top ? Integer.MAX_VALUE : Integer.MIN_VALUE;
         }
-        sf = new SortField( field.getName(), FieldCache.NUMERIC_UTILS_INT_PARSER, top);
+        sf = new SortField( field.getName(), SortField.Type.INT, top);
         sf.setMissingValue(missingValue);
         return sf;
       
@@ -164,7 +164,7 @@
         else if( sortMissingFirst ) {
           missingValue = top ? Float.POSITIVE_INFINITY : Float.NEGATIVE_INFINITY;
         }
-        sf = new SortField( field.getName(), FieldCache.NUMERIC_UTILS_FLOAT_PARSER, top);
+        sf = new SortField( field.getName(), SortField.Type.FLOAT, top);
         sf.setMissingValue(missingValue);
         return sf;
       
@@ -176,7 +176,7 @@
         else if( sortMissingFirst ) {
           missingValue = top ? Long.MAX_VALUE : Long.MIN_VALUE;
         }
-        sf = new SortField( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER, top);
+        sf = new SortField( field.getName(), SortField.Type.LONG, top);
         sf.setMissingValue(missingValue);
         return sf;
         
@@ -187,7 +187,7 @@
         else if( sortMissingFirst ) {
           missingValue = top ? Double.POSITIVE_INFINITY : Double.NEGATIVE_INFINITY;
         }
-        sf = new SortField( field.getName(), FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, top);
+        sf = new SortField( field.getName(), SortField.Type.DOUBLE, top);
         sf.setMissingValue(missingValue);
         return sf;
         
@@ -195,21 +195,54 @@
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + field.name);
     }
   }
+  
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      switch (type) {
+        case INTEGER:
+          return Type.SORTED_SET_INTEGER;
+        case LONG:
+        case DATE:
+          return Type.SORTED_SET_LONG;
+        case FLOAT:
+          return Type.SORTED_SET_FLOAT;
+        case DOUBLE:
+          return Type.SORTED_SET_DOUBLE;
+        default:
+          throw new AssertionError();
+      }
+    } else {
+      switch (type) {
+        case INTEGER:
+          return Type.INTEGER;
+        case LONG:
+        case DATE:
+          return Type.LONG;
+        case FLOAT:
+          return Type.FLOAT;
+        case DOUBLE:
+          return Type.DOUBLE;
+        default:
+          throw new AssertionError();
+      }
+    }
+  }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
     switch (type) {
       case INTEGER:
-        return new IntFieldSource( field.getName(), FieldCache.NUMERIC_UTILS_INT_PARSER );
+        return new IntFieldSource( field.getName());
       case FLOAT:
-        return new FloatFieldSource( field.getName(), FieldCache.NUMERIC_UTILS_FLOAT_PARSER );
+        return new FloatFieldSource( field.getName());
       case DATE:
-        return new TrieDateFieldSource( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER );        
+        return new TrieDateFieldSource( field.getName());        
       case LONG:
-        return new LongFieldSource( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER );
+        return new LongFieldSource( field.getName());
       case DOUBLE:
-        return new DoubleFieldSource( field.getName(), FieldCache.NUMERIC_UTILS_DOUBLE_PARSER );
+        return new DoubleFieldSource( field.getName());
       default:
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + field.name);
     }
@@ -274,7 +307,7 @@
     switch (type) {
       case INTEGER:
         if (matchOnly) {
-          query = new ConstantScoreQuery(FieldCacheRangeFilter.newIntRange(field.getName(),
+          query = new ConstantScoreQuery(DocValuesRangeFilter.newIntRange(field.getName(),
                 min == null ? null : Integer.parseInt(min),
                 max == null ? null : Integer.parseInt(max),
                 minInclusive, maxInclusive));
@@ -287,7 +320,7 @@
         break;
       case FLOAT:
         if (matchOnly) {
-          query = new ConstantScoreQuery(FieldCacheRangeFilter.newFloatRange(field.getName(),
+          query = new ConstantScoreQuery(DocValuesRangeFilter.newFloatRange(field.getName(),
                 min == null ? null : Float.parseFloat(min),
                 max == null ? null : Float.parseFloat(max),
                 minInclusive, maxInclusive));
@@ -300,7 +333,7 @@
         break;
       case LONG:
         if (matchOnly) {
-          query = new ConstantScoreQuery(FieldCacheRangeFilter.newLongRange(field.getName(),
+          query = new ConstantScoreQuery(DocValuesRangeFilter.newLongRange(field.getName(),
                 min == null ? null : Long.parseLong(min),
                 max == null ? null : Long.parseLong(max),
                 minInclusive, maxInclusive));
@@ -313,7 +346,7 @@
         break;
       case DOUBLE:
         if (matchOnly) {
-          query = new ConstantScoreQuery(FieldCacheRangeFilter.newDoubleRange(field.getName(),
+          query = new ConstantScoreQuery(DocValuesRangeFilter.newDoubleRange(field.getName(),
                 min == null ? null : Double.parseDouble(min),
                 max == null ? null : Double.parseDouble(max),
                 minInclusive, maxInclusive));
@@ -326,7 +359,7 @@
         break;
       case DATE:
         if (matchOnly) {
-          query = new ConstantScoreQuery(FieldCacheRangeFilter.newLongRange(field.getName(),
+          query = new ConstantScoreQuery(DocValuesRangeFilter.newLongRange(field.getName(),
                 min == null ? null : dateField.parseMath(null, min).getTime(),
                 max == null ? null : dateField.parseMath(null, max).getTime(),
                 minInclusive, maxInclusive));
@@ -706,8 +739,8 @@
 
 class TrieDateFieldSource extends LongFieldSource {
 
-  public TrieDateFieldSource(String field, FieldCache.LongParser parser) {
-    super(field, parser);
+  public TrieDateFieldSource(String field) {
+    super(field);
   }
 
   @Override
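
Each matchOnly branch above follows the same pattern: when the field has docValues but is not indexed, the range is answered from the docvalues structure through the renamed DocValuesRangeFilter. A sketch for a hypothetical long field:

    Query q = new ConstantScoreQuery(DocValuesRangeFilter.newLongRange(
        "price_l",
        10L,     // min; null would mean open-ended
        100L,    // max; null would mean open-ended
        true,    // minInclusive
        true));  // maxInclusive
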
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 367a6ca..e2ddab4 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -27,7 +27,9 @@
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -37,7 +39,6 @@
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -288,11 +289,7 @@
 
         SortedDocValues docValues = null;
         FunctionQuery funcQuery = null;
-        if(schemaField.hasDocValues()) {
-          docValues = searcher.getAtomicReader().getSortedDocValues(this.field);
-        } else {
-          docValues = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), this.field);
-        }
+        docValues = DocValues.getSorted(searcher.getAtomicReader(), this.field);
 
         FieldType fieldType = null;
 
@@ -794,7 +791,7 @@
 
   private class IntValueCollapse extends FieldValueCollapse {
 
-    private FieldCache.Ints vals;
+    private NumericDocValues vals;
     private IntCompare comp;
     private int nullVal;
     private int[] ordVals;
@@ -829,11 +826,11 @@
     }
 
     public void setNextReader(AtomicReaderContext context) throws IOException {
-      this.vals = FieldCache.DEFAULT.getInts(context.reader(), this.field, false);
+      this.vals = DocValues.getNumeric(context.reader(), this.field);
     }
 
     public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
-      int val = vals.get(contextDoc);
+      int val = (int) vals.get(contextDoc);
       if(ord > -1) {
         if(comp.test(val, ordVals[ord])) {
           ords[ord] = globalDoc;
@@ -863,7 +860,7 @@
 
   private class LongValueCollapse extends FieldValueCollapse {
 
-    private FieldCache.Longs vals;
+    private NumericDocValues vals;
     private LongCompare comp;
     private long nullVal;
     private long[] ordVals;
@@ -897,7 +894,7 @@
     }
 
     public void setNextReader(AtomicReaderContext context) throws IOException {
-      this.vals = FieldCache.DEFAULT.getLongs(context.reader(), this.field, false);
+      this.vals = DocValues.getNumeric(context.reader(), this.field);
     }
 
     public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
@@ -931,7 +928,7 @@
 
   private class FloatValueCollapse extends FieldValueCollapse {
 
-    private FieldCache.Floats vals;
+    private NumericDocValues vals;
     private FloatCompare comp;
     private float nullVal;
     private float[] ordVals;
@@ -966,11 +963,11 @@
     }
 
     public void setNextReader(AtomicReaderContext context) throws IOException {
-      this.vals = FieldCache.DEFAULT.getFloats(context.reader(), this.field, false);
+      this.vals = DocValues.getNumeric(context.reader(), this.field);
     }
 
     public void collapse(int ord, int contextDoc, int globalDoc) throws IOException {
-      float val = vals.get(contextDoc);
+      float val = Float.intBitsToFloat((int)vals.get(contextDoc));
       if(ord > -1) {
         if(comp.test(val, ordVals[ord])) {
           ords[ord] = globalDoc;
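
The collapse classes above show the decoding convention for NumericDocValues, which returns one raw long per document: int and long fields are read directly, while float fields store the IEEE-754 bit pattern. A sketch (field name hypothetical):

    NumericDocValues vals = DocValues.getNumeric(context.reader(), "score_f");
    long raw = vals.get(contextDoc);
    float f = Float.intBitsToFloat((int) raw); // float fields: decode the bit pattern
    // the double analogue is Double.longBitsToDouble(raw)
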
diff --git a/solr/core/src/java/org/apache/solr/search/Insanity.java b/solr/core/src/java/org/apache/solr/search/Insanity.java
new file mode 100644
index 0000000..2026b15
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/Insanity.java
@@ -0,0 +1,129 @@
+package org.apache.solr.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.FilterAtomicReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.uninverting.UninvertingReader;
+
+/** 
+ * Lucene 5.0 removes "accidental" insanity, so you must explicitly
+ * create it.
+ * <p>
+ * This class creates insanity for two specific situations:
+ * <ul>
+ *   <li>calling {@code ord} or {@code rord} functions on a single-valued numeric field.
+ *   <li>doing grouped faceting ({@code group.facet}) on a single-valued numeric field.
+ * </ul>
+ */
+@Deprecated
+public class Insanity {
+  
+  /** 
+   * Returns a view over {@code sane} where {@code insaneField} is a string
+   * instead of a numeric.
+   */
+  public static AtomicReader wrapInsanity(AtomicReader sane, String insaneField) {
+    return new UninvertingReader(new InsaneReader(sane, insaneField),
+                                 Collections.singletonMap(insaneField, UninvertingReader.Type.SORTED));
+  }
+  
+  /** Hides the proper numeric dv type for the field */
+  private static class InsaneReader extends FilterAtomicReader {
+    final String insaneField;
+    final FieldInfos fieldInfos;
+    
+    InsaneReader(AtomicReader in, String insaneField) {
+      super(in);
+      this.insaneField = insaneField;
+      ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
+      for (FieldInfo fi : in.getFieldInfos()) {
+        if (fi.name.equals(insaneField)) {
+          filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
+                                          fi.hasPayloads(), fi.getIndexOptions(), null, fi.getNormType(), null));
+        } else {
+          filteredInfos.add(fi);
+        }
+      }
+      fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
+    }
+
+    @Override
+    public NumericDocValues getNumericDocValues(String field) throws IOException {
+      if (insaneField.equals(field)) {
+        return null;
+      } else {
+        return in.getNumericDocValues(field);
+      }
+    }
+
+    @Override
+    public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+      if (insaneField.equals(field)) {
+        return null;
+      } else {
+        return in.getBinaryDocValues(field);
+      }
+    }
+
+    @Override
+    public SortedDocValues getSortedDocValues(String field) throws IOException {
+      if (insaneField.equals(field)) {
+        return null;
+      } else {
+        return in.getSortedDocValues(field);
+      }
+    }
+
+    @Override
+    public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+      if (insaneField.equals(field)) {
+        return null;
+      } else {
+        return in.getSortedSetDocValues(field);
+      }
+    }
+
+    @Override
+    public FieldInfos getFieldInfos() {
+      return fieldInfos;
+    }
+
+    // important to override these, so field caches are shared with the reader we wrap
+    
+    @Override
+    public Object getCoreCacheKey() {
+      return in.getCoreCacheKey();
+    }
+
+    @Override
+    public Object getCombinedCoreAndDeletesKey() {
+      return in.getCombinedCoreAndDeletesKey();
+    }
+  }
+}
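
Concrete usage of Insanity appears in OrdFieldSource/ReverseOrdFieldSource below; the short form is (field name hypothetical):

    // deliberately re-expose a single-valued numeric field as SORTED string ords,
    // accepting the duplicate cache entry ("insanity") this implies
    AtomicReader insane = Insanity.wrapInsanity(leafReader, "popularity_i");
    SortedDocValues ords = insane.getSortedDocValues("popularity_i");
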
diff --git a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
index 93d3686..76551b0 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java
@@ -19,33 +19,26 @@
 
 import java.net.URL;
 
+import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrInfoMBean;
 
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.FieldCache.CacheEntry;
-import org.apache.lucene.util.FieldCacheSanityChecker;
-import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
-
 /**
- * A SolrInfoMBean that provides introspection of the Lucene FieldCache, this is <b>NOT</b> a cache that is managed by Solr.
+ * A SolrInfoMBean that provides introspection of the Solr FieldCache.
  *
  */
 public class SolrFieldCacheMBean implements SolrInfoMBean {
 
-  protected FieldCacheSanityChecker checker = new FieldCacheSanityChecker();
-
   @Override
   public String getName() { return this.getClass().getName(); }
   @Override
   public String getVersion() { return SolrCore.version; }
   @Override
   public String getDescription() {
-    return "Provides introspection of the Lucene FieldCache, "
-      +    "this is **NOT** a cache that is managed by Solr.";
+    return "Provides introspection of the Solr FieldCache ";
   }
   @Override
   public Category getCategory() { return Category.CACHE; } 
@@ -60,27 +53,10 @@
   @Override
   public NamedList getStatistics() {
     NamedList stats = new SimpleOrderedMap();
-    CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
+    String[] entries = UninvertingReader.getUninvertedStats();
     stats.add("entries_count", entries.length);
     for (int i = 0; i < entries.length; i++) {
-      CacheEntry e = entries[i];
-      stats.add("entry#" + i, e.toString());
-    }
-
-    Insanity[] insanity = checker.check(entries);
-
-    stats.add("insanity_count", insanity.length);
-    for (int i = 0; i < insanity.length; i++) {
-
-      /** RAM estimation is both CPU and memory intensive... we don't want to do it unless asked.
-      // we only estimate the size of insane entries
-      for (CacheEntry e : insanity[i].getCacheEntries()) {
-        // don't re-estimate if we've already done it.
-        if (null == e.getEstimatedSize()) e.estimateSize();
-      }
-      **/
-      
-      stats.add("insanity#" + i, insanity[i].toString());
+      stats.add("entry#" + i, entries[i]);
     }
     return stats;
   }
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 1361bc7..50e82bf 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -89,6 +89,7 @@
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
@@ -169,9 +170,12 @@
   private DirectoryFactory directoryFactory;
   
   private final AtomicReader atomicReader;
+  // only for addIndexes etc (no fieldcache)
+  private final DirectoryReader rawReader;
+  
   private String path;
   private final boolean reserveDirectory;
-  private final boolean createdDirectory; 
+  private boolean createdDirectory; 
   
   private static DirectoryReader getReader(SolrCore core, SolrIndexConfig config, DirectoryFactory directoryFactory, String path) throws IOException {
     DirectoryReader reader = null;
@@ -184,18 +188,27 @@
     }
     return reader;
   }
+  
+  // TODO: wrap elsewhere and return a "map" from the schema that overrides get() ?
+  // this reader supports reopen
+  private static DirectoryReader wrapReader(SolrCore core, DirectoryReader reader) {
+    assert reader != null;
+    return UninvertingReader.wrap(reader, core.getLatestSchema().getUninversionMap(reader));
+  }
 
   public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, SolrIndexConfig config, String name, boolean enableCache, DirectoryFactory directoryFactory) throws IOException {
     // we don't need to reserve the directory because we get it from the factory
-    this(core, path, schema, config, name, null, true, enableCache, false, directoryFactory);
+    this(core, path, schema, config, name, getReader(core, config, directoryFactory, path), true, enableCache, false, directoryFactory);
+    this.createdDirectory = true;
   }
 
   public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, SolrIndexConfig config, String name, DirectoryReader r, boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory) throws IOException {
-    super(r == null ? getReader(core, config, directoryFactory, path) : r);
+    super(wrapReader(core, r));
 
     this.path = path;
     this.directoryFactory = directoryFactory;
     this.reader = (DirectoryReader) super.readerContext.reader();
+    this.rawReader = r;
     this.atomicReader = SlowCompositeReaderWrapper.wrap(this.reader);
     this.core = core;
     this.schema = schema;
@@ -211,7 +224,6 @@
     Directory dir = getIndexReader().directory();
     
     this.reserveDirectory = reserveDirectory;
-    this.createdDirectory = r == null;
     if (reserveDirectory) {
       // keep the directory from being released while we use it
       directoryFactory.incRef(dir);
@@ -303,6 +315,11 @@
     return atomicReader;
   }
   
+  /** Raw reader (no fieldcaches etc). Useful for operations like addIndexes */
+  public final DirectoryReader getRawReader() {
+    return rawReader;
+  }
+  
   @Override
   public final DirectoryReader getIndexReader() {
     assert reader == super.getIndexReader();
@@ -351,7 +368,7 @@
     
     long cpg = reader.getIndexCommit().getGeneration();
     try {
-      if (closeReader) reader.decRef();
+      if (closeReader) rawReader.decRef();
     } catch (Exception e) {
       SolrException.log(log, "Problem dec ref'ing reader", e);
     }
diff --git a/solr/core/src/java/org/apache/solr/search/Sorting.java b/solr/core/src/java/org/apache/solr/search/Sorting.java
index bacf1c7..94cd0e0 100644
--- a/solr/core/src/java/org/apache/solr/search/Sorting.java
+++ b/solr/core/src/java/org/apache/solr/search/Sorting.java
@@ -40,12 +40,23 @@
    * @return SortField
    */
   public static SortField getStringSortField(String fieldName, boolean reverse, boolean nullLast, boolean nullFirst) {
+    SortField sortField = new SortField(fieldName, SortField.Type.STRING, reverse);
+    applyMissingFirstLast(sortField, reverse, nullLast, nullFirst);
+    return sortField;
+  }
+
+  /** Like {@link #getStringSortField}, except safe for tokenized fields */
+  public static SortField getTextSortField(String fieldName, boolean reverse, boolean nullLast, boolean nullFirst) {
+    SortField sortField = new SortedSetSortField(fieldName, reverse);
+    applyMissingFirstLast(sortField, reverse, nullLast, nullFirst);
+    return sortField;
+  }
+  
+  private static void applyMissingFirstLast(SortField in, boolean reverse, boolean nullLast, boolean nullFirst) {
     if (nullFirst && nullLast) {
       throw new IllegalArgumentException("Cannot specify missing values as both first and last");
     }
-
-    SortField sortField = new SortField(fieldName, SortField.Type.STRING, reverse);
-
+    
     // 4 cases:
     // missingFirst / forward: default lucene behavior
     // missingFirst / reverse: set sortMissingLast
@@ -53,12 +64,11 @@
     // missingLast  / reverse: default lucene behavior
     
     if (nullFirst && reverse) {
-      sortField.setMissingValue(SortField.STRING_LAST);
+      in.setMissingValue(SortField.STRING_LAST);
     } else if (nullLast && !reverse) {
-      sortField.setMissingValue(SortField.STRING_LAST);
+      in.setMissingValue(SortField.STRING_LAST);
     }
-
-    return sortField;
   }
+    
 }
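
Of the four reverse/missing combinations, only ascending+missingLast and descending+missingFirst differ from Lucene's defaults, which is why applyMissingFirstLast only ever sets STRING_LAST. A sketch of the two helpers side by side (field names hypothetical):

    // untokenized field: plain STRING sort
    SortField s1 = Sorting.getStringSortField("id_s", false /*reverse*/,
                                              true /*nullLast*/, false /*nullFirst*/);
    // tokenized field: SortedSetSortField selects an ord per document instead
    SortField s2 = Sorting.getTextSortField("title_t", false /*reverse*/,
                                            true /*nullLast*/, false /*nullFirst*/);
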
 
diff --git a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
index 8637974..2053132 100644
--- a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
@@ -42,6 +42,8 @@
 import org.apache.solr.schema.*;
 
 import org.apache.solr.search.function.CollapseScoreFunction;
+import org.apache.solr.search.function.OrdFieldSource;
+import org.apache.solr.search.function.ReverseOrdFieldSource;
 import org.apache.solr.search.function.distance.*;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java b/solr/core/src/java/org/apache/solr/search/function/OrdFieldSource.java
similarity index 69%
rename from lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java
rename to solr/core/src/java/org/apache/solr/search/function/OrdFieldSource.java
index ab937a1..0a1aa15 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/OrdFieldSource.java
+++ b/solr/core/src/java/org/apache/solr/search/function/OrdFieldSource.java
@@ -15,27 +15,32 @@
  * limitations under the License.
  */
 
-package org.apache.lucene.queries.function.valuesource;
+package org.apache.solr.search.function;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.CompositeReader;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.Insanity;
+import org.apache.solr.search.SolrIndexSearcher;
 
 /**
- * Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getStringIndex().
+ * Obtains the ordinal of the field value from {@link AtomicReader#getSortedDocValues}.
  * <br>
  * The native lucene index order is used to assign an ordinal value for each field value.
  * <br>Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.
@@ -65,13 +70,33 @@
   }
 
 
-  // TODO: this is trappy? perhaps this query instead should make you pass a slow reader yourself?
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     final int off = readerContext.docBase;
-    final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
-    final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
-    final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
+    final AtomicReader r;
+    Object o = context.get("searcher");
+    if (o instanceof SolrIndexSearcher) {
+      SolrIndexSearcher is = (SolrIndexSearcher) o;
+      SchemaField sf = is.getSchema().getFieldOrNull(field);
+      if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+        // it's a single-valued numeric field: we must currently create insanity :(
+        List<AtomicReaderContext> leaves = is.getIndexReader().leaves();
+        AtomicReader insaneLeaves[] = new AtomicReader[leaves.size()];
+        int upto = 0;
+        for (AtomicReaderContext raw : leaves) {
+          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
+        }
+        r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
+      } else {
+        // reuse ordinalmap
+        r = ((SolrIndexSearcher)o).getAtomicReader();
+      }
+    } else {
+      IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
+      r = SlowCompositeReaderWrapper.wrap(topReader);
+    }
+    // if it's e.g. tokenized/multivalued, emulate the old single-valued FieldCache behavior
+    final SortedDocValues sindex = SortedSetSelector.wrap(DocValues.getSortedSet(r, field), SortedSetSelector.Type.MIN);
     return new IntDocValues(this) {
       protected String toTerm(String readableValue) {
         return readableValue;
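
The SortedSetSelector line above is what keeps ord() backwards-compatible on tokenized or multi-valued fields: the SORTED_SET view is collapsed to one ord per document. In isolation (a sketch):

    SortedSetDocValues ssdv = DocValues.getSortedSet(r, field);
    // MIN picks the smallest ord when a document has several values, matching what
    // the old single-valued FieldCache view effectively exposed
    SortedDocValues sindex = SortedSetSelector.wrap(ssdv, SortedSetSelector.Type.MIN);
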
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java b/solr/core/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java
similarity index 65%
rename from lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java
rename to solr/core/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java
index 2d3bc8f..6567735 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ReverseOrdFieldSource.java
+++ b/solr/core/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java
@@ -15,25 +15,31 @@
  * limitations under the License.
  */
 
-package org.apache.lucene.queries.function.valuesource;
+package org.apache.solr.search.function;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.CompositeReader;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.Insanity;
+import org.apache.solr.search.SolrIndexSearcher;
 
 /**
- * Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getTermsIndex()
+ * Obtains the ordinal of the field value from {@link AtomicReader#getSortedDocValues}
  * and reverses the order.
  * <br>
  * The native lucene index order is used to assign an ordinal value for each field value.
@@ -65,14 +71,33 @@
     return "rord("+field+')';
   }
 
-  // TODO: this is trappy? perhaps this query instead should make you pass a slow reader yourself?
   @Override
   public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
-    final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
     final int off = readerContext.docBase;
-
-    final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
+    final AtomicReader r;
+    Object o = context.get("searcher");
+    if (o instanceof SolrIndexSearcher) {
+      SolrIndexSearcher is = (SolrIndexSearcher) o;
+      SchemaField sf = is.getSchema().getFieldOrNull(field);
+      if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+        // it's a single-valued numeric field: we must currently create insanity :(
+        List<AtomicReaderContext> leaves = is.getIndexReader().leaves();
+        AtomicReader insaneLeaves[] = new AtomicReader[leaves.size()];
+        int upto = 0;
+        for (AtomicReaderContext raw : leaves) {
+          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
+        }
+        r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
+      } else {
+        // reuse ordinalmap
+        r = ((SolrIndexSearcher)o).getAtomicReader();
+      }
+    } else {
+      IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
+      r = SlowCompositeReaderWrapper.wrap(topReader);
+    }
+    // if it's e.g. tokenized/multi-valued, emulate the old single-valued FieldCache behavior
+    final SortedDocValues sindex = SortedSetSelector.wrap(DocValues.getSortedSet(r, field), SortedSetSelector.Type.MIN);
     final int end = sindex.getValueCount();
 
     return new IntDocValues(this) {
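A minimal sketch (not part of the patch) of how the reversed ordinal falls out
of the SortedDocValues view obtained above; `r`, `field`, and `off` are as in
the patched getValues(), and the IntDocValues body shown here is assumed from
context since the hunk is truncated:

    final SortedDocValues sindex =
        SortedSetSelector.wrap(DocValues.getSortedSet(r, field), SortedSetSelector.Type.MIN);
    final int end = sindex.getValueCount();
    FunctionValues fv = new IntDocValues(this) {
      @Override
      public int intVal(int doc) {
        // ord 0 (first term in index order) maps to the largest value, `end`,
        // so higher native ordinals yield lower reversed values
        return end - sindex.getOrd(doc + off);
      }
    };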
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
index e842513..8e94d43 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
@@ -19,8 +19,10 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 
+import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.MultiCollector;
@@ -28,8 +30,12 @@
 import org.apache.lucene.search.TimeLimitingCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.BitDocSet;
 import org.apache.solr.search.DocSet;
 import org.apache.solr.search.DocSetCollector;
@@ -157,16 +163,25 @@
 
   private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
     Command firstCommand = commands.get(0);
-    AbstractAllGroupHeadsCollector termAllGroupHeadsCollector =
-        TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getSortWithinGroup());
-    if (collectors.isEmpty()) {
-      searchWithTimeLimiter(query, filter, termAllGroupHeadsCollector);
+    String field = firstCommand.getKey();
+    SchemaField sf = searcher.getSchema().getField(field);
+    FieldType fieldType = sf.getType();
+    
+    final AbstractAllGroupHeadsCollector allGroupHeadsCollector;
+    if (fieldType.getNumericType() != null) {
+      ValueSource vs = fieldType.getValueSource(sf, null);
+      allGroupHeadsCollector = new FunctionAllGroupHeadsCollector(vs, new HashMap<Object,Object>(), firstCommand.getSortWithinGroup());
     } else {
-      collectors.add(termAllGroupHeadsCollector);
+      allGroupHeadsCollector = TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getSortWithinGroup());
+    }
+    if (collectors.isEmpty()) {
+      searchWithTimeLimiter(query, filter, allGroupHeadsCollector);
+    } else {
+      collectors.add(allGroupHeadsCollector);
       searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
     }
 
-    return new BitDocSet(termAllGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
+    return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
   }
 
   private DocSet computeDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
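The numeric-vs-term split used here recurs in the distributed grouping commands
further below. As a hedged sketch (the helper name is ours, not the patch's),
the decision reduces to:

    // Numeric trie fields are grouped through the field's ValueSource;
    // all other fields through indexed terms / SortedDocValues.
    static AbstractAllGroupHeadsCollector pickCollector(SchemaField sf, Sort withinGroup) {
      if (sf.getType().getNumericType() != null) {
        ValueSource vs = sf.getType().getValueSource(sf, null);
        return new FunctionAllGroupHeadsCollector(vs, new HashMap<Object,Object>(), withinGroup);
      }
      return TermAllGroupHeadsCollector.create(sf.getName(), withinGroup);
    }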
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
new file mode 100644
index 0000000..3629487
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
@@ -0,0 +1,160 @@
+package org.apache.solr.search.grouping.distributed.command;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.lucene.search.grouping.GroupDocs;
+import org.apache.lucene.search.grouping.SearchGroup;
+import org.apache.lucene.search.grouping.TopGroups;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.mutable.MutableValue;
+import org.apache.lucene.util.mutable.MutableValueDate;
+import org.apache.lucene.util.mutable.MutableValueDouble;
+import org.apache.lucene.util.mutable.MutableValueFloat;
+import org.apache.lucene.util.mutable.MutableValueInt;
+import org.apache.lucene.util.mutable.MutableValueLong;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.schema.TrieField;
+
+/**
+ * This is a transition class: for numeric types we use function-based distributed grouping,
+ * otherwise term-based. For now we internally use function-based grouping but pretend we did
+ * it all with bytes, so as not to change any wire serialization etc.
+ */
+class GroupConverter {
+  
+  static Collection<SearchGroup<BytesRef>> fromMutable(SchemaField field, Collection<SearchGroup<MutableValue>> values) {
+    if (values == null) {
+      return null;
+    }
+    FieldType fieldType = field.getType();
+    List<SearchGroup<BytesRef>> result = new ArrayList<>(values.size());
+    for (SearchGroup<MutableValue> original : values) {
+      SearchGroup<BytesRef> converted = new SearchGroup<BytesRef>();
+      converted.sortValues = original.sortValues;
+      if (original.groupValue.exists) {
+        BytesRef binary = new BytesRef();
+        fieldType.readableToIndexed(original.groupValue.toString(), binary);
+        converted.groupValue = binary;
+      } else {
+        converted.groupValue = null;
+      }
+      result.add(converted);
+    }
+    return result;
+  }
+  
+  static Collection<SearchGroup<MutableValue>> toMutable(SchemaField field, Collection<SearchGroup<BytesRef>> values) {
+    FieldType fieldType = field.getType();
+    List<SearchGroup<MutableValue>> result = new ArrayList<>(values.size());
+    for (SearchGroup<BytesRef> original : values) {
+      SearchGroup<MutableValue> converted = new SearchGroup<MutableValue>();
+      converted.sortValues = original.sortValues; // ?
+      TrieField.TrieTypes type = ((TrieField)fieldType).getType();
+      final MutableValue v;
+      switch (type) {
+        case INTEGER:
+          MutableValueInt mutableInt = new MutableValueInt();
+          if (original.groupValue == null) {
+            mutableInt.value = 0;
+            mutableInt.exists = false;
+          } else {
+            mutableInt.value = (Integer) fieldType.toObject(field, original.groupValue);
+          }
+          v = mutableInt;
+          break;
+        case FLOAT:
+          MutableValueFloat mutableFloat = new MutableValueFloat();
+          if (original.groupValue == null) {
+            mutableFloat.value = 0;
+            mutableFloat.exists = false;
+          } else {
+            mutableFloat.value = (Float) fieldType.toObject(field, original.groupValue);
+          }
+          v = mutableFloat;
+          break;
+        case DOUBLE:
+          MutableValueDouble mutableDouble = new MutableValueDouble();
+          if (original.groupValue == null) {
+            mutableDouble.value = 0;
+            mutableDouble.exists = false;
+          } else {
+            mutableDouble.value = (Double) fieldType.toObject(field, original.groupValue);
+          }
+          v = mutableDouble;
+          break;
+        case LONG:
+          MutableValueLong mutableLong = new MutableValueLong();
+          if (original.groupValue == null) {
+            mutableLong.value = 0;
+            mutableLong.exists = false;
+          } else {
+            mutableLong.value = (Long) fieldType.toObject(field, original.groupValue);
+          }
+          v = mutableLong;
+          break;
+        case DATE:
+          MutableValueDate mutableDate = new MutableValueDate();
+          if (original.groupValue == null) {
+            mutableDate.value = 0;
+            mutableDate.exists = false;
+          } else {
+            mutableDate.value = ((Date)fieldType.toObject(field, original.groupValue)).getTime();
+          }
+          v = mutableDate;
+          break;
+        default:
+          throw new AssertionError();
+      }
+      converted.groupValue = v;
+      result.add(converted);
+    }
+    return result;
+  }
+  
+  static TopGroups<BytesRef> fromMutable(SchemaField field, TopGroups<MutableValue> values) {
+    if (values == null) {
+      return null;
+    }
+    
+    FieldType fieldType = field.getType();
+    
+    @SuppressWarnings("unchecked")
+    GroupDocs<BytesRef> groupDocs[] = new GroupDocs[values.groups.length];
+    
+    for (int i = 0; i < values.groups.length; i++) {
+      GroupDocs<MutableValue> original = values.groups[i];
+      final BytesRef groupValue;
+      if (original.groupValue.exists) {
+        BytesRef binary = new BytesRef();
+        fieldType.readableToIndexed(original.groupValue.toString(), binary);
+        groupValue = binary;
+      } else {
+        groupValue = null;
+      }
+      groupDocs[i] = new GroupDocs<BytesRef>(original.score, original.maxScore, original.totalHits, original.scoreDocs, groupValue, original.groupSortValues);
+    }
+    
+    return new TopGroups<BytesRef>(values.groupSort, values.withinGroupSort, values.totalHitCount, values.totalGroupedHitCount, groupDocs, values.maxScore);
+  }
+}
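A hedged illustration of the round trip GroupConverter performs for a single
trie-int group value (hypothetical snippet, not in the patch; `field` is a
TrieField-backed SchemaField and `fieldType` its type):

    MutableValueInt mv = new MutableValueInt();
    mv.value = 42;
    mv.exists = true;
    // fromMutable direction: readableToIndexed yields the BytesRef wire form
    BytesRef binary = new BytesRef();
    fieldType.readableToIndexed(mv.toString(), binary);
    // toMutable direction: toObject recovers the boxed Integer again
    int back = (Integer) fieldType.toObject(field, binary);  // back == 42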
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
index b079847..c153e74 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
@@ -17,12 +17,18 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
+import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
+import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
+import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.grouping.Command;
 
@@ -76,8 +82,8 @@
   private final int topNGroups;
   private final boolean includeGroupCount;
 
-  private TermFirstPassGroupingCollector firstPassGroupingCollector;
-  private TermAllGroupsCollector allGroupsCollector;
+  private AbstractFirstPassGroupingCollector firstPassGroupingCollector;
+  private AbstractAllGroupsCollector allGroupsCollector;
 
   private SearchGroupsFieldCommand(SchemaField field, Sort groupSort, int topNGroups, boolean includeGroupCount) {
     this.field = field;
@@ -89,12 +95,23 @@
   @Override
   public List<Collector> create() throws IOException {
     List<Collector> collectors = new ArrayList<>();
+    FieldType fieldType = field.getType();
     if (topNGroups > 0) {
-      firstPassGroupingCollector = new TermFirstPassGroupingCollector(field.getName(), groupSort, topNGroups);
+      if (fieldType.getNumericType() != null) {
+        ValueSource vs = fieldType.getValueSource(field, null);
+        firstPassGroupingCollector = new FunctionFirstPassGroupingCollector(vs, new HashMap<Object,Object>(), groupSort, topNGroups);
+      } else {
+        firstPassGroupingCollector = new TermFirstPassGroupingCollector(field.getName(), groupSort, topNGroups);
+      }
       collectors.add(firstPassGroupingCollector);
     }
     if (includeGroupCount) {
-      allGroupsCollector = new TermAllGroupsCollector(field.getName());
+      if (fieldType.getNumericType() != null) {
+        ValueSource vs = fieldType.getValueSource(field, null);
+        allGroupsCollector = new FunctionAllGroupsCollector(vs, new HashMap<Object,Object>());
+      } else {
+        allGroupsCollector = new TermAllGroupsCollector(field.getName());
+      }
       collectors.add(allGroupsCollector);
     }
     return collectors;
@@ -104,7 +121,11 @@
   public Pair<Integer, Collection<SearchGroup<BytesRef>>> result() {
     final Collection<SearchGroup<BytesRef>> topGroups;
     if (topNGroups > 0) {
-      topGroups = firstPassGroupingCollector.getTopGroups(0, true);
+      if (field.getType().getNumericType() != null) {
+        topGroups = GroupConverter.fromMutable(field, firstPassGroupingCollector.getTopGroups(0, true));
+      } else {
+        topGroups = firstPassGroupingCollector.getTopGroups(0, true);
+      }
     } else {
       topGroups = Collections.emptyList();
     }
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
index 35b43be..eb8b485 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
@@ -17,13 +17,18 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.search.grouping.TopGroups;
+import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.mutable.MutableValue;
+import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.grouping.Command;
 
@@ -31,6 +36,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 
 /**
@@ -101,7 +107,7 @@
   private final int maxDocPerGroup;
   private final boolean needScores;
   private final boolean needMaxScore;
-  private TermSecondPassGroupingCollector secondPassCollector;
+  private AbstractSecondPassGroupingCollector secondPassCollector;
 
   private TopGroupsFieldCommand(SchemaField field,
                                 Sort groupSort,
@@ -126,9 +132,18 @@
     }
 
     List<Collector> collectors = new ArrayList<>();
-    secondPassCollector = new TermSecondPassGroupingCollector(
+    FieldType fieldType = field.getType();
+    if (fieldType.getNumericType() != null) {
+      ValueSource vs = fieldType.getValueSource(field, null);
+      Collection<SearchGroup<MutableValue>> v = GroupConverter.toMutable(field, firstPhaseGroups);
+      secondPassCollector = new FunctionSecondPassGroupingCollector(
+          v, groupSort, sortWithinGroup, maxDocPerGroup, needScores, needMaxScore, true, vs, new HashMap<Object,Object>()
+      );
+    } else {
+      secondPassCollector = new TermSecondPassGroupingCollector(
           field.getName(), firstPhaseGroups, groupSort, sortWithinGroup, maxDocPerGroup, needScores, needMaxScore, true
-    );
+      );
+    }
     collectors.add(secondPassCollector);
     return collectors;
   }
@@ -140,7 +155,12 @@
       return new TopGroups<>(groupSort.getSort(), sortWithinGroup.getSort(), 0, 0, new GroupDocs[0], Float.NaN);
     }
 
-    return secondPassCollector.getTopGroups(0);
+    FieldType fieldType = field.getType();
+    if (fieldType.getNumericType() != null) {
+      return GroupConverter.fromMutable(field, secondPassCollector.getTopGroups(0));
+    } else {
+      return secondPassCollector.getTopGroups(0);
+    }
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/update/DeleteByQueryWrapper.java b/solr/core/src/java/org/apache/solr/update/DeleteByQueryWrapper.java
new file mode 100644
index 0000000..1c1fccc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/DeleteByQueryWrapper.java
@@ -0,0 +1,119 @@
+package org.apache.solr.update;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.uninverting.UninvertingReader;
+import org.apache.lucene.util.Bits;
+import org.apache.solr.schema.IndexSchema;
+
+/** 
+ * Allows delete-by-queries to access uninverted docvalues.
+ * This is used, e.g., to implement versioning constraints in Solr.
+ * <p>
+ * Even though we wrap for each query, UninvertingReader's core
+ * cache key is the inner one, so it still reuses field caches and so on.
+ */
+final class DeleteByQueryWrapper extends Query {
+  final Query in;
+  final IndexSchema schema;
+  
+  DeleteByQueryWrapper(Query in, IndexSchema schema) {
+    this.in = in;
+    this.schema = schema;
+  }
+  
+  AtomicReader wrap(AtomicReader reader) {
+    return new UninvertingReader(reader, schema.getUninversionMap(reader));
+  }
+  
+  // we try to be well-behaved, but we are not (and IW's applyQueryDeletes isn't much better...)
+  
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query rewritten = in.rewrite(reader);
+    if (rewritten != in) {
+      return new DeleteByQueryWrapper(rewritten, schema);
+    } else {
+      return this;
+    }
+  }
+  
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    final AtomicReader wrapped = wrap((AtomicReader) searcher.getIndexReader());
+    final IndexSearcher privateContext = new IndexSearcher(wrapped);
+    final Weight inner = in.createWeight(privateContext);
+    return new Weight() {
+      @Override
+      public Explanation explain(AtomicReaderContext context, int doc) throws IOException { throw new UnsupportedOperationException(); }
+
+      @Override
+      public Query getQuery() { return DeleteByQueryWrapper.this; }
+
+      @Override
+      public float getValueForNormalization() throws IOException { return inner.getValueForNormalization(); }
+
+      @Override
+      public void normalize(float norm, float topLevelBoost) { inner.normalize(norm, topLevelBoost); }
+
+      @Override
+      public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+        return inner.scorer(privateContext.getIndexReader().leaves().get(0), acceptDocs);
+      }
+    };
+  }
+
+  @Override
+  public String toString(String field) {
+    return "Uninverting(" + in.toString(field) + ")";
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + ((in == null) ? 0 : in.hashCode());
+    result = prime * result + ((schema == null) ? 0 : schema.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!super.equals(obj)) return false;
+    if (getClass() != obj.getClass()) return false;
+    DeleteByQueryWrapper other = (DeleteByQueryWrapper) obj;
+    if (in == null) {
+      if (other.in != null) return false;
+    } else if (!in.equals(other.in)) return false;
+    if (schema == null) {
+      if (other.schema != null) return false;
+    } else if (!schema.equals(other.schema)) return false;
+    return true;
+  }
+}
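Assumed usage, mirroring the DirectUpdateHandler2 change below: every
delete-by-query is wrapped before being handed to IndexWriter, so that when
the deletion is applied it can resolve fields that now live behind uninverted
docvalues. Because UninvertingReader keys its caches on the inner reader's
core key, the per-query wrapping does not duplicate any field caches.

    writer.deleteDocuments(new DeleteByQueryWrapper(q, core.getLatestSchema()));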
diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
index a7ace1d..5240753 100644
--- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
+++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@@ -242,7 +242,7 @@
               bq.add(new BooleanClause(new TermQuery(updateTerm),
                   Occur.MUST_NOT));
               bq.add(new BooleanClause(new TermQuery(idTerm), Occur.MUST));
-              writer.deleteDocuments(bq);
+              writer.deleteDocuments(new DeleteByQueryWrapper(bq, core.getLatestSchema()));
             }
             
             // Add to the transaction log *after* successfully adding to the
@@ -402,7 +402,7 @@
         } else {
           RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
           try {
-            iw.get().deleteDocuments(q);
+            iw.get().deleteDocuments(new DeleteByQueryWrapper(q, core.getLatestSchema()));
           } finally {
             iw.decref();
           }
@@ -440,7 +440,7 @@
             .getIndexAnalyzer());
         
         for (Query q : dbqList) {
-          writer.deleteDocuments(q);
+          writer.deleteDocuments(new DeleteByQueryWrapper(q, core.getLatestSchema()));
         }
       } finally {
         iw.decref();
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index 1210fd6..86913b5 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -89,7 +89,7 @@
 
   public void split() throws IOException {
 
-    List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves();
+    List<AtomicReaderContext> leaves = searcher.getRawReader().leaves();
     List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
 
     log.info("SolrIndexSplitter: partitions=" + numPieces + " segments="+leaves.size());
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema.xml b/solr/core/src/test-files/solr/collection1/conf/schema.xml
index 219361c..3ee60eb 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema.xml
@@ -500,7 +500,7 @@
 
 
    <field name="cat" type="string" indexed="true" stored="true" multiValued="true"/>
-   <field name="price"  type="float" indexed="true" stored="true"/>
+   <field name="price"  type="float" indexed="true" stored="true" multiValued="false"/>
    <field name="inStock" type="boolean" indexed="true" stored="true" />
 
    <field name="subword" type="subword" indexed="true" stored="true"/>
diff --git a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
index 6c06ec2..56aafb3 100644
--- a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
@@ -23,7 +23,6 @@
 import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
@@ -422,8 +421,6 @@
     
     // Thread.sleep(10000000000L);
 
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
-
     del("*:*"); // delete all docs and test stats request
     commit();
     try {
diff --git a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
index af34844..56d6181 100644
--- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
@@ -17,7 +17,6 @@
 
 package org.apache.solr;
 
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.index.LogDocMergePolicy;
 import org.noggit.JSONUtil;
 import org.noggit.ObjectBuilder;
@@ -518,7 +517,6 @@
       ,"/grouped/"+f+"/matches==10"
       ,"/facet_counts/facet_fields/"+f+"==['1',3, '2',3, '3',2, '4',1, '5',1]"
     );
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
 
     // test that grouping works with highlighting
     assertJQ(req("fq",filt,  "q","{!func}"+f2, "group","true", "group.field",f, "fl","id"
diff --git a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
index 53334b3..1e75ab2 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
@@ -23,7 +23,6 @@
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -114,27 +113,23 @@
 
   @Test
   public void testRandomFaceting() throws Exception {
-    try {
-      Random rand = random();
-      int iter = atLeast(100);
-      init();
-      addMoreDocs(0);
-
-      for (int i=0; i<iter; i++) {
-        doFacetTests();
-
-        if (rand.nextInt(100) < 5) {
-          init();
-        }
-
-        addMoreDocs(rand.nextInt(indexSize) + 1);
-
-        if (rand.nextInt(100) < 50) {
-          deleteSomeDocs();
-        }
+    Random rand = random();
+    int iter = atLeast(100);
+    init();
+    addMoreDocs(0);
+    
+    for (int i=0; i<iter; i++) {
+      doFacetTests();
+      
+      if (rand.nextInt(100) < 5) {
+        init();
       }
-    } finally {
-      FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
+      
+      addMoreDocs(rand.nextInt(indexSize) + 1);
+      
+      if (rand.nextInt(100) < 50) {
+        deleteSomeDocs();
+      }
     }
   }
 
diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
index f2d4cd3..228ede3 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
@@ -17,7 +17,6 @@
 
 package org.apache.solr;
 
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -113,27 +112,23 @@
 
   @Test
   public void testRandomFaceting() throws Exception {
-    try {
-      Random rand = random();
-      int iter = atLeast(100);
-      init();
-      addMoreDocs(0);
-
-      for (int i=0; i<iter; i++) {
-        doFacetTests();
-
-        if (rand.nextInt(100) < 5) {
-          init();
-        }
-
-        addMoreDocs(rand.nextInt(indexSize) + 1);
-
-        if (rand.nextInt(100) < 50) {
-          deleteSomeDocs();
-        }
+    Random rand = random();
+    int iter = atLeast(100);
+    init();
+    addMoreDocs(0);
+    
+    for (int i=0; i<iter; i++) {
+      doFacetTests();
+      
+      if (rand.nextInt(100) < 5) {
+        init();
       }
-    } finally {
-      FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
+      
+      addMoreDocs(rand.nextInt(indexSize) + 1);
+      
+      if (rand.nextInt(100) < 50) {
+        deleteSomeDocs();
+      }
     }
   }
 
diff --git a/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java b/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
index 85c93ff..2507ae6 100644
--- a/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
@@ -196,7 +196,7 @@
   public static void assertCompoundSegments(SolrCore core, boolean compound) {
     RefCounted<SolrIndexSearcher> searcherRef = core.getRegisteredSearcher();
     try {
-      assertCompoundSegments(searcherRef.get().getIndexReader(), compound);
+      assertCompoundSegments(searcherRef.get().getRawReader(), compound);
     } finally {
       searcherRef.decref();
     }
diff --git a/solr/core/src/test/org/apache/solr/core/TestNRTOpen.java b/solr/core/src/test/org/apache/solr/core/TestNRTOpen.java
index cf5e80b..9ceca36 100644
--- a/solr/core/src/test/org/apache/solr/core/TestNRTOpen.java
+++ b/solr/core/src/test/org/apache/solr/core/TestNRTOpen.java
@@ -129,7 +129,7 @@
   static void assertNRT(int maxDoc) {
     RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
     try {
-      DirectoryReader ir = searcher.get().getIndexReader();
+      DirectoryReader ir = searcher.get().getRawReader();
       assertEquals(maxDoc, ir.maxDoc());
       assertTrue("expected NRT reader, got: " + ir, ir.toString().contains(":nrt"));
     } finally {
@@ -141,7 +141,7 @@
     RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
     Set<Object> set = Collections.newSetFromMap(new IdentityHashMap<Object,Boolean>());
     try {
-      DirectoryReader ir = searcher.get().getIndexReader();
+      DirectoryReader ir = searcher.get().getRawReader();
       for (AtomicReaderContext context : ir.leaves()) {
         set.add(context.reader().getCoreCacheKey());
       }
diff --git a/solr/core/src/test/org/apache/solr/core/TestNonNRTOpen.java b/solr/core/src/test/org/apache/solr/core/TestNonNRTOpen.java
index e828824..a3bfd7d 100644
--- a/solr/core/src/test/org/apache/solr/core/TestNonNRTOpen.java
+++ b/solr/core/src/test/org/apache/solr/core/TestNonNRTOpen.java
@@ -138,7 +138,7 @@
     RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
     try {
       SolrIndexSearcher s = searcher.get();
-      DirectoryReader ir = s.getIndexReader();
+      DirectoryReader ir = s.getRawReader();
       assertEquals("SOLR-5815? : wrong maxDoc: core=" + core.toString() +" searcher=" + s.toString(),
                    maxDoc, ir.maxDoc());
       assertFalse("SOLR-5815? : expected non-NRT reader, got: " + ir, ir.toString().contains(":nrt"));
@@ -151,7 +151,7 @@
     RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
     Set<Object> set = Collections.newSetFromMap(new IdentityHashMap<Object,Boolean>());
     try {
-      DirectoryReader ir = searcher.get().getIndexReader();
+      DirectoryReader ir = searcher.get().getRawReader();
       for (AtomicReaderContext context : ir.leaves()) {
         set.add(context.reader().getCoreCacheKey());
       }
diff --git a/solr/core/src/test/org/apache/solr/request/TestFaceting.java b/solr/core/src/test/org/apache/solr/request/TestFaceting.java
index 05fabc2..410d4da 100644
--- a/solr/core/src/test/org/apache/solr/request/TestFaceting.java
+++ b/solr/core/src/test/org/apache/solr/request/TestFaceting.java
@@ -22,9 +22,12 @@
 import java.util.Locale;
 import java.util.Random;
 
-import org.apache.lucene.index.DocTermOrds;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.uninverting.DocTermOrds;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.FacetParams;
@@ -81,12 +84,11 @@
     createIndex(size);
     req = lrf.makeRequest("q","*:*");
 
-    UnInvertedField uif = new UnInvertedField(proto.field(), req.getSearcher());
+    SortedSetDocValues dv = DocValues.getSortedSet(req.getSearcher().getAtomicReader(), proto.field());
 
-    assertEquals(size, uif.getNumTerms());
+    assertEquals(size, dv.getValueCount());
 
-    TermsEnum te = uif.getOrdTermsEnum(req.getSearcher().getAtomicReader());
-    assertEquals(size == 0, te == null);
+    TermsEnum te = dv.termsEnum();
 
     Random r = new Random(size);
     // test seeking by term string
@@ -763,16 +765,16 @@
     RefCounted<SolrIndexSearcher> currentSearcherRef = h.getCore().getSearcher();
     try {
       SolrIndexSearcher currentSearcher = currentSearcherRef.get();
-      UnInvertedField ui0 = UnInvertedField.getUnInvertedField("f0_ws", currentSearcher);
-      UnInvertedField ui1 = UnInvertedField.getUnInvertedField("f1_ws", currentSearcher);
-      UnInvertedField ui2 = UnInvertedField.getUnInvertedField("f2_ws", currentSearcher);
-      UnInvertedField ui3 = UnInvertedField.getUnInvertedField("f3_ws", currentSearcher);
-      UnInvertedField ui4 = UnInvertedField.getUnInvertedField("f4_ws", currentSearcher);
-      UnInvertedField ui5 = UnInvertedField.getUnInvertedField("f5_ws", currentSearcher);
-      UnInvertedField ui6 = UnInvertedField.getUnInvertedField("f6_ws", currentSearcher);
-      UnInvertedField ui7 = UnInvertedField.getUnInvertedField("f7_ws", currentSearcher);
-      UnInvertedField ui8 = UnInvertedField.getUnInvertedField("f8_ws", currentSearcher);
-      UnInvertedField ui9 = UnInvertedField.getUnInvertedField("f9_ws", currentSearcher);
+      SortedSetDocValues ui0 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f0_ws");
+      SortedSetDocValues ui1 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f1_ws");
+      SortedSetDocValues ui2 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f2_ws");
+      SortedSetDocValues ui3 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f3_ws");
+      SortedSetDocValues ui4 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f4_ws");
+      SortedSetDocValues ui5 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f5_ws");
+      SortedSetDocValues ui6 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f6_ws");
+      SortedSetDocValues ui7 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f7_ws");
+      SortedSetDocValues ui8 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f8_ws");
+      SortedSetDocValues ui9 = DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f9_ws");
 
       assertQ("check threading, more threads than fields",
           req("q", "id:*", "indent", "true", "fl", "id", "rows", "1"
@@ -924,28 +926,39 @@
       // Now, are all the UnInvertedFields still the same? Meaning they weren't re-fetched even when a bunch were
       // requested at the same time?
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui0, UnInvertedField.getUnInvertedField("f0_ws", currentSearcher));
+          ui0, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f0_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui1, UnInvertedField.getUnInvertedField("f1_ws", currentSearcher));
+          ui1, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f1_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui2, UnInvertedField.getUnInvertedField("f2_ws", currentSearcher));
+          ui2, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f2_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui3, UnInvertedField.getUnInvertedField("f3_ws", currentSearcher));
+          ui3, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f3_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui4, UnInvertedField.getUnInvertedField("f4_ws", currentSearcher));
+          ui4, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f4_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui5, UnInvertedField.getUnInvertedField("f5_ws", currentSearcher));
+          ui5, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f5_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui6, UnInvertedField.getUnInvertedField("f6_ws", currentSearcher));
+          ui6, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f6_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui7, UnInvertedField.getUnInvertedField("f7_ws", currentSearcher));
+          ui7, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f7_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui8, UnInvertedField.getUnInvertedField("f8_ws", currentSearcher));
+          ui8, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f8_ws"));
       assertEquals("UnInvertedField coming back from the seacher should not have changed! ",
-          ui9, UnInvertedField.getUnInvertedField("f9_ws", currentSearcher));
+          ui9, DocValues.getSortedSet(currentSearcher.getAtomicReader(), "f9_ws"));
     } finally {
       currentSearcherRef.decref();
     }
   }
+  
+  // assert same instance: either same object, or both wrapping same single-valued object
+  private void assertEquals(String msg, SortedSetDocValues dv1, SortedSetDocValues dv2) {
+    SortedDocValues singleton1 = DocValues.unwrapSingleton(dv1);
+    SortedDocValues singleton2 = DocValues.unwrapSingleton(dv2);
+    if (singleton1 == null || singleton2 == null) {
+      assertSame(dv1, dv2);
+    } else {
+      assertSame(singleton1, singleton2);
+    }
+  }
 }
 
diff --git a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
index bc6a0e3..e62595e 100644
--- a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
+++ b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr.search;
 
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReaderContext;
@@ -71,7 +72,7 @@
     assertU(commit());
 
     SolrQueryRequest sr1 = req("q","foo");
-    IndexReaderContext rCtx1 = sr1.getSearcher().getTopReaderContext();
+    IndexReader r1 = sr1.getSearcher().getRawReader();
 
     String sval1 = getStringVal(sr1, "v_s1",0);
     assertEquals("string1", sval1);
@@ -81,28 +82,28 @@
     assertU(commit());
 
     SolrQueryRequest sr2 = req("q","foo");
-    IndexReaderContext rCtx2 = sr2.getSearcher().getTopReaderContext();
+    IndexReader r2 = sr2.getSearcher().getRawReader();
 
     // make sure the readers share the first segment
     // Didn't work w/ older versions of lucene2.9 going from segment -> multi
-    assertEquals(rCtx1.leaves().get(0).reader(), rCtx2.leaves().get(0).reader());
+    assertEquals(r1.leaves().get(0).reader(), r2.leaves().get(0).reader());
 
     assertU(adoc("id","5", "v_f","3.14159"));
     assertU(adoc("id","6", "v_f","8983", "v_s1","string6"));
     assertU(commit());
 
     SolrQueryRequest sr3 = req("q","foo");
-    IndexReaderContext rCtx3 = sr3.getSearcher().getTopReaderContext();
+    IndexReader r3 = sr3.getSearcher().getRawReader();
     // make sure the readers share segments
     // assertEquals(r1.getLeafReaders()[0], r3.getLeafReaders()[0]);
-    assertEquals(rCtx2.leaves().get(0).reader(), rCtx3.leaves().get(0).reader());
-    assertEquals(rCtx2.leaves().get(1).reader(), rCtx3.leaves().get(1).reader());
+    assertEquals(r2.leaves().get(0).reader(), r3.leaves().get(0).reader());
+    assertEquals(r2.leaves().get(1).reader(), r3.leaves().get(1).reader());
 
     sr1.close();
     sr2.close();            
 
     // should currently be 1, but this could change depending on future index management
-    int baseRefCount = rCtx3.reader().getRefCount();
+    int baseRefCount = r3.getRefCount();
     assertEquals(1, baseRefCount);
 
     Object sr3SearcherRegAt = sr3.getSearcher().getStatistics().get("registeredAt");
@@ -112,7 +113,7 @@
                sr3.getSearcher(), sr4.getSearcher());
     assertEquals("nothing changed, searcher should not have been re-registered",
                  sr3SearcherRegAt, sr4.getSearcher().getStatistics().get("registeredAt"));
-    IndexReaderContext rCtx4 = sr4.getSearcher().getTopReaderContext();
+    IndexReader r4 = sr4.getSearcher().getRawReader();
 
     // force an index change so the registered searcher won't be the one we are testing (and
     // then we should be able to test the refCount going all the way to 0
@@ -120,12 +121,12 @@
     assertU(commit()); 
 
     // test that reader didn't change
-    assertSame(rCtx3.reader(), rCtx4.reader());
-    assertEquals(baseRefCount, rCtx4.reader().getRefCount());
+    assertSame(r3, r4);
+    assertEquals(baseRefCount, r4.getRefCount());
     sr3.close();
-    assertEquals(baseRefCount, rCtx4.reader().getRefCount());
+    assertEquals(baseRefCount, r4.getRefCount());
     sr4.close();
-    assertEquals(baseRefCount-1, rCtx4.reader().getRefCount());
+    assertEquals(baseRefCount-1, r4.getRefCount());
 
 
     SolrQueryRequest sr5 = req("q","foo");
diff --git a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
index 4b18ce9..267b7e9 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
@@ -18,9 +18,10 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -702,7 +703,7 @@
   class TestCollector extends TopDocsCollector {
 
     private List<ScoreDoc> list = new ArrayList();
-    private FieldCache.Ints values;
+    private NumericDocValues values;
     private int base;
 
     public TestCollector(PriorityQueue pq) {
@@ -714,7 +715,7 @@
     }
 
     public void doSetNextReader(AtomicReaderContext context) throws IOException {
-      values = FieldCache.DEFAULT.getInts(context.reader(), "sort_i", false);
+      values = DocValues.getNumeric(context.reader(), "sort_i");
       base = context.docBase;
     }
 
diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java
index dee7e92..d1a909b 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSort.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSort.java
@@ -21,7 +21,9 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.lucene.analysis.core.SimpleAnalyzer;
@@ -50,6 +52,7 @@
 import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.TestUtil;
@@ -221,8 +224,11 @@
       }
       iw.shutdown();
 
+      Map<String,UninvertingReader.Type> mapping = new HashMap<>();
+      mapping.put("f", UninvertingReader.Type.SORTED);
+      mapping.put("f2", UninvertingReader.Type.SORTED);
 
-      DirectoryReader reader = DirectoryReader.open(dir);
+      DirectoryReader reader = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
       IndexSearcher searcher = new IndexSearcher(reader);
       // System.out.println("segments="+searcher.getIndexReader().getSequentialSubReaders().length);
       assertTrue(reader.leaves().size() > 1);
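With the mapping installed, sorting on "f" behaves as it did under FieldCache;
a hedged follow-on sketch using the wrapped reader/searcher above:

    // Type.SORTED makes the indexed terms of "f" visible as SortedDocValues.
    Sort sort = new Sort(new SortField("f", SortField.Type.STRING));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);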
diff --git a/solr/core/src/test/org/apache/solr/search/function/SortByFunctionTest.java b/solr/core/src/test/org/apache/solr/search/function/SortByFunctionTest.java
index 4264d82..6f11d9e 100644
--- a/solr/core/src/test/org/apache/solr/search/function/SortByFunctionTest.java
+++ b/solr/core/src/test/org/apache/solr/search/function/SortByFunctionTest.java
@@ -99,11 +99,11 @@
   
   public void testSortJoinDocFreq() throws Exception
   {
-    assertU(adoc("id", "4", "id_s", "D", "links_mfacet", "A", "links_mfacet", "B", "links_mfacet", "C" ) );
-    assertU(adoc("id", "3", "id_s", "C", "links_mfacet", "A", "links_mfacet", "B" ) );
+    assertU(adoc("id", "4", "id_s1", "D", "links_mfacet", "A", "links_mfacet", "B", "links_mfacet", "C" ) );
+    assertU(adoc("id", "3", "id_s1", "C", "links_mfacet", "A", "links_mfacet", "B" ) );
     assertU(commit()); // Make sure it uses two readers
-    assertU(adoc("id", "2", "id_s", "B", "links_mfacet", "A" ) );
-    assertU(adoc("id", "1", "id_s", "A"  ) );
+    assertU(adoc("id", "2", "id_s1", "B", "links_mfacet", "A" ) );
+    assertU(adoc("id", "1", "id_s1", "A"  ) );
     assertU(commit());
 
     assertQ(req("q", "links_mfacet:B", "fl", "id", "sort", "id asc"),
@@ -112,7 +112,7 @@
             "//result/doc[2]/int[@name='id'][.='4']"
     );
     
-    assertQ(req("q", "*:*", "fl", "id", "sort", "joindf(id_s, links_mfacet) desc"),
+    assertQ(req("q", "*:*", "fl", "id", "sort", "joindf(id_s1, links_mfacet) desc"),
             "//*[@numFound='4']",
             "//result/doc[1]/int[@name='id'][.='1']",
             "//result/doc[2]/int[@name='id'][.='2']",
@@ -120,7 +120,7 @@
             "//result/doc[4]/int[@name='id'][.='4']"
     );
 
-    assertQ(req("q", "*:*", "fl", "id", "sort", "joindf(id_s, links_mfacet) asc"),
+    assertQ(req("q", "*:*", "fl", "id", "sort", "joindf(id_s1, links_mfacet) asc"),
             "//*[@numFound='4']",
             "//result/doc[1]/int[@name='id'][.='4']",
             "//result/doc[2]/int[@name='id'][.='3']",
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
index d5fe2bb..2ad29ed 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
@@ -19,7 +19,6 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.TFIDFSimilarity;
 import org.apache.solr.SolrTestCaseJ4;
@@ -202,8 +201,6 @@
     singleTest(field,"sum(query($v1,5),query($v1,7))",
             Arrays.asList("v1","\0:[* TO *]"),  88,12
             );
-
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
   }
 
   @Test
@@ -283,9 +280,7 @@
 
       singleTest(field, "\0", answers);
       // System.out.println("Done test "+i);
-    }
-
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity    
+    }
   }
 
   @Test
@@ -422,9 +417,6 @@
            ,"*//doc[1]/float[.='120.0']"
            ,"*//doc[2]/float[.='121.0']"
     );
-
-
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
   }
 
   /**
@@ -640,9 +632,7 @@
     assertU(adoc("id", "10000")); // will get same reader if no index change
     assertU(commit());   
     singleTest(fieldAsFunc, "sqrt(\0)");
-    assertTrue(orig != FileFloatSource.onlyForTesting);
-
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity    
+    assertTrue(orig != FileFloatSource.onlyForTesting);
   }
 
   /**
@@ -667,9 +657,7 @@
                100,100,  -4,-4,  0,0,  10,10,  25,25,  5,5,  77,77,  1,1);
     singleTest(fieldAsFunc, "sqrt(\0)", 
                100,10,  25,5,  0,0,   1,1);
-    singleTest(fieldAsFunc, "log(\0)",  1,0);
-
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity    
+    singleTest(fieldAsFunc, "log(\0)",  1,0);
   }
 
     @Test
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
new file mode 100644
index 0000000..6b78051
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
@@ -0,0 +1,310 @@
+package org.apache.solr.search.function;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.queries.function.FunctionQuery;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
+import org.apache.lucene.queries.function.valuesource.IntFieldSource;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test search based on OrdFieldSource and ReverseOrdFieldSource.
+ * <p>
+ * Tests here create an index with a few documents, each having
+ * an indexed "id" field.
+ * The ord values of this field are later used for scoring.
+ * <p>
+ * The order tests use the returned ScoreDocs to verify that docs are ordered as expected.
+ * <p>
+ * The exact score tests use TopDocs to verify the exact score.
+ */
+public class TestOrdValues extends LuceneTestCase {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createIndex(false);
+  }
+
+  /**
+   * Test OrdFieldSource
+   */
+  @Test
+  public void testOrdFieldRank() throws Exception {
+    doTestRank(ID_FIELD, true);
+  }
+
+  /**
+   * Test ReverseOrdFieldSource
+   */
+  @Test
+  public void testReverseOrdFieldRank() throws Exception {
+    doTestRank(ID_FIELD, false);
+  }
+
+  // Test that queries based on reverse/ordFieldScore scores correctly
+  private void doTestRank(String field, boolean inOrder) throws Exception {
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    ValueSource vs;
+    if (inOrder) {
+      vs = new OrdFieldSource(field);
+    } else {
+      vs = new ReverseOrdFieldSource(field);
+    }
+
+    Query q = new FunctionQuery(vs);
+    log("test: " + q);
+    QueryUtils.check(random(), q, s);
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    assertEquals("All docs should be matched!", N_DOCS, h.length);
+    String prevID = inOrder
+            ? "IE"   // greater than all ids of docs in this test ("ID0001", etc.)
+            : "IC";  // smaller than all ids of docs in this test ("ID0001", etc.)
+
+    for (int i = 0; i < h.length; i++) {
+      String resID = s.doc(h[i].doc).get(ID_FIELD);
+      log(i + ".   score=" + h[i].score + "  -  " + resID);
+      log(s.explain(q, h[i].doc));
+      if (inOrder) {
+        assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.compareTo(prevID) < 0);
+      } else {
+        assertTrue("res id " + resID + " should be > prev res id " + prevID, resID.compareTo(prevID) > 0);
+      }
+      prevID = resID;
+    }
+    r.close();
+  }
+
+  /**
+   * Test exact score for OrdFieldSource
+   */
+  @Test
+  public void testOrdFieldExactScore() throws Exception {
+    doTestExactScore(ID_FIELD, true);
+  }
+
+  /**
+   * Test exact score for ReverseOrdFieldSource
+   */
+  @Test
+  public void testReverseOrdFieldExactScore() throws Exception {
+    doTestExactScore(ID_FIELD, false);
+  }
+
+
+  // Test that queries based on reverse/ordFieldScore returns docs with expected score.
+  private void doTestExactScore(String field, boolean inOrder) throws Exception {
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    ValueSource vs;
+    if (inOrder) {
+      vs = new OrdFieldSource(field);
+    } else {
+      vs = new ReverseOrdFieldSource(field);
+    }
+    Query q = new FunctionQuery(vs);
+    TopDocs td = s.search(q, null, 1000);
+    assertEquals("All docs should be matched!", N_DOCS, td.totalHits);
+    ScoreDoc sd[] = td.scoreDocs;
+    for (int i = 0; i < sd.length; i++) {
+      float score = sd[i].score;
+      String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD);
+      log("-------- " + i + ". Explain doc " + id);
+      log(s.explain(q, sd[i].doc));
+      float expectedScore = N_DOCS - i - 1;
+      assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
+      String expectedId = inOrder
+              ? id2String(N_DOCS - i) // in-order ==> larger  values first
+              : id2String(i + 1);     // reverse  ==> smaller values first
+      assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
+    }
+    r.close();
+  }
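+
+  // Worked example of the scores asserted above (an illustration, assuming the
+  // usual 0-based SortedDocValues ordinals): with N_DOCS = 17 the IDs are
+  // ID00001..ID00017, so OrdFieldSource scores ID00017 highest (16.0f) down to
+  // ID00001 (0.0f), while ReverseOrdFieldSource flips the ranking so ID00001
+  // scores highest; either way the score of result i is N_DOCS - i - 1.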
+  
+  // LUCENE-1250
+  public void testEqualsNull() throws Exception {
+    OrdFieldSource ofs = new OrdFieldSource("f");
+    assertFalse(ofs.equals(null));
+    
+    ReverseOrdFieldSource rofs = new ReverseOrdFieldSource("f");
+    assertFalse(rofs.equals(null));
+  }
+  
+  /**
+   * The actual score computation order differs slightly from what the test assumes;
+   * this delta allows for a small amount of variation.
+   */
+  protected static float TEST_SCORE_TOLERANCE_DELTA = 0.001f;
+
+  protected static final int N_DOCS = 17; // select a prime number > 2
+
+  protected static final String ID_FIELD = "id";
+  protected static final String TEXT_FIELD = "text";
+  protected static final String INT_FIELD = "iii";
+  protected static final String FLOAT_FIELD = "fff";
+
+  protected ValueSource INT_VALUESOURCE = new IntFieldSource(INT_FIELD);
+  protected ValueSource FLOAT_VALUESOURCE = new FloatFieldSource(FLOAT_FIELD);
+
+  private static final String[] DOC_TEXT_LINES = {
+          "Well, this is just some plain text we use for creating the ",
+          "test documents. It used to be a text from an online collection ",
+          "devoted to first aid, but if there was there an (online) lawyers ",
+          "first aid collection with legal advices, \"it\" might have quite ",
+          "probably advised one not to include \"it\"'s text or the text of ",
+          "any other online collection in one's code, unless one has money ",
+          "that one don't need and one is happy to donate for lawyers ",
+          "charity. Anyhow at some point, rechecking the usage of this text, ",
+          "it became uncertain that this text is free to use, because ",
+          "the web site in the disclaimer of he eBook containing that text ",
+          "was not responding anymore, and at the same time, in projGut, ",
+          "searching for first aid no longer found that eBook as well. ",
+          "So here we are, with a perhaps much less interesting ",
+          "text for the test, but oh much much safer. ",
+  };
+
+  protected static Directory dir;
+  protected static Analyzer anlzr;
+
+  @AfterClass
+  public static void afterClassFunctionTestSetup() throws Exception {
+    dir.close();
+    dir = null;
+    anlzr = null;
+  }
+
+  protected static void createIndex(boolean doMultiSegment) throws Exception {
+    if (VERBOSE) {
+      System.out.println("TEST: setUp");
+    }
+    // prepare a small index with just a few documents.
+    dir = newDirectory();
+    anlzr = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy());
+    if (doMultiSegment) {
+      iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 7));
+    }
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    // add docs in an order different from natural ID order, to verify that the test really checks ordering of docs by score
+    int remaining = N_DOCS;
+    boolean[] done = new boolean[N_DOCS];
+    int i = 0;
+    while (remaining > 0) {
+      if (done[i]) {
+        throw new Exception("for this test to be set up correctly, N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
+      }
+      addDoc(iw, i);
+      done[i] = true;
+      i = (i + 4) % N_DOCS;
+      remaining--;
+    }
+    if (!doMultiSegment) {
+      if (VERBOSE) {
+        System.out.println("TEST: setUp full merge");
+      }
+      iw.forceMerge(1);
+    }
+    iw.shutdown();
+    if (VERBOSE) {
+      System.out.println("TEST: setUp done close");
+    }
+  }
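+
+  // Why createIndex insists on a prime N_DOCS > 2 (an editorial sketch of the
+  // reasoning, not part of the original test): the loop steps i -> (i + 4) % N_DOCS,
+  // which visits every slot exactly once iff gcd(4, N_DOCS) == 1, and any prime
+  // greater than 2 is odd, hence coprime to 4. A standalone check of that property:
+  @SuppressWarnings("unused")
+  private static boolean stepOfFourCoversAllSlots(int nDocs) {
+    boolean[] seen = new boolean[nDocs];
+    int i = 0;
+    for (int n = 0; n < nDocs; n++) {
+      if (seen[i]) {
+        return false; // cycled back before covering every slot
+      }
+      seen[i] = true;
+      i = (i + 4) % nDocs;
+    }
+    return true;
+  }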
+
+  private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
+    Document d = new Document();
+    Field f;
+    int scoreAndID = i + 1;
+
+    FieldType customType = new FieldType(TextField.TYPE_STORED);
+    customType.setTokenized(false);
+    customType.setOmitNorms(true);
+    
+    f = newField(ID_FIELD, id2String(scoreAndID), customType); // stored so tests can read the ID back
+    d.add(f);
+    d.add(new SortedDocValuesField(ID_FIELD, new BytesRef(id2String(scoreAndID))));
+
+    FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
+    customType2.setOmitNorms(true);
+    f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
+    d.add(f);
+
+    f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
+    d.add(f);
+    d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));
+
+    f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
+    d.add(f);
+    d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));
+
+    iw.addDocument(d);
+    log("added: " + d);
+  }
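+
+  // Editorial note (tied to LUCENE-5666): each sortable/function field above is
+  // written twice -- once indexed/stored, once as a DocValues field -- so sorting
+  // and function queries read DocValues directly instead of uninverting through
+  // FieldCache. FLOAT_FIELD's doc value holds raw int bits, so a consumer would
+  // decode it, e.g. (a sketch, assuming a NumericDocValues dv for FLOAT_FIELD):
+  //   float value = Float.intBitsToFloat((int) dv.get(docID));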
+
+  // 17 --> ID00017
+  protected static String id2String(int scoreAndID) {
+    String s = "000000000" + scoreAndID;
+    int n = ("" + N_DOCS).length() + 3;
+    int k = s.length() - n;
+    return "ID" + s.substring(k);
+  }
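+
+  // Worked example of the padding above (illustrative): N_DOCS = 17 gives
+  // n = "17".length() + 3 = 5, so id2String(7) pads to "0000000007" and keeps
+  // the last 5 digits: "ID00007".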
+
+  // some text line for regular search
+  private static String textLine(int docNum) {
+    return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.length];
+  }
+
+  // extract expected doc score from its ID field: "ID00007" --> 7.0
+  protected static float expectedFieldScore(String docIDFieldVal) {
+    return Float.parseFloat(docIDFieldVal.substring(2));
+  }
+
+  // debug messages (run with -Dtests.verbose=true for anything to print)
+  protected static void log(Object o) {
+    if (VERBOSE) {
+      System.out.println(o.toString());
+    }
+  }
+
+}
diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
index 55c57b2..e76907e 100644
--- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
@@ -35,7 +35,6 @@
 import junit.framework.Assert;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -284,7 +283,6 @@
   @Override
   public void tearDown() throws Exception {
     destroyServers();
-    FieldCache.DEFAULT.purgeAllCaches();   // avoid FC insanity
     super.tearDown();
   }