LUCENE-6005: merge trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene6005@1658277 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
index 9997d40..7d8f3e1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
@@ -34,7 +34,7 @@
   private boolean done = false;
   private int finalOffset;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-  private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   
   public KeywordTokenizer() {
     this(DEFAULT_BUFFER_SIZE);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java b/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java
deleted file mode 100644
index 3910e74..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/collation/CollationDocValuesField.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package org.apache.lucene.collation;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.text.Collator;
-
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.search.DocValuesRangeFilter;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * Indexes collation keys as a single-valued {@link SortedDocValuesField}.
- * <p>
- * This is more efficient that {@link CollationKeyAnalyzer} if the field 
- * only has one value: no uninversion is necessary to sort on the field, 
- * locale-sensitive range queries can still work via {@link DocValuesRangeFilter}, 
- * and the underlying data structures built at index-time are likely more efficient 
- * and use less memory than FieldCache.
- */
-public final class CollationDocValuesField extends Field {
-  private final String name;
-  private final Collator collator;
-  private final BytesRef bytes = new BytesRef();
-  
-  /**
-   * Create a new ICUCollationDocValuesField.
-   * <p>
-   * NOTE: you should not create a new one for each document, instead
-   * just make one and reuse it during your indexing process, setting
-   * the value via {@link #setStringValue(String)}.
-   * @param name field name
-   * @param collator Collator for generating collation keys.
-   */
-  // TODO: can we make this trap-free? maybe just synchronize on the collator
-  // instead? 
-  public CollationDocValuesField(String name, Collator collator) {
-    super(name, SortedDocValuesField.TYPE);
-    this.name = name;
-    this.collator = (Collator) collator.clone();
-    fieldsData = bytes; // so wrong setters cannot be called
-  }
-
-  @Override
-  public String name() {
-    return name;
-  }
-  
-  @Override
-  public void setStringValue(String value) {
-    bytes.bytes = collator.getCollationKey(value).toByteArray();
-    bytes.offset = 0;
-    bytes.length = bytes.bytes.length;
-  }
-}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFieldTypes.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFieldTypes.java
new file mode 100644
index 0000000..2ab2247
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFieldTypes.java
@@ -0,0 +1,45 @@
+package org.apache.lucene.analysis.core;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestFieldTypes extends LuceneTestCase {
+
+  public void testReversedTokens() throws Exception {
+    RandomIndexWriter w = newRandomIndexWriter();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setReversedTerms("rev");
+    Document doc = w.newDocument();
+    doc.addShortText("rev", "here");
+    w.addDocument(doc);
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+    assertEquals(0, s.search(new TermQuery(new Term("rev", "here")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("rev", "ereh")), 1).totalHits);
+    r.close();
+    w.close();
+  }
+}
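
Note on the API migration this patch applies throughout: per-field StringField/TextField/FieldType construction is replaced by a writer-level FieldTypes schema plus typed Document.addXXX methods, and documents are obtained from the writer itself. A minimal sketch of the new shape, assuming lucene6005 branch semantics (the method names below all appear in the diffs in this patch; defaults are branch assumptions):

    // Hedged sketch of the lucene6005 document API used by these tests.
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(analyzer));
    FieldTypes fieldTypes = w.getFieldTypes();  // per-field schema lives on the writer
    fieldTypes.setReversedTerms("rev");         // field option, declared once

    Document doc = w.newDocument();             // replaces new Document()
    doc.addAtom("id", "0");                     // replaces StringField(..., Store.YES)
    doc.addLargeText("body", "some text");      // replaces TextField(..., Store.YES)
    doc.addShortText("rev", "here");            // short analyzed text, reversed per the schema
    w.addDocument(doc);
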
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
index 7381e11..91a50ac 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
@@ -23,9 +23,6 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -51,9 +48,9 @@
     directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new SimpleAnalyzer()));
 
-    Document doc = new Document();
-    doc.add(new StringField("partnum", "Q36", Field.Store.YES));
-    doc.add(new TextField("description", "Illidium Space Modulator", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("partnum", "Q36");
+    doc.addLargeText("description", "Illidium Space Modulator");
     writer.addDocument(doc);
 
     writer.close();
@@ -87,11 +84,11 @@
   public void testMutipleDocument() throws Exception {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()));
-    Document doc = new Document();
-    doc.add(new TextField("partnum", "Q36", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("partnum", "Q36");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new TextField("partnum", "Q37", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("partnum", "Q37");
     writer.addDocument(doc);
     writer.close();
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java
index a8d9156..91145b6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.NumericTokenStream;
 import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
@@ -43,23 +42,6 @@
     assertTrue("types Size: " + types.size() + " is not: " + 4, types.size() == 4);
   }
 
-  public void testCreationWithBlackList() throws Exception {
-    TokenFilterFactory factory = tokenFilterFactory("Type",
-        "types", "stoptypes-1.txt, stoptypes-2.txt");
-    NumericTokenStream input = new NumericTokenStream();
-    input.setIntValue(123);
-    factory.create(input);
-  }
-  
-  public void testCreationWithWhiteList() throws Exception {
-    TokenFilterFactory factory = tokenFilterFactory("Type",
-        "types", "stoptypes-1.txt, stoptypes-2.txt",
-        "useWhitelist", "true");
-    NumericTokenStream input = new NumericTokenStream();
-    input.setIntValue(123);
-    factory.create(input);
-  }
-
   public void testMissingTypesParameter() throws Exception {
     try {
       tokenFilterFactory("Type");
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java
index 0f86147..54f26bc 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestEmptyTokenStream.java
@@ -19,13 +19,10 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.Directory;
 
@@ -55,9 +52,9 @@
     TokenStream ts = new EmptyTokenStream();
     assertFalse(ts.hasAttribute(TermToBytesRefAttribute.class));
 
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new TextField("description", ts));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "0");
+    doc.addLargeText("description", ts);
     
     // this should not fail because we have no TermToBytesRefAttribute
     writer.addDocument(doc);
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
index 3d191d5..21cfd55 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -69,13 +68,13 @@
 
       IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(a));
 
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       StringBuilder b = new StringBuilder();
       for(int i=1;i<limit;i++)
         b.append(" a");
       b.append(" x");
       b.append(" z");
-      doc.add(newTextField("field", b.toString(), Field.Store.NO));
+      doc.addLargeText("field", b.toString());
       writer.addDocument(doc);
       writer.close();
       
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
index 675a96c..467ee3f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
@@ -16,19 +16,17 @@
  * limitations under the License.
  */
 
+import java.util.Arrays;
+import java.util.Collections;
+
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.RAMDirectory;
 
-import java.util.Arrays;
-import java.util.Collections;
-
 public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
   String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
   String repetitiveFieldValues[] = {"boring", "boring", "vaguelyboring"};
@@ -45,11 +43,11 @@
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(appAnalyzer));
     int numDocs = 200;
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
       String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
-      doc.add(new TextField("variedField", variedFieldValue, Field.Store.YES));
-      doc.add(new TextField("repetitiveField", repetitiveFieldValue, Field.Store.YES));
+      doc.addLargeText("variedField", variedFieldValue);
+      doc.addLargeText("repetitiveField", repetitiveFieldValue);
       writer.addDocument(doc);
     }
     writer.close();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
index b905d0a..a1c4663 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
@@ -29,8 +29,6 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -61,16 +59,16 @@
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer));
 
     Document doc;
-    doc = new Document();
-    doc.add(new TextField("content", "please divide this sentence into shingles", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "please divide this sentence into shingles");
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new TextField("content", "just another test sentence", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "just another test sentence");
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new TextField("content", "a sentence which contains no test", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "a sentence which contains no test");
     writer.addDocument(doc);
 
     writer.close();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index 9c63bb0..3b5fe1b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -27,9 +27,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -90,18 +88,18 @@
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.setMultiValued("field");
+
+    Document doc = w.newDocument();
     TokenStream tokenStream = analyzer.tokenStream("field", "abcd   ");
     TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
     TokenStream sink = tee.newSinkTokenStream();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
-    Field f1 = new Field("field", tee, ft);
-    Field f2 = new Field("field", sink, ft);
-    doc.add(f1);
-    doc.add(f2);
+    doc.addLargeText("field", tee);
+    doc.addLargeText("field", sink);
     w.addDocument(doc);
     w.close();
 
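
The TeeSink change above shows the term-vector migration in its general form: flags that used to be set on a per-Field FieldType are declared once on the writer's FieldTypes, and a field added twice to one document must also be declared multiValued. A hedged sketch of just that pattern, using only calls visible in the hunk above:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableTermVectors("field");
    fieldTypes.enableTermVectorPositions("field");
    fieldTypes.enableTermVectorOffsets("field");
    fieldTypes.setMultiValued("field");   // required: two values added below

    Document doc = w.newDocument();
    doc.addLargeText("field", tee);       // TokenStream-valued, like Field(name, tee, ft)
    doc.addLargeText("field", sink);
    w.addDocument(doc);
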
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
index 3b3706f..600d93b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
@@ -1,11 +1,12 @@
 package org.apache.lucene.analysis.standard;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.standard.ClassicAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -17,10 +18,6 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.BytesRef;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Random;
-
 
 /**
  * Copyright 2004 The Apache Software Foundation
@@ -257,17 +254,17 @@
 
     char[] chars = new char[IndexWriter.MAX_TERM_LENGTH];
     Arrays.fill(chars, 'x');
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     final String bigTerm = new String(chars);
 
     // This produces a too-long term:
     String contents = "abc xyz x" + bigTerm + " another term";
-    doc.add(new TextField("content", contents, Field.Store.NO));
+    doc.addLargeText("content", contents);
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(new TextField("content", "abc bbb ccc", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "abc bbb ccc");
     writer.addDocument(doc);
     writer.close();
 
@@ -297,11 +294,11 @@
 
     // Make sure we can add a document with exactly the
     // maximum length term, and search on that term:
-    doc = new Document();
-    doc.add(new TextField("content", bigTerm, Field.Store.NO));
     ClassicAnalyzer sa = new ClassicAnalyzer();
     sa.setMaxTokenLength(100000);
-    writer  = new IndexWriter(dir, new IndexWriterConfig(sa));
+    writer = new IndexWriter(dir, new IndexWriterConfig(sa));
+    doc = writer.newDocument();
+    doc.addLargeText("content", bigTerm);
     writer.addDocument(doc);
     writer.close();
     reader = DirectoryReader.open(dir);
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java b/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java
deleted file mode 100644
index 8334065..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationDocValuesField.java
+++ /dev/null
@@ -1,144 +0,0 @@
-package org.apache.lucene.collation;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.text.Collator;
-import java.util.Locale;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.DocValuesRangeFilter;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-
-/**
- * trivial test of CollationDocValuesField
- */
-public class TestCollationDocValuesField extends LuceneTestCase {
-  
-  public void testBasic() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = newField("field", "", StringField.TYPE_STORED);
-    CollationDocValuesField collationField = new CollationDocValuesField("collated", Collator.getInstance(Locale.ENGLISH));
-    doc.add(field);
-    doc.add(collationField);
-
-    field.setStringValue("ABC");
-    collationField.setStringValue("ABC");
-    iw.addDocument(doc);
-    
-    field.setStringValue("abc");
-    collationField.setStringValue("abc");
-    iw.addDocument(doc);
-    
-    IndexReader ir = iw.getReader();
-    iw.close();
-    
-    IndexSearcher is = newSearcher(ir);
-    
-    SortField sortField = new SortField("collated", SortField.Type.STRING);
-    
-    TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField));
-    assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field"));
-    assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field"));
-    ir.close();
-    dir.close();
-  }
-  
-  public void testRanges() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = newField("field", "", StringField.TYPE_STORED);
-    Collator collator = Collator.getInstance(Locale.getDefault()); // uses -Dtests.locale
-    if (random().nextBoolean()) {
-      collator.setStrength(Collator.PRIMARY);
-    }
-    CollationDocValuesField collationField = new CollationDocValuesField("collated", collator);
-    doc.add(field);
-    doc.add(collationField);
-    
-    int numDocs = atLeast(500);
-    for (int i = 0; i < numDocs; i++) {
-      String value = TestUtil.randomSimpleString(random());
-      field.setStringValue(value);
-      collationField.setStringValue(value);
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.close();
-    IndexSearcher is = newSearcher(ir);
-    
-    int numChecks = atLeast(100);
-    
-    try {
-      for (int i = 0; i < numChecks; i++) {
-        String start = TestUtil.randomSimpleString(random());
-        String end = TestUtil.randomSimpleString(random());
-        BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray());
-        BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray());
-        Query query = new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));
-        doTestRanges(is, start, end, query, collator);
-      }
-    } finally {
-      ir.close();
-      dir.close();
-    }
-  }
-  
-  private void doTestRanges(IndexSearcher is, String startPoint, String endPoint, Query query, Collator collator) throws Exception { 
-    QueryUtils.check(query);
-    
-    // positive test
-    TopDocs docs = is.search(query, is.getIndexReader().maxDoc());
-    for (ScoreDoc doc : docs.scoreDocs) {
-      String value = is.doc(doc.doc).get("field");
-      assertTrue(collate(collator, value, startPoint) >= 0);
-      assertTrue(collate(collator, value, endPoint) <= 0);
-    }
-    
-    // negative test
-    BooleanQuery bq = new BooleanQuery();
-    bq.add(new MatchAllDocsQuery(), Occur.SHOULD);
-    bq.add(query, Occur.MUST_NOT);
-    docs = is.search(bq, is.getIndexReader().maxDoc());
-    for (ScoreDoc doc : docs.scoreDocs) {
-      String value = is.doc(doc.doc).get("field");
-      assertTrue(collate(collator, value, startPoint) < 0 || collate(collator, value, endPoint) > 0);
-    }
-  }
-}
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
index f1352cb..ff2d6d7 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
@@ -21,7 +21,7 @@
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.icu.ICUFoldingFilter;
-import org.apache.lucene.analysis.util.AbstractAnalysisFactory; // javadocs
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java b/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
index b70b0ae..33d0bb9 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationDocValuesField.java
@@ -17,24 +17,50 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.search.DocValuesRangeFilter;
 import org.apache.lucene.util.BytesRef;
-
 import com.ibm.icu.text.Collator;
 import com.ibm.icu.text.RawCollationKey;
 
 /**
- * Indexes collation keys as a single-valued {@link SortedDocValuesField}.
+ * Indexes collation keys as single-valued sorted doc values.
  * <p>
- * This is more efficient that {@link ICUCollationKeyAnalyzer} if the field 
+ * This is more efficient than {@link ICUCollationKeyAnalyzer} if the field 
  * only has one value: no uninversion is necessary to sort on the field, 
  * locale-sensitive range queries can still work via {@link DocValuesRangeFilter}, 
  * and the underlying data structures built at index-time are likely more efficient 
  * and use less memory than FieldCache.
  */
-public final class ICUCollationDocValuesField extends Field {
+public final class ICUCollationDocValuesField implements IndexableField {
+
+  private static final IndexableFieldType TYPE = new IndexableFieldType() {
+      @Override
+      public DocValuesType docValuesType() {
+        return DocValuesType.SORTED;
+      }
+    };
+
+  @Override
+  public String name() {
+    return name;
+  }
+
+  @Override
+  public IndexableFieldType fieldType() {
+    return TYPE;
+  }
+
+  @Override
+  public BytesRef binaryDocValue() {
+    return bytes;
+  }
+
   private final String name;
   private final Collator collator;
   private final BytesRef bytes = new BytesRef();
@@ -52,22 +77,14 @@
   // TODO: can we make this trap-free? maybe just synchronize on the collator
   // instead? 
   public ICUCollationDocValuesField(String name, Collator collator) {
-    super(name, SortedDocValuesField.TYPE);
     this.name = name;
     try {
       this.collator = (Collator) collator.clone();
     } catch (CloneNotSupportedException e) {
       throw new RuntimeException(e);
     }
-    fieldsData = bytes; // so wrong setters cannot be called
   }
 
-  @Override
-  public String name() {
-    return name;
-  }
-  
-  @Override
   public void setStringValue(String value) {
     collator.getRawCollationKey(value, key);
     bytes.bytes = key.bytes;
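
With Field no longer available as a base class, ICUCollationDocValuesField implements IndexableField directly: a constant IndexableFieldType that reports only DocValuesType.SORTED, plus a reusable BytesRef returned from binaryDocValue(). Assuming that IndexableField surface (name/fieldType/binaryDocValue, exactly as in the hunk above), the same pattern works for any reusable doc-values-only field; a hypothetical sketch:

    // Hypothetical field following the pattern above; the IndexableField
    // methods shown are assumed from this patch, not a released API.
    public final class ReusableSortedBytesField implements IndexableField {
      private static final IndexableFieldType TYPE = new IndexableFieldType() {
        @Override
        public DocValuesType docValuesType() {
          return DocValuesType.SORTED;
        }
      };

      private final String name;
      private final BytesRef bytes = new BytesRef();

      public ReusableSortedBytesField(String name) {
        this.name = name;
      }

      // Reuse one instance across documents: set the next value, then addDocument.
      public void setValue(byte[] value) {
        bytes.bytes = value;
        bytes.offset = 0;
        bytes.length = value.length;
      }

      @Override
      public String name() { return name; }

      @Override
      public IndexableFieldType fieldType() { return TYPE; }

      @Override
      public BytesRef binaryDocValue() { return bytes; }
    }
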
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
index dff883b..d66393a 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationDocValuesField.java
@@ -18,10 +18,10 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesRangeFilter;
@@ -33,12 +33,10 @@
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-
 import com.ibm.icu.text.Collator;
 import com.ibm.icu.util.ULocale;
 
@@ -50,18 +48,18 @@
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = newField("field", "", StringField.TYPE_STORED);
-    ICUCollationDocValuesField collationField = new ICUCollationDocValuesField("collated", Collator.getInstance(ULocale.ENGLISH));
-    doc.add(field);
-    doc.add(collationField);
 
-    field.setStringValue("ABC");
+    Document doc = iw.newDocument();
+    doc.addAtom("field", "ABC");
+    ICUCollationDocValuesField collationField = new ICUCollationDocValuesField("collated", Collator.getInstance(ULocale.ENGLISH));
     collationField.setStringValue("ABC");
+    doc.add(collationField);
     iw.addDocument(doc);
     
-    field.setStringValue("abc");
+    doc = iw.newDocument();
+    doc.addAtom("field", "abc");
     collationField.setStringValue("abc");
+    doc.add(collationField);
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -81,20 +79,20 @@
   public void testRanges() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = newField("field", "", StringField.TYPE_STORED);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableSorting("collated");
     Collator collator = Collator.getInstance(); // uses -Dtests.locale
     if (random().nextBoolean()) {
       collator.setStrength(Collator.PRIMARY);
     }
     ICUCollationDocValuesField collationField = new ICUCollationDocValuesField("collated", collator);
-    doc.add(field);
-    doc.add(collationField);
     
     int numDocs = atLeast(500);
     for (int i = 0; i < numDocs; i++) {
+      Document doc = iw.newDocument();
       String value = TestUtil.randomSimpleString(random());
-      field.setStringValue(value);
+      doc.addAtom("field", value);
+      doc.add(collationField);
       collationField.setStringValue(value);
       iw.addDocument(doc);
     }
@@ -109,7 +107,7 @@
       String end = TestUtil.randomSimpleString(random());
       BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray());
       BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray());
-      Query query = new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true));
+      Query query = new ConstantScoreQuery(DocValuesRangeFilter.newBytesRefRange("collated", lowerVal, upperVal, true, true, null));
       doTestRanges(is, start, end, query, collator);
     }
     
@@ -123,7 +121,7 @@
     // positive test
     TopDocs docs = is.search(query, is.getIndexReader().maxDoc());
     for (ScoreDoc doc : docs.scoreDocs) {
-      String value = is.doc(doc.doc).get("field");
+      String value = is.doc(doc.doc).getString("field");
       assertTrue(collator.compare(value, startPoint) >= 0);
       assertTrue(collator.compare(value, endPoint) <= 0);
     }
@@ -134,7 +132,7 @@
     bq.add(query, Occur.MUST_NOT);
     docs = is.search(bq, is.getIndexReader().maxDoc());
     for (ScoreDoc doc : docs.scoreDocs) {
-      String value = is.doc(doc.doc).get("field");
+      String value = is.doc(doc.doc).getString("field");
       assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0);
     }
   }
diff --git a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
index 892b35a..52eb974 100644
--- a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
+++ b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
@@ -17,15 +17,15 @@
  * limitations under the License.
  */
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TopDocs;
@@ -35,9 +35,6 @@
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.Map;
-
 /**
  * Testcase for {@link UIMABaseAnalyzer}
  */
@@ -70,11 +67,11 @@
     Directory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add the first doc
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     String dummyTitle = "this is a dummy title ";
-    doc.add(new TextField("title", dummyTitle, Field.Store.YES));
+    doc.addLargeText("title", dummyTitle);
     String dummyContent = "there is some content written here";
-    doc.add(new TextField("contents", dummyContent, Field.Store.YES));
+    doc.addLargeText("contents", dummyContent);
     writer.addDocument(doc);
     writer.commit();
 
@@ -83,7 +80,7 @@
     IndexSearcher indexSearcher = newSearcher(directoryReader);
     TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
     assertTrue(result.totalHits > 0);
-    StoredDocument d = indexSearcher.doc(result.scoreDocs[0].doc);
+    Document d = indexSearcher.doc(result.scoreDocs[0].doc);
     assertNotNull(d);
     assertNotNull(d.getField("title"));
     assertEquals(dummyTitle, d.getField("title").stringValue());
@@ -91,11 +88,11 @@
     assertEquals(dummyContent, d.getField("contents").stringValue());
 
     // add a second doc
-    doc = new Document();
+    doc = writer.newDocument();
     String dogmasTitle = "dogmas";
-    doc.add(new TextField("title", dogmasTitle, Field.Store.YES));
+    doc.addLargeText("title", dogmasTitle);
     String dogmasContents = "white men can't jump";
-    doc.add(new TextField("contents", dogmasContents, Field.Store.YES));
+    doc.addLargeText("contents", dogmasContents);
     writer.addDocument(doc);
     writer.commit();
 
@@ -103,7 +100,7 @@
     directoryReader = DirectoryReader.open(dir);
     indexSearcher = newSearcher(directoryReader);
     result = indexSearcher.search(new MatchAllDocsQuery(), 2);
-    StoredDocument d1 = indexSearcher.doc(result.scoreDocs[1].doc);
+    Document d1 = indexSearcher.doc(result.scoreDocs[1].doc);
     assertNotNull(d1);
     assertNotNull(d1.getField("title"));
     assertEquals(dogmasTitle, d1.getField("title").stringValue());
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index a73bcc4..d86d733 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -37,24 +37,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -140,7 +127,7 @@
     IndexWriterConfig conf = new IndexWriterConfig(analyzer)
       .setMergePolicy(mp).setUseCompoundFile(false);
     IndexWriter writer = new IndexWriter(dir, conf);
-    LineFileDocs docs = new LineFileDocs(null, true);
+    LineFileDocs docs = new LineFileDocs(writer, null);
     for(int i=0;i<50;i++) {
       writer.addDocument(docs.nextDoc());
     }
@@ -171,18 +158,23 @@
     IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random()))
       .setUseCompoundFile(false).setMergePolicy(NoMergePolicy.INSTANCE);
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv1");
+    fieldTypes.disableSorting("bdv1_c");
+    fieldTypes.disableSorting("bdv2");
+    fieldTypes.disableSorting("bdv2_c");
     // create an index w/ few doc-values fields, some with updates and some without
     for (int i = 0; i < 30; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "" + i, Field.Store.NO));
-      doc.add(new NumericDocValuesField("ndv1", i));
-      doc.add(new NumericDocValuesField("ndv1_c", i*2));
-      doc.add(new NumericDocValuesField("ndv2", i*3));
-      doc.add(new NumericDocValuesField("ndv2_c", i*6));
-      doc.add(new BinaryDocValuesField("bdv1", toBytes(i)));
-      doc.add(new BinaryDocValuesField("bdv1_c", toBytes(i*2)));
-      doc.add(new BinaryDocValuesField("bdv2", toBytes(i*3)));
-      doc.add(new BinaryDocValuesField("bdv2_c", toBytes(i*6)));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "" + i);
+      doc.addInt("ndv1", i);
+      doc.addInt("ndv1_c", i*2);
+      doc.addInt("ndv2", i*3);
+      doc.addInt("ndv2_c", i*6);
+      doc.addBinary("bdv1", toBytes(i));
+      doc.addBinary("bdv1_c", toBytes(i*2));
+      doc.addBinary("bdv2", toBytes(i*3));
+      doc.addBinary("bdv2_c", toBytes(i*6));
       writer.addDocument(doc);
       if ((i+1) % 10 == 0) {
         writer.commit(); // flush every 10 docs
@@ -627,13 +619,13 @@
 
     for(int i=0;i<35;i++) {
       if (liveDocs.get(i)) {
-        StoredDocument d = reader.document(i);
-        List<StorableField> fields = d.getFields();
+        Document d = reader.document(i);
+        List<IndexableField> fields = d.getFields();
         boolean isProxDoc = d.getField("content3") == null;
         if (isProxDoc) {
           final int numFields = is40Index ? 7 : 5;
           assertEquals(numFields, fields.size());
-          StorableField f =  d.getField("id");
+          IndexableField f =  d.getField("id");
           assertEquals(""+i, f.stringValue());
 
           f = d.getField("utf8");
@@ -684,7 +676,7 @@
       }
       
       for (int i=0;i<35;i++) {
-        int id = Integer.parseInt(reader.document(i).get("id"));
+        int id = Integer.parseInt(reader.document(i).getString("id"));
         assertEquals(id, dvByte.get(i));
         
         byte bytes[] = new byte[] {
@@ -729,8 +721,8 @@
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term(new String("content"), "aaa")), null, 1000).scoreDocs;
 
     // First document should be #0
-    StoredDocument d = searcher.getIndexReader().document(hits[0].doc);
-    assertEquals("didn't get the right document first", "0", d.get("id"));
+    Document d = searcher.getIndexReader().document(hits[0].doc);
+    assertEquals("didn't get the right document first", "0", d.getString("id"));
 
     doTestHits(hits, 34, searcher.getIndexReader());
     
@@ -773,8 +765,8 @@
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
-    StoredDocument d = searcher.getIndexReader().document(hits[0].doc);
-    assertEquals("wrong first document", "0", d.get("id"));
+    Document d = searcher.getIndexReader().document(hits[0].doc);
+    assertEquals("wrong first document", "0", d.getString("id"));
     doTestHits(hits, 44, searcher.getIndexReader());
     reader.close();
 
@@ -801,8 +793,8 @@
     IndexSearcher searcher = newSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
     assertEquals("wrong number of hits", 34, hits.length);
-    StoredDocument d = searcher.doc(hits[0].doc);
-    assertEquals("wrong first document", "0", d.get("id"));
+    Document d = searcher.doc(hits[0].doc);
+    assertEquals("wrong first document", "0", d.getString("id"));
     reader.close();
 
     // fully merge
@@ -862,56 +854,56 @@
     dir.close();
   }
 
-  private void addDoc(IndexWriter writer, int id) throws IOException
-  {
-    Document doc = new Document();
-    doc.add(new TextField("content", "aaa", Field.Store.NO));
-    doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
-    doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
-    doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
-    doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
+  private void addDoc(IndexWriter writer, int id) throws IOException {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"autf8", "utf8", "content2", "fie\u2C77ld", "content5", "content6"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    fieldTypes.setIndexOptions("content6", IndexOptions.DOCS_AND_FREQS);
+    fieldTypes.disableSorting("dvBytesDerefFixed");
+    fieldTypes.disableSorting("dvBytesDerefVar");
+    fieldTypes.disableSorting("dvBytesStraightFixed");
+    fieldTypes.disableSorting("dvBytesStraightVar");
+    fieldTypes.setMultiValued("dvSortedSet");
+    fieldTypes.setMultiValued("dvSortedNumeric");
+
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addAtom("id", Integer.toString(id));
+    doc.addLargeText("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd");
+    doc.addLargeText("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd");
+    doc.addLargeText("content2", "here is more content with aaa aaa aaa");
+    doc.addLargeText("fie\u2C77ld", "field with non-ascii name");
     // add numeric fields, to test if flex preserves encoding
-    doc.add(new IntField("trieInt", id, Field.Store.NO));
-    doc.add(new LongField("trieLong", (long) id, Field.Store.NO));
+    // nocommit get these into back compat index
+    //doc.add(new IntField("trieInt", id, Field.Store.NO));
+    //doc.add(new LongField("trieLong", (long) id, Field.Store.NO));
     // add docvalues fields
-    doc.add(new NumericDocValuesField("dvByte", (byte) id));
+    doc.addInt("dvByte", (byte) id);
     byte bytes[] = new byte[] {
       (byte)(id >>> 24), (byte)(id >>> 16),(byte)(id >>> 8),(byte)id
     };
     BytesRef ref = new BytesRef(bytes);
-    doc.add(new BinaryDocValuesField("dvBytesDerefFixed", ref));
-    doc.add(new BinaryDocValuesField("dvBytesDerefVar", ref));
-    doc.add(new SortedDocValuesField("dvBytesSortedFixed", ref));
-    doc.add(new SortedDocValuesField("dvBytesSortedVar", ref));
-    doc.add(new BinaryDocValuesField("dvBytesStraightFixed", ref));
-    doc.add(new BinaryDocValuesField("dvBytesStraightVar", ref));
-    doc.add(new DoubleDocValuesField("dvDouble", (double)id));
-    doc.add(new FloatDocValuesField("dvFloat", (float)id));
-    doc.add(new NumericDocValuesField("dvInt", id));
-    doc.add(new NumericDocValuesField("dvLong", id));
-    doc.add(new NumericDocValuesField("dvPacked", id));
-    doc.add(new NumericDocValuesField("dvShort", (short)id));
-    doc.add(new SortedSetDocValuesField("dvSortedSet", ref));
-    doc.add(new SortedNumericDocValuesField("dvSortedNumeric", id));
+    doc.addBinary("dvBytesDerefFixed", ref);
+    doc.addBinary("dvBytesDerefVar", ref);
+    doc.addAtom("dvBytesSortedFixed", ref);
+    doc.addAtom("dvBytesSortedVar", ref);
+    doc.addBinary("dvBytesStraightFixed", ref);
+    doc.addBinary("dvBytesStraightVar", ref);
+    doc.addDouble("dvDouble", (double) id);
+    doc.addFloat("dvFloat", (float) id);
+    doc.addInt("dvInt", id);
+    doc.addLong("dvLong", id);
+    doc.addInt("dvPacked", id);
+    doc.addInt("dvShort", (short) id);
+    doc.addAtom("dvSortedSet", ref);
+    doc.addInt("dvSortedNumeric", id);
     // a field with both offsets and term vectors for a cross-check
-    FieldType customType3 = new FieldType(TextField.TYPE_STORED);
-    customType3.setStoreTermVectors(true);
-    customType3.setStoreTermVectorPositions(true);
-    customType3.setStoreTermVectorOffsets(true);
-    customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    doc.add(new Field("content5", "here is more content with aaa aaa aaa", customType3));
+    doc.addLargeText("content5", "here is more content with aaa aaa aaa");
     // a field that omits only positions
-    FieldType customType4 = new FieldType(TextField.TYPE_STORED);
-    customType4.setStoreTermVectors(true);
-    customType4.setStoreTermVectorPositions(false);
-    customType4.setStoreTermVectorOffsets(true);
-    customType4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    doc.add(new Field("content6", "here is more content with aaa aaa aaa", customType4));
+    doc.addLargeText("content6", "here is more content with aaa aaa aaa");
     // TODO: 
     //   index different norms types via similarity (we use a random one currently?!)
     //   remove any analyzer randomness, explicitly add payloads for certain fields.
@@ -919,16 +911,9 @@
   }
 
   private void addNoProxDoc(IndexWriter writer) throws IOException {
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setIndexOptions(IndexOptions.DOCS);
-    Field f = new Field("content3", "aaa", customType);
-    doc.add(f);
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
-    customType2.setIndexOptions(IndexOptions.DOCS);
-    f = new Field("content4", "aaa", customType2);
-    doc.add(f);
+    Document doc = writer.newDocument();
+    doc.addAtom("content3", "aaa");
+    doc.addAtom("content4", "aaa");
     writer.addDocument(doc);
   }
 
@@ -990,7 +975,7 @@
     // first create a little index with the current code and get the version
     Directory currentDir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), currentDir);
-    riw.addDocument(new Document());
+    riw.addDocument(riw.newDocument());
     riw.close();
     DirectoryReader ir = DirectoryReader.open(currentDir);
     SegmentReader air = (SegmentReader)ir.leaves().get(0).reader();
@@ -1031,7 +1016,9 @@
       verifyUsesDefaultCodec(dir, name);
     }
   }
-  
+
+  // nocommit put back
+  /*
   public void testNumericFields() throws Exception {
     for (String name : oldNames) {
       
@@ -1042,13 +1029,13 @@
       for (int id=10; id<15; id++) {
         ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", NumericUtils.PRECISION_STEP_DEFAULT_32, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
         assertEquals("wrong number of hits", 1, hits.length);
-        StoredDocument d = searcher.doc(hits[0].doc);
-        assertEquals(String.valueOf(id), d.get("id"));
+        Document d = searcher.doc(hits[0].doc);
+        assertEquals(String.valueOf(id), d.getString("id"));
         
         hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", NumericUtils.PRECISION_STEP_DEFAULT, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
         assertEquals("wrong number of hits", 1, hits.length);
         d = searcher.doc(hits[0].doc);
-        assertEquals(String.valueOf(id), d.get("id"));
+        assertEquals(String.valueOf(id), d.getString("id"));
       }
       
       // check that also lower-precision fields are ok
@@ -1076,6 +1063,7 @@
       reader.close();
     }
   }
+  */
   
   private int checkAllSegmentsUpgraded(Directory dir) throws IOException {
     final SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
@@ -1302,7 +1290,7 @@
 
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                            .setOpenMode(OpenMode.APPEND));
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       DirectoryReader r = DirectoryReader.open(writer, true);
       writer.commit();
       r.close();
@@ -1321,9 +1309,9 @@
 
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                            .setOpenMode(OpenMode.APPEND));
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       writer.close();
       dir.close();
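
One detail worth calling out in the back-compat rewrite: every addBinary call is preceded by fieldTypes.disableSorting(...) for that field. The inference (from this patch alone, not confirmed elsewhere) is that binary fields default to a sortable doc-values representation on this branch and must opt out to get plain binary doc values:

    // Inference from this patch: disableSorting turns an addBinary field into
    // plain (non-sorted) binary doc values. The field name is hypothetical.
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableSorting("payload");
    Document doc = writer.newDocument();
    doc.addAtom("id", "0");
    doc.addBinary("payload", new BytesRef(new byte[] {0x12, 0x34}));
    writer.addDocument(doc);
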
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/AbstractQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/AbstractQueryMaker.java
index eb356a8..7bfc4c2 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/AbstractQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/AbstractQueryMaker.java
@@ -15,8 +15,9 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.Query;
 import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.search.Query;
 
 /**
  * Abstract base query maker. 
@@ -33,12 +34,11 @@
     qnum = 0;
   }
 
-  protected abstract Query[] prepareQueries() throws Exception;
+  protected abstract Query[] prepareQueries(FieldTypes fieldTypes) throws Exception;
 
   @Override
   public void setConfig(Config config) throws Exception {
     this.config = config;
-    queries = prepareQueries();
   }
 
   @Override
@@ -55,7 +55,10 @@
   }
 
   @Override
-  public Query makeQuery() throws Exception {
+  public Query makeQuery(FieldTypes fieldTypes) throws Exception {
+    if (queries == null) {
+      queries = prepareQueries(fieldTypes);
+    }
     return queries[nextQnum()];
   }
   
@@ -71,7 +74,7 @@
- * @see org.apache.lucene.benchmark.byTask.feeds.QueryMaker#makeQuery(int)
+ * @see org.apache.lucene.benchmark.byTask.feeds.QueryMaker#makeQuery(FieldTypes, int)
   */
   @Override
-  public Query makeQuery(int size) throws Exception {
+  public Query makeQuery(FieldTypes fieldTypes, int size) throws Exception {
-    throw new Exception(this+".makeQuery(int size) is not supported!");
+    throw new Exception(this+".makeQuery(FieldTypes fieldTypes, int size) is not supported!");
   }
 }
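
The QueryMaker contract change above means queries can no longer be built in setConfig: no FieldTypes exists at configuration time, so prepareQueries(FieldTypes) now runs lazily on the first makeQuery(FieldTypes) call. A hedged sketch of a subclass under the new contract (the query bodies are illustrative only):

    import org.apache.lucene.benchmark.byTask.feeds.AbstractQueryMaker;
    import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class SimpleTermQueryMaker extends AbstractQueryMaker {
      @Override
      protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
        // Invoked lazily from makeQuery(FieldTypes), once the index schema exists.
        return new Query[] {
          new TermQuery(new Term(DocMaker.BODY_FIELD, "apache")),
          new TermQuery(new Term(DocMaker.TITLE_FIELD, "lucene")),
        };
      }
    }
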
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java
index 94b2eda..bb29fe2 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java
@@ -35,15 +35,8 @@
 
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.IndexWriter;
 
 /**
  * Creates {@link Document} objects. Uses a {@link ContentSource} to generate
@@ -73,8 +66,6 @@
  * stored with offsets (default <b>false</b>).
  * <li><b>doc.store.body.bytes</b> - specifies whether to store the raw bytes of
  * the document's content in the document (default <b>false</b>).
- * <li><b>doc.reuse.fields</b> - specifies whether Field and Document objects
- * should be reused (default <b>true</b>).
- * <li><b>doc.index.props</b> - specifies whether the properties returned by
+ * <li><b>doc.index.props</b> - specifies whether the properties returned by the content source should also be indexed (default <b>false</b>).
  * <li><b>doc.random.id.limit</b> - if specified, docs will be assigned random
  * IDs from 0 to this limit.  This is useful with UpdateDoc
@@ -93,92 +84,10 @@
   private int updateDocIDLimit;
 
   /**
-   * Document state, supports reuse of field instances
-   * across documents (see <code>reuseFields</code> parameter).
+   * Document state.
    */
   protected static class DocState {
-    
-    private final Map<String,Field> fields;
-    private final Map<String,Field> numericFields;
-    private final boolean reuseFields;
-    final Document doc;
     DocData docData = new DocData();
-    
-    public DocState(boolean reuseFields, FieldType ft, FieldType bodyFt) {
-
-      this.reuseFields = reuseFields;
-      
-      if (reuseFields) {
-        fields =  new HashMap<>();
-        numericFields = new HashMap<>();
-        
-        // Initialize the map with the default fields.
-        fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyFt));
-        fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", ft));
-        fields.put(DATE_FIELD, new Field(DATE_FIELD, "", ft));
-        fields.put(ID_FIELD, new StringField(ID_FIELD, "", Field.Store.YES));
-        fields.put(NAME_FIELD, new Field(NAME_FIELD, "", ft));
-
-        numericFields.put(DATE_MSEC_FIELD, new LongField(DATE_MSEC_FIELD, 0L, Field.Store.NO));
-        numericFields.put(TIME_SEC_FIELD, new IntField(TIME_SEC_FIELD, 0, Field.Store.NO));
-        
-        doc = new Document();
-      } else {
-        numericFields = null;
-        fields = null;
-        doc = null;
-      }
-    }
-
-    /**
-     * Returns a field corresponding to the field name. If
-     * <code>reuseFields</code> was set to true, then it attempts to reuse a
-     * Field instance. If such a field does not exist, it creates a new one.
-     */
-    Field getField(String name, FieldType ft) {
-      if (!reuseFields) {
-        return new Field(name, "", ft);
-      }
-      
-      Field f = fields.get(name);
-      if (f == null) {
-        f = new Field(name, "", ft);
-        fields.put(name, f);
-      }
-      return f;
-    }
-
-    Field getNumericField(String name, NumericType type) {
-      Field f;
-      if (reuseFields) {
-        f = numericFields.get(name);
-      } else {
-        f = null;
-      }
-      
-      if (f == null) {
-        switch(type) {
-        case INT:
-          f = new IntField(name, 0, Field.Store.NO);
-          break;
-        case LONG:
-          f = new LongField(name, 0L, Field.Store.NO);
-          break;
-        case FLOAT:
-          f = new FloatField(name, 0.0F, Field.Store.NO);
-          break;
-        case DOUBLE:
-          f = new DoubleField(name, 0.0, Field.Store.NO);
-          break;
-        default:
-          throw new AssertionError("Cannot get here");
-        }
-        if (reuseFields) {
-          numericFields.put(name, f);
-        }
-      }
-      return f;
-    }
   }
   
   private boolean storeBytes = false;
@@ -193,7 +102,7 @@
   }
 
   // leftovers are thread local, because it is unsafe to share residues between threads
-  private ThreadLocal<LeftOver> leftovr = new ThreadLocal<>();
+  private ThreadLocal<LeftOver> leftover = new ThreadLocal<>();
   private ThreadLocal<DocState> docState = new ThreadLocal<>();
   private ThreadLocal<DateUtil> dateParsers = new ThreadLocal<>();
 
@@ -208,14 +117,14 @@
 
   protected Config config;
 
-  protected FieldType valType;
-  protected FieldType bodyValType;
-    
   protected ContentSource source;
   protected boolean reuseFields;
   protected boolean indexProperties;
-  
+  private boolean tokenized;
+  private boolean bodyTokenized;
+
   private final AtomicInteger numDocsCreated = new AtomicInteger();
+  private IndexWriter schemaApplied;
 
   public DocMaker() {
   }
@@ -223,17 +132,12 @@
   // create a doc
   // use only part of the body, modify it to keep the rest (or use all if size==0).
   // reset the docdata properties so they are not added more than once.
-  private Document createDocument(DocData docData, int size, int cnt) throws UnsupportedEncodingException {
+  private Document createDocument(IndexWriter w, DocData docData, int size, int cnt) throws UnsupportedEncodingException {
+    applySchema(w);
 
     final DocState ds = getDocState();
-    final Document doc = reuseFields ? ds.doc : new Document();
-    doc.clear();
+    final Document doc = w.newDocument();
     
-    // Set ID_FIELD
-    FieldType ft = new FieldType(valType);
-    ft.setStored(true);
-
-    Field idField = ds.getField(ID_FIELD, ft);
     int id;
     if (r != null) {
       id = r.nextInt(updateDocIDLimit);
@@ -243,16 +147,21 @@
         id = numDocsCreated.getAndIncrement();
       }
     }
-    idField.setStringValue(Integer.toString(id));
-    doc.add(idField);
+    if (tokenized) {
+      doc.addLargeText(ID_FIELD, Integer.toString(id));
+    } else {
+      doc.addAtom(ID_FIELD, Integer.toString(id));
+    }
     
     // Set NAME_FIELD
     String name = docData.getName();
     if (name == null) name = "";
     name = cnt < 0 ? name : name + "_" + cnt;
-    Field nameField = ds.getField(NAME_FIELD, valType);
-    nameField.setStringValue(name);
-    doc.add(nameField);
+    if (tokenized) {
+      doc.addLargeText(NAME_FIELD, name);
+    } else {
+      doc.addAtom(NAME_FIELD, name);
+    }
     
     // Set DATE_FIELD
     DateUtil util = dateParsers.get();
@@ -269,31 +178,30 @@
     } else {
       dateString = "";
     }
-    Field dateStringField = ds.getField(DATE_FIELD, valType);
-    dateStringField.setStringValue(dateString);
-    doc.add(dateStringField);
+    if (tokenized) {
+      doc.addLargeText(DATE_FIELD, dateString);
+    } else {
+      doc.addAtom(DATE_FIELD, dateString);
+    }
 
     if (date == null) {
       // just set to right now
       date = new Date();
     }
 
-    Field dateField = ds.getNumericField(DATE_MSEC_FIELD, NumericType.LONG);
-    dateField.setLongValue(date.getTime());
-    doc.add(dateField);
+    doc.addLong(DATE_MSEC_FIELD, date.getTime());
 
     util.cal.setTime(date);
     final int sec = util.cal.get(Calendar.HOUR_OF_DAY)*3600 + util.cal.get(Calendar.MINUTE)*60 + util.cal.get(Calendar.SECOND);
-
-    Field timeSecField = ds.getNumericField(TIME_SEC_FIELD, NumericType.INT);
-    timeSecField.setIntValue(sec);
-    doc.add(timeSecField);
+    doc.addLong(TIME_SEC_FIELD, sec);
     
     // Set TITLE_FIELD
     String title = docData.getTitle();
-    Field titleField = ds.getField(TITLE_FIELD, valType);
-    titleField.setStringValue(title == null ? "" : title);
-    doc.add(titleField);
+    if (tokenized) {
+      doc.addShortText(TITLE_FIELD, title == null ? "" : title);
+    } else {
+      doc.addAtom(TITLE_FIELD, title == null ? "" : title);
+    }
     
     String body = docData.getBody();
     if (body != null && body.length() > 0) {
@@ -312,14 +220,14 @@
         bdy = body.substring(0, size); // use part
         docData.setBody(body.substring(size)); // some left
       }
-      Field bodyField = ds.getField(BODY_FIELD, bodyValType);
-      bodyField.setStringValue(bdy);
-      doc.add(bodyField);
+      if (bodyTokenized) {
+        doc.addLargeText(BODY_FIELD, bdy);
+      } else {
+        doc.addAtom(BODY_FIELD, bdy);
+      }
       
       if (storeBytes) {
-        Field bytesField = ds.getField(BYTES_FIELD, StringField.TYPE_STORED);
-        bytesField.setBytesValue(bdy.getBytes(StandardCharsets.UTF_8));
-        doc.add(bytesField);
+        doc.addBinary(BYTES_FIELD, bdy.getBytes(StandardCharsets.UTF_8));
       }
     }
 
@@ -327,9 +235,11 @@
       Properties props = docData.getProps();
       if (props != null) {
         for (final Map.Entry<Object,Object> entry : props.entrySet()) {
-          Field f = ds.getField((String) entry.getKey(), valType);
-          f.setStringValue((String) entry.getValue());
-          doc.add(f);
+          if (tokenized) {
+            doc.addLargeText((String) entry.getKey(), (String) entry.getValue());
+          } else {
+            doc.addAtom((String) entry.getKey(), (String) entry.getValue());
+          }
         }
         docData.setProps(null);
       }
@@ -340,13 +250,13 @@
   }
 
   private void resetLeftovers() {
-    leftovr.set(null);
+    leftover.set(null);
   }
 
   protected DocState getDocState() {
     DocState ds = docState.get();
     if (ds == null) {
-      ds = new DocState(reuseFields, valType, bodyValType);
+      ds = new DocState();
       docState.set(ds);
     }
     return ds;
@@ -369,10 +279,10 @@
-   * <code>reuseFields</code> was set to true, it will reuse {@link Document}
-   * and {@link Field} instances.
+   * <code>reuseFields</code> is requested via configuration, an
+   * IllegalStateException is thrown: field reuse is no longer supported.
    */
-  public Document makeDocument() throws Exception {
+  public Document makeDocument(IndexWriter w) throws Exception {
     resetLeftovers();
     DocData docData = source.getNextDocData(getDocState().docData);
-    Document doc = createDocument(docData, 0, -1);
+    Document doc = createDocument(w, docData, 0, -1);
     return doc;
   }
 
@@ -380,8 +290,8 @@
-   * Same as {@link #makeDocument()}, only this method creates a document of the
+   * Same as {@link #makeDocument(IndexWriter)}, only this method creates a document of the
    * given size input by <code>size</code>.
    */
-  public Document makeDocument(int size) throws Exception {
-    LeftOver lvr = leftovr.get();
+  public Document makeDocument(IndexWriter w, int size) throws Exception {
+    LeftOver lvr = leftover.get();
     if (lvr == null || lvr.docdata == null || lvr.docdata.getBody() == null
         || lvr.docdata.getBody().length() == 0) {
       resetLeftovers();
@@ -395,13 +305,13 @@
       cnt = 0;
       dd.setBody(dd2.getBody() + dd.getBody());
     }
-    Document doc = createDocument(dd, size, cnt);
+    Document doc = createDocument(w, dd, size, cnt);
     if (dd.getBody() == null || dd.getBody().length() == 0) {
       resetLeftovers();
     } else {
       if (lvr == null) {
         lvr = new LeftOver();
-        leftovr.set(lvr);
+        leftover.set(lvr);
       }
       lvr.docdata = dd;
       lvr.cnt = ++cnt;
@@ -418,43 +328,101 @@
     numDocsCreated.set(0);
     resetLeftovers();
   }
+
+  private void applySchema(IndexWriter w) {
+    if (schemaApplied == w) {
+      return;
+    }
+    schemaApplied = w;
+
+    boolean stored = config.get("doc.stored", false);
+    boolean bodyStored = config.get("doc.body.stored", stored);
+    boolean norms = config.get("doc.tokenized.norms", false);
+    boolean bodyNorms = config.get("doc.body.tokenized.norms", true);
+    boolean termVec = config.get("doc.term.vector", false);
+    boolean termVecPositions = config.get("doc.term.vector.positions", false);
+    boolean termVecOffsets = config.get("doc.term.vector.offsets", false);
+    tokenized = config.get("doc.tokenized", true);
+    bodyTokenized = config.get("doc.body.tokenized", tokenized);
+    if (bodyTokenized == false) {
+      bodyNorms = false;
+    }
+
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+
+    for(String fieldName : new String[] {ID_FIELD,
+                                         NAME_FIELD,
+                                         DATE_FIELD,
+                                         TITLE_FIELD}) {
+      if (stored) {
+        fieldTypes.enableStored(fieldName);
+      } else {
+        fieldTypes.disableStored(fieldName);
+      }
+
+      if (norms) {
+        fieldTypes.enableNorms(fieldName);
+      } else {
+        fieldTypes.disableNorms(fieldName);
+      }
+
+      if (termVec) {
+        fieldTypes.enableTermVectors(fieldName);
+        if (termVecPositions) {
+          fieldTypes.enableTermVectorPositions(fieldName);
+        } else {
+          fieldTypes.disableTermVectorPositions(fieldName);
+        }
+        if (termVecOffsets) {
+          fieldTypes.enableTermVectorOffsets(fieldName);
+        } else {
+          fieldTypes.disableTermVectorOffsets(fieldName);
+        }
+      } else {
+        fieldTypes.disableTermVectors(fieldName);
+      }
+    }
+
+    if (bodyStored) {
+      fieldTypes.enableStored(BODY_FIELD);
+    } else {
+      fieldTypes.disableStored(BODY_FIELD);
+    }
+
+    if (bodyNorms) {
+      fieldTypes.enableNorms(BODY_FIELD);
+    } else {
+      fieldTypes.disableNorms(BODY_FIELD);
+    }
+
+    if (termVec) {
+      fieldTypes.enableTermVectors(BODY_FIELD);
+      if (termVecPositions) {
+        fieldTypes.enableTermVectorPositions(BODY_FIELD);
+      } else {
+        fieldTypes.disableTermVectorPositions(BODY_FIELD);
+      }
+      if (termVecOffsets) {
+        fieldTypes.enableTermVectorOffsets(BODY_FIELD);
+      } else {
+        fieldTypes.disableTermVectorOffsets(BODY_FIELD);
+      }
+    } else {
+      fieldTypes.disableTermVectors(BODY_FIELD);
+    }
+  }
   
   /** Set the configuration parameters of this doc maker. */
   public void setConfig(Config config, ContentSource source) {
     this.config = config;
     this.source = source;
 
-    boolean stored = config.get("doc.stored", false);
-    boolean bodyStored = config.get("doc.body.stored", stored);
-    boolean tokenized = config.get("doc.tokenized", true);
-    boolean bodyTokenized = config.get("doc.body.tokenized", tokenized);
-    boolean norms = config.get("doc.tokenized.norms", false);
-    boolean bodyNorms = config.get("doc.body.tokenized.norms", true);
-    boolean termVec = config.get("doc.term.vector", false);
-    boolean termVecPositions = config.get("doc.term.vector.positions", false);
-    boolean termVecOffsets = config.get("doc.term.vector.offsets", false);
-    
-    valType = new FieldType(TextField.TYPE_NOT_STORED);
-    valType.setStored(stored);
-    valType.setTokenized(tokenized);
-    valType.setOmitNorms(!norms);
-    valType.setStoreTermVectors(termVec);
-    valType.setStoreTermVectorPositions(termVecPositions);
-    valType.setStoreTermVectorOffsets(termVecOffsets);
-    valType.freeze();
-
-    bodyValType = new FieldType(TextField.TYPE_NOT_STORED);
-    bodyValType.setStored(bodyStored);
-    bodyValType.setTokenized(bodyTokenized);
-    bodyValType.setOmitNorms(!bodyNorms);
-    bodyValType.setStoreTermVectors(termVec);
-    bodyValType.setStoreTermVectorPositions(termVecPositions);
-    bodyValType.setStoreTermVectorOffsets(termVecOffsets);
-    bodyValType.freeze();
-
     storeBytes = config.get("doc.store.body.bytes", false);
     
-    reuseFields = config.get("doc.reuse.fields", true);
+    if (config.get("doc.reuse.fields", false)) {
+      throw new IllegalStateException("field reuse is no longer supported");
+    }
 
     // In a multi-rounds run, it is important to reset DocState since settings
     // of fields may change between rounds, and this is the only way to reset
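
Taken together, the DocMaker changes replace the per-field FieldType plumbing with a schema applied once per writer in applySchema(). A condensed driving sketch, assuming SingleDocSource (a small built-in ContentSource) and illustrative property values:

import java.util.Properties;

import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.benchmark.byTask.feeds.SingleDocSource;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;

public class DocMakerSketch {
  public static void main(String[] args) throws Exception {
    Properties p = new Properties();
    p.setProperty("doc.stored", "true");    // schema knobs read by applySchema()
    p.setProperty("doc.tokenized", "true");
    Config config = new Config(p);

    DocMaker dm = new DocMaker();
    dm.setConfig(config, new SingleDocSource());
    RAMDirectory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
    // The first call applies the configured schema to w.getFieldTypes(),
    // then builds the document via w.newDocument().
    Document doc = dm.makeDocument(w);
    w.close();
    dir.close();
  }
}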
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
index 606cc79..60af63c 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
@@ -23,6 +23,8 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.MultiTermQuery;
@@ -32,7 +34,6 @@
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
-import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 
 /**
  * A QueryMaker that uses common and uncommon actual Wikipedia queries for
@@ -122,7 +123,7 @@
   }
 
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
     // analyzer (default is standard analyzer)
     Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer", StandardAnalyzer.class.getName()));
 
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
index 622018b..a4ffa00 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
@@ -1,12 +1,5 @@
 package org.apache.lucene.benchmark.byTask.feeds;
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.queryparser.classic.QueryParser;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
-import org.apache.lucene.util.IOUtils;
-
 import java.io.*;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
@@ -15,6 +8,14 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queryparser.classic.ParseException;
+import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.IOUtils;
+
 /**
  * Copyright 2004 The Apache Software Foundation
  * <p/>
@@ -49,7 +50,7 @@
 
 
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
 
     Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer",
             "org.apache.lucene.analysis.standard.StandardAnalyzer"));
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
index 731155a..ee79b8d 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
 import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.Query;
 import com.ibm.icu.text.RuleBasedNumberFormat;
@@ -40,12 +41,12 @@
                                                                        RuleBasedNumberFormat.SPELLOUT);
 
   @Override
-  public Query makeQuery(int size) throws Exception {
+  public Query makeQuery(FieldTypes fieldTypes, int size) throws Exception {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  public synchronized Query makeQuery() throws Exception {
+  public synchronized Query makeQuery(FieldTypes fieldTypes) throws Exception {
-    return parser.parse("" + rnbf.format(getNextCounter()) + "");
+    return parser.parse(rnbf.format(getNextCounter()));
   }
 
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
index f4b4668..b6a29c3 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
@@ -18,8 +18,9 @@
  */
 
 
-import org.apache.lucene.search.Query;
 import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.search.Query;
 
 
 /**
@@ -32,13 +33,13 @@
    * @param size the size of the query - number of terms, etc.
-   * @exception Exception if cannot make the query, or if size &gt; 0 was specified but this feature is not supported.
+   * @exception Exception if the query cannot be created, or if size &gt; 0 was specified but this feature is not supported.
    */ 
-  public Query makeQuery (int size) throws Exception;
+  public Query makeQuery(FieldTypes fieldTypes, int size) throws Exception;
 
   /** Create the next query */ 
-  public Query makeQuery () throws Exception;
+  public Query makeQuery(FieldTypes fieldTypes) throws Exception;
 
   /** Set the properties */
-  public void setConfig (Config config) throws Exception;
+  public void setConfig(Config config) throws Exception;
   
   /** Reset inputs so that the test run would behave, input wise, as if it just started. */
   public void resetInputs();
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
index 1a57a05..1715255 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
@@ -17,7 +17,13 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.Query;
@@ -26,11 +32,6 @@
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
-import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
 
 
 /**
@@ -102,7 +103,7 @@
   }
   
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
     // analyzer (default is standard analyzer)
     Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer",
     "org.apache.lucene.analysis.standard.StandardAnalyzer")); 
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
index 0a375d0..6c91220 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
@@ -17,16 +17,17 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
-
-import java.util.ArrayList;
 
 /**
  * A QueryMaker that makes queries for a collection created 
@@ -42,7 +43,7 @@
    * @throws Exception if cannot prepare the queries.
    */
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
     // analyzer (default is standard analyzer)
     Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer",
         "org.apache.lucene.analysis.standard.StandardAnalyzer")); 
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java
index 6a5730c..9e88f70 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java
@@ -20,6 +20,7 @@
 import java.util.ArrayList;
 import java.util.StringTokenizer;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -33,7 +34,7 @@
-  * @see org.apache.lucene.benchmark.byTask.feeds.SimpleQueryMaker#prepareQueries()
+  * @see org.apache.lucene.benchmark.byTask.feeds.SimpleQueryMaker#prepareQueries(FieldTypes)
    */
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
     // extract some 100 words from doc text to an array
     String words[];
     ArrayList<String> w = new ArrayList<>();
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialDocMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialDocMaker.java
index f8853f2..1b7e096 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialDocMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialDocMaker.java
@@ -17,24 +17,24 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.context.SpatialContextFactory;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.benchmark.byTask.utils.Config;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.spatial.SpatialStrategy;
-import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
-import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
-import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTreeFactory;
-
 import java.util.AbstractMap;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.spatial.SpatialStrategy;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTreeFactory;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.context.SpatialContextFactory;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
+
 /**
  * Indexes spatial data according to a configured {@link SpatialStrategy} with optional
  * shape transformation via a configured {@link ShapeConverter}. The converter can turn points into
@@ -162,23 +162,20 @@
   }
 
   @Override
-  public Document makeDocument() throws Exception {
+  public Document makeDocument(IndexWriter w) throws Exception {
 
     DocState docState = getDocState();
 
-    Document doc = super.makeDocument();
+    Document doc = super.makeDocument(w);
 
     // Set SPATIAL_FIELD from body
     DocData docData = docState.docData;
     //   makeDocument() resets docState.getBody() so we can't look there; look in Document
-    String shapeStr = doc.getField(DocMaker.BODY_FIELD).stringValue();
+    String shapeStr = doc.getString(DocMaker.BODY_FIELD);
     Shape shape = makeShapeFromString(strategy, docData.getName(), shapeStr);
     if (shape != null) {
       shape = shapeConverter.convert(shape);
-      //index
-      for (Field f : strategy.createIndexableFields(shape)) {
-        doc.add(f);
-      }
+      strategy.addFields(doc, shape);
     }
 
     return doc;
@@ -197,7 +194,7 @@
   }
 
   @Override
-  public Document makeDocument(int size) throws Exception {
+  public Document makeDocument(IndexWriter w, int size) throws Exception {
     //TODO consider abusing the 'size' notion to number of shapes per document
     throw new UnsupportedOperationException();
   }
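
The spatial change above is the same pattern applied to SpatialStrategy: addFields(doc, shape) writes the strategy's fields into the writer-created document, replacing the loop over createIndexableFields(). Isolated as a helper (the configured strategy and writer are assumed, as set up by SpatialDocMaker above):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.spatial.SpatialStrategy;
import com.spatial4j.core.shape.Shape;

final class SpatialIndexHelper {
  static void indexShape(IndexWriter writer, SpatialStrategy strategy, Shape shape)
      throws IOException {
    Document doc = writer.newDocument();
    strategy.addFields(doc, shape); // replaces the old createIndexableFields loop
    writer.addDocument(doc);
  }
}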
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java
index 498e839..fa073bd 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java
@@ -17,8 +17,12 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.shape.Shape;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
 import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.CustomScoreQuery;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
@@ -29,10 +33,7 @@
 import org.apache.lucene.spatial.SpatialStrategy;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * Reads spatial data from the body field docs from an internally created {@link LineDocSource}.
@@ -64,7 +65,7 @@
   }
 
   @Override
-  protected Query[] prepareQueries() throws Exception {
+  protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
     final int maxQueries = config.get("query.file.maxQueries", 1000);
     Config srcConfig = new Config(new Properties());
     srcConfig.set("docs.file", config.get("query.file", null));
@@ -82,7 +83,7 @@
         Shape shape = SpatialDocMaker.makeShapeFromString(strategy, docData.getName(), docData.getBody());
         if (shape != null) {
           shape = shapeConverter.convert(shape);
-          queries.add(makeQueryFromShape(shape));
+          queries.add(makeQueryFromShape(fieldTypes, shape));
         } else {
           i--;//skip
         }
@@ -95,20 +96,19 @@
     return queries.toArray(new Query[queries.size()]);
   }
 
-
-  protected Query makeQueryFromShape(Shape shape) {
+  protected Query makeQueryFromShape(FieldTypes fieldTypes, Shape shape) {
     SpatialArgs args = new SpatialArgs(operation, shape);
     if (!Double.isNaN(distErrPct))
       args.setDistErrPct(distErrPct);
 
     if (score) {
       ValueSource valueSource = strategy.makeDistanceValueSource(shape.getCenter());
-      return new CustomScoreQuery(strategy.makeQuery(args), new FunctionQuery(valueSource));
+      return new CustomScoreQuery(strategy.makeQuery(fieldTypes, args), new FunctionQuery(valueSource));
     } else {
       //strategy.makeQuery() could potentially score (isn't well defined) so instead we call
       // makeFilter() and wrap
 
-      Filter filter = strategy.makeFilter(args);
+      Filter filter = strategy.makeFilter(fieldTypes, args);
       if (filter instanceof QueryWrapperFilter) {
         return ((QueryWrapperFilter)filter).getQuery();
       } else {
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java
index b40c76f..5f00fa2 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
 
 /**
  * Add a document, optionally of a certain size.
@@ -36,35 +37,26 @@
 
   private int docSize = 0;
   
-  /** 
-   * volatile data passed between setup(), doLogic(), tearDown().
-   * the doc is created at setup() and added at doLogic(). 
-   */
-  protected Document doc = null;
-
-  @Override
-  public void setup() throws Exception {
-    super.setup();
-    DocMaker docMaker = getRunData().getDocMaker();
-    if (docSize > 0) {
-      doc = docMaker.makeDocument(docSize);
-    } else {
-      doc = docMaker.makeDocument();
-    }
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    doc = null;
-    super.tearDown();
-  }
-
   @Override
   protected String getLogMessage(int recsCount) {
     return String.format(Locale.ROOT, "added %9d docs",recsCount);
   }
+
+  protected Document doc;
   
   @Override
+  public void setup() throws Exception {
+    super.setup();
+    DocMaker docMaker = getRunData().getDocMaker();
+    IndexWriter iw = getRunData().getIndexWriter();
+    if (docSize > 0) {
+      doc = docMaker.makeDocument(iw, docSize);
+    } else {
+      doc = docMaker.makeDocument(iw);
+    }
+  }
+
+  @Override
   public int doLogic() throws Exception {
     getRunData().getIndexWriter().addDocument(doc);
     return 1;
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java
index d1a3abf..cb01eef 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java
@@ -20,12 +20,11 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 
 /**
  * Abstract class for benchmarking highlighting performance
  */
 public abstract class BenchmarkHighlighter {
-  public abstract int doHighlight( IndexReader reader, int doc, String field,
-      StoredDocument document, Analyzer analyzer, String text ) throws Exception ;
+  public abstract int doHighlight(IndexReader reader, int doc, String field,
+                                  Document document, Analyzer analyzer, String text) throws Exception;
 }
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
index cfd2c12..21e9317 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
@@ -195,6 +195,7 @@
       }
     }
     IndexWriter writer = new IndexWriter(runData.getDirectory(), iwc);
+    writer.getFieldTypes().disableExistsFilters();
     return writer;
   }
 }
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
index b940ac4..4f6344e 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import java.util.Collection;
 import java.util.HashSet;
-
 import java.util.List;
 import java.util.Set;
 
@@ -32,17 +31,14 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.TopFieldCollector;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 
@@ -98,7 +94,7 @@
 
     // optionally warm and add num docs traversed to count
     if (withWarm()) {
-      StoredDocument doc = null;
+      Document doc = null;
       Bits liveDocs = MultiFields.getLiveDocs(reader);
       for (int m = 0; m < reader.maxDoc(); m++) {
         if (null == liveDocs || liveDocs.get(m)) {
@@ -110,7 +106,7 @@
 
     if (withSearch()) {
       res++;
-      Query q = queryMaker.makeQuery();
+      Query q = queryMaker.makeQuery(searcher.getIndexReader().getFieldTypes());
       Sort sort = getSort();
       TopDocs hits = null;
       final int numHits = numHits();
@@ -143,8 +139,8 @@
           System.out.println("numDocs() = " + reader.numDocs());
           for(int i=0;i<hits.scoreDocs.length;i++) {
             final int docID = hits.scoreDocs[i].doc;
-            final StoredDocument doc = reader.document(docID);
-            System.out.println("  " + i + ": doc=" + docID + " score=" + hits.scoreDocs[i].score + " " + printHitsField + " =" + doc.get(printHitsField));
+            final Document doc = reader.document(docID);
+            System.out.println("  " + i + ": doc=" + docID + " score=" + hits.scoreDocs[i].score + " " + printHitsField + " =" + doc.getString(printHitsField));
           }
         }
 
@@ -164,12 +160,12 @@
               int id = scoreDocs[m].doc;
               res++;
               if (retrieve) {
-                StoredDocument document = retrieveDoc(reader, id);
+                Document document = retrieveDoc(reader, id);
                 res += document != null ? 1 : 0;
                 if (numHighlight > 0 && m < numHighlight) {
                   Collection<String> fieldsToHighlight = getFieldsToHighlight(document);
                   for (final String field : fieldsToHighlight) {
-                    String text = document.get(field);
+                    String text = document.getString(field);
                     res += highlighter.doHighlight(reader, id, field, document, analyzer, text);
                   }
                 }
@@ -194,7 +190,7 @@
   }
 
 
-  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
+  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
     return ir.document(id);
   }
 
@@ -297,10 +293,10 @@
    * @param document The Document
    * @return A Collection of Field names (Strings)
    */
-  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
-    List<StorableField> fields = document.getFields();
+  protected Collection<String> getFieldsToHighlight(Document document) {
+    List<IndexableField> fields = document.getFields();
     Set<String> result = new HashSet<>(fields.size());
-    for (final StorableField f : fields) {
+    for (final IndexableField f : fields) {
       result.add(f.name());
     }
     return result;
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
index f8bfaef..af7c1a6 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
@@ -26,12 +26,10 @@
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.store.RAMDirectory;
 
 /**
  * Simple task to test performance of tokenizers.  It just
@@ -46,52 +44,50 @@
 
   private int totalTokenCount = 0;
   
-  // volatile data passed between setup(), doLogic(), tearDown().
-  private Document doc = null;
-  
-  @Override
-  public void setup() throws Exception {
-    super.setup();
-    DocMaker docMaker = getRunData().getDocMaker();
-    doc = docMaker.makeDocument();
-  }
-
   @Override
   protected String getLogMessage(int recsCount) {
     return "read " + recsCount + " docs; " + totalTokenCount + " tokens";
   }
   
-  @Override
-  public void tearDown() throws Exception {
-    doc = null;
-    super.tearDown();
+  private IndexWriter privateWriter;
+
+  private IndexWriter getPrivateWriter() throws Exception {
+    if (privateWriter == null) {
+      RAMDirectory dir = new RAMDirectory();
+      privateWriter = new IndexWriter(dir, new IndexWriterConfig(getRunData().getAnalyzer()));
+    }
+    return privateWriter;
   }
 
   @Override
   public int doLogic() throws Exception {
-    List<Field> fields = doc.getFields();
-    Analyzer analyzer = getRunData().getAnalyzer();
-    int tokenCount = 0;
-    for(final Field field : fields) {
-      if (!field.fieldType().tokenized() ||
-          field instanceof IntField ||
-          field instanceof LongField ||
-          field instanceof FloatField ||
-          field instanceof DoubleField) {
-        continue;
-      }
-      
-      final TokenStream stream = field.tokenStream(analyzer, null);
-      // reset the TokenStream to the first token
-      stream.reset();
+    DocMaker docMaker = getRunData().getDocMaker();
+    IndexWriter iw = getRunData().getIndexWriter();
+    if (iw == null) {
+      iw = getPrivateWriter();
+    }
+    Document doc = docMaker.makeDocument(iw);
 
-      TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-      while(stream.incrementToken()) {
-        termAtt.fillBytesRef();
-        tokenCount++;
+    List<IndexableField> fields = doc.getFields();
+    Analyzer analyzer = iw.getFieldTypes().getIndexAnalyzer();
+    int tokenCount = 0;
+    for(final IndexableField field : fields) {
+      if (field.name().equals(DocMaker.BODY_FIELD) ||
+          field.name().equals(DocMaker.DATE_FIELD) ||
+          field.name().equals(DocMaker.TITLE_FIELD)) {
+
+        final TokenStream stream = field.tokenStream(null);
+        // reset the TokenStream to the first token
+        stream.reset();
+
+        TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+        while(stream.incrementToken()) {
+          termAtt.fillBytesRef();
+          tokenCount++;
+        }
+        stream.end();
+        stream.close();
       }
-      stream.end();
-      stream.close();
     }
     totalTokenCount += tokenCount;
     return tokenCount;
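
For reference, the rewritten loop follows the standard TokenStream consumption contract: reset() before the first incrementToken(), end() after the last, then close(). The same logic as a stand-alone helper:

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;

final class TokenCounter {
  static int countTokens(TokenStream stream) throws IOException {
    TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
    int count = 0;
    stream.reset();                 // required before the first incrementToken()
    while (stream.incrementToken()) {
      termAtt.fillBytesRef();       // materialize the term bytes, as the task does
      count++;
    }
    stream.end();                   // records the final offset state
    stream.close();
    return count;
  }
}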
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
index f18ce10..7c0da18 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
@@ -17,12 +17,16 @@
  * limitations under the License.
  */
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.highlight.Highlighter;
 import org.apache.lucene.search.highlight.QueryScorer;
@@ -30,11 +34,6 @@
 import org.apache.lucene.search.highlight.TextFragment;
 import org.apache.lucene.search.highlight.TokenSources;
 
-import java.util.Set;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Collections;
-
 /**
  * Search and Traverse and Retrieve docs task.  Highlight the fields in the retrieved documents.
  *
@@ -102,7 +101,7 @@
     return new BenchmarkHighlighter(){
       @Override
       public int doHighlight(IndexReader reader, int doc, String field,
-          StoredDocument document, Analyzer analyzer, String text) throws Exception {
+          Document document, Analyzer analyzer, String text) throws Exception {
         TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
         TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
         return frag != null ? frag.length : 0;
@@ -111,7 +110,7 @@
   }
 
   @Override
-  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
+  protected Collection<String> getFieldsToHighlight(Document document) {
     Collection<String> result = super.getFieldsToHighlight(document);
     //if stored is false, then result will be empty, in which case just get all the param fields
     if (paramFields.isEmpty() == false && result.isEmpty() == false) {
@@ -150,4 +149,4 @@
   }
 
 
-}
\ No newline at end of file
+}
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java
index dfbbaa2..f0335ea 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 
 /**
  * Search and Traverse and Retrieve docs task using a
@@ -55,11 +54,11 @@
 
 
   @Override
-  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
+  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
     if (fieldsToLoad == null) {
       return ir.document(id);
     } else {
-      DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
+      DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(ir.getFieldTypes(), fieldsToLoad);
       ir.document(id, visitor);
       return visitor.getDocument();
     }
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java
index 8559030..bfa52c4 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java
@@ -17,20 +17,19 @@
  * limitations under the License.
  */
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
 import org.apache.lucene.search.vectorhighlight.FieldQuery;
 
-import java.util.Set;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Collections;
-
 /**
  * Search and Traverse and Retrieve docs task.  Highlight the fields in the retrieved documents by using FastVectorHighlighter.
  *
@@ -100,7 +99,7 @@
     return new BenchmarkHighlighter(){
       @Override
       public int doHighlight(IndexReader reader, int doc, String field,
-          StoredDocument document, Analyzer analyzer, String text) throws Exception {
+          Document document, Analyzer analyzer, String text) throws Exception {
         final FieldQuery fq = highlighter.getFieldQuery( myq, reader);
         String[] fragments = highlighter.getBestFragments(fq, reader, doc, field, fragSize, maxFrags);
         return fragments != null ? fragments.length : 0;
@@ -109,7 +108,7 @@
   }
 
   @Override
-  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
+  protected Collection<String> getFieldsToHighlight(Document document) {
     Collection<String> result = super.getFieldsToHighlight(document);
     //if stored is false, then result will be empty, in which case just get all the param fields
     if (paramFields.isEmpty() == false && result.isEmpty() == false) {
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/UpdateDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/UpdateDocTask.java
index 0ff3ff2..598a38c 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/UpdateDocTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/UpdateDocTask.java
@@ -20,8 +20,8 @@
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
 
 /**
  * Update a document, using IndexWriter.updateDocument,
@@ -37,33 +37,20 @@
 
   private int docSize = 0;
   
-  // volatile data passed between setup(), doLogic(), tearDown().
-  private Document doc = null;
-  
-  @Override
-  public void setup() throws Exception {
-    super.setup();
-    DocMaker docMaker = getRunData().getDocMaker();
-    if (docSize > 0) {
-      doc = docMaker.makeDocument(docSize);
-    } else {
-      doc = docMaker.makeDocument();
-    }
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    doc = null;
-    super.tearDown();
-  }
-
   @Override
   public int doLogic() throws Exception {
-    final String docID = doc.get(DocMaker.ID_FIELD);
+    Document doc;
+    DocMaker docMaker = getRunData().getDocMaker();
+    IndexWriter iw = getRunData().getIndexWriter();
+    if (docSize > 0) {
+      doc = docMaker.makeDocument(iw, docSize);
+    } else {
+      doc = docMaker.makeDocument(iw);
+    }
+    final String docID = doc.getString(DocMaker.ID_FIELD);
     if (docID == null) {
       throw new IllegalStateException("document must define the docid field");
     }
-    final IndexWriter iw = getRunData().getIndexWriter();
     iw.updateDocument(new Term(DocMaker.ID_FIELD, docID), doc);
     return 1;
   }
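
The reshaped doLogic() above is Lucene's standard update-by-term idiom; isolated, it reads as follows (the helper name is illustrative):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;

final class UpdateHelper {
  // Deletes any document whose idField matches, then adds doc, atomically.
  static void updateById(IndexWriter iw, String idField, Document doc) throws IOException {
    String docID = doc.getString(idField);
    if (docID == null) {
      throw new IllegalStateException("document must define the docid field");
    }
    iw.updateDocument(new Term(idField, docID), doc);
  }
}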
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTask.java
index 2caee9d..4a46e04 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTask.java
@@ -13,7 +13,7 @@
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.benchmark.byTask.utils.StreamUtils;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.IndexableField;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -63,7 +63,7 @@
   
   @Override
   protected PrintWriter lineFileOut(Document doc) {
-    StorableField titleField = doc.getField(DocMaker.TITLE_FIELD);
+    IndexableField titleField = doc.getField(DocMaker.TITLE_FIELD);
     if (titleField!=null && titleField.stringValue().startsWith("Category:")) {
       return categoryLineFileOut;
     }
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java
index 4053554..393564f 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java
@@ -33,7 +33,10 @@
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.StreamUtils;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.store.RAMDirectory;
 
 /**
  * A task which writes documents, one line per document. Each line is in the
@@ -155,10 +158,24 @@
   protected String getLogMessage(int recsCount) {
     return "Wrote " + recsCount + " line docs";
   }
+
+  private IndexWriter privateWriter;
+
+  private IndexWriter getPrivateWriter() throws Exception {
+    if (privateWriter == null) {
+      RAMDirectory dir = new RAMDirectory();
+      privateWriter = new IndexWriter(dir, new IndexWriterConfig(null));
+    }
+    return privateWriter;
+  }
   
   @Override
   public int doLogic() throws Exception {
-    Document doc = docSize > 0 ? docMaker.makeDocument(docSize) : docMaker.makeDocument();
+    IndexWriter iw = getRunData().getIndexWriter();
+    if (iw == null) {
+      iw = getPrivateWriter();
+    }
+    Document doc = docSize > 0 ? docMaker.makeDocument(iw, docSize) : docMaker.makeDocument(iw);
 
     Matcher matcher = threadNormalizer.get();
     if (matcher == null) {
@@ -175,7 +192,7 @@
 
     boolean sufficient = !checkSufficientFields;
     for (int i=0; i<fieldsToWrite.length; i++) {
-      StorableField f = doc.getField(fieldsToWrite[i]);
+      IndexableField f = doc.getField(fieldsToWrite[i]);
       String text = f == null ? "" : matcher.reset(f.stringValue()).replaceAll(" ").trim();
       sb.append(text).append(SEP);
       sufficient |= text.length()>0 && sufficientFields[i];
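
Both ReadTokensTask and this task now lazily open a private RAMDirectory writer when the run has none, and neither hunk closes it. A cleanup sketch, assuming PerfTask's close() override is the right hook for releasing it:

  @Override
  public void close() throws Exception {
    if (privateWriter != null) {
      privateWriter.close(); // the RAMDirectory behind it is throwaway
      privateWriter = null;
    }
    super.close();
  }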
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java
index e85656a..89b74b1 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java
@@ -31,6 +31,9 @@
 import org.apache.lucene.benchmark.byTask.feeds.NoMoreDataException;
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.IOUtils;
 
 /**
@@ -92,14 +95,20 @@
     Document doc = null;
     System.out.println("Starting Extraction");
     long start = System.currentTimeMillis();
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
     try {
-      while ((doc = docMaker.makeDocument()) != null) {
-        create(doc.get(DocMaker.ID_FIELD), doc.get(DocMaker.TITLE_FIELD), doc
-            .get(DocMaker.DATE_FIELD), doc.get(DocMaker.BODY_FIELD));
+      while ((doc = docMaker.makeDocument(iw)) != null) {
+        create(doc.getString(DocMaker.ID_FIELD),
+               doc.getString(DocMaker.TITLE_FIELD),
+               doc.getString(DocMaker.DATE_FIELD),
+               doc.getString(DocMaker.BODY_FIELD));
       }
     } catch (NoMoreDataException e) {
       //continue
     }
+    iw.close();
+    dir.close();
     long finish = System.currentTimeMillis();
     System.out.println("Extraction took " + (finish - start) + " ms");
   }
@@ -150,4 +159,4 @@
     System.err.println("--discardImageOnlyDocs tells the extractor to skip Wiki docs that contain only images");
   }
 
-}
\ No newline at end of file
+}
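The extraction loop above drives the DocMaker with a throwaway RAMDirectory-backed writer, since documents can now only be created through an IndexWriter. The patch closes the writer and directory after the catch rather than in a finally block, so an unexpected exception would leak them; a minimal try-with-resources sketch of the same flow, assuming only the patched makeDocument(IndexWriter) signature (loop body elided):

    // Both IndexWriter and RAMDirectory are Closeable, so they are
    // released even if extraction fails unexpectedly.
    try (RAMDirectory dir = new RAMDirectory();
         IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null))) {
      Document doc;
      try {
        while ((doc = docMaker.makeDocument(iw)) != null) {
          // ... create(...) one extracted document, as in the hunk above ...
        }
      } catch (NoMoreDataException e) {
        // end of feed: fall through and close normally
      }
    }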
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index d89a670..ad9d9ba 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -508,7 +508,7 @@
     reader.close();
 
     // Make sure they are the same
-    assertEquals(totalTokenCount1, totalTokenCount2);
+    assertEquals(totalTokenCount2, totalTokenCount1);
   }
   
   /**
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
index 2b15e99..603030a 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksParse.java
@@ -34,10 +34,11 @@
 import org.apache.lucene.benchmark.byTask.tasks.TaskSequence;
 import org.apache.lucene.benchmark.byTask.utils.Algorithm;
 import org.apache.lucene.benchmark.byTask.utils.Config;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
+import org.apache.lucene.util.LuceneTestCase;
 
 import conf.ConfLoader;
 
@@ -105,7 +106,7 @@
 
   public static class MockQueryMaker extends AbstractQueryMaker {
     @Override
-    protected Query[] prepareQueries() throws Exception {
+    protected Query[] prepareQueries(FieldTypes fieldTypes) throws Exception {
       return new Query[0];
     }
   }
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
index 0d7292e..472da24 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
@@ -33,10 +33,13 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.IOUtils;
 
 /** Tests the functionality of {@link DocMaker}. */
@@ -117,8 +120,13 @@
     Config config = new Config(props);
     
     DocMaker dm = new DocMaker();
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
     dm.setConfig(config, new OneDocSource());
-    return dm.makeDocument();
+    Document doc = dm.makeDocument(w);
+    w.close();
+    dir.close();
+    return doc;
   }
   
   /* Tests doc.index.props property. */
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java
index d5a3114..688d8c2 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.benchmark.BenchmarkTestCase;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.utils.Config;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -48,7 +47,7 @@
     try {
       IndexWriter writer = new IndexWriter(tmpDir, new IndexWriterConfig(null));
       for (int i = 0; i < 10; i++) {
-        writer.addDocument(new Document());
+        writer.addDocument(writer.newDocument());
       }
       writer.close();
     } finally {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java
index 3d5bb8a..b66443a 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java
@@ -17,20 +17,19 @@
 
 package org.apache.lucene.benchmark.byTask.tasks;
 
-import org.apache.lucene.benchmark.byTask.PerfRunData;
-import org.apache.lucene.analysis.TokenStream;
+import java.io.IOException;
+
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
-import org.apache.lucene.search.highlight.Highlighter;
-import org.apache.lucene.search.highlight.TextFragment;
-import org.apache.lucene.search.highlight.QueryScorer;
-import org.apache.lucene.search.highlight.TokenSources;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
-
-import java.io.IOException;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.highlight.Highlighter;
+import org.apache.lucene.search.highlight.QueryScorer;
+import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
+import org.apache.lucene.search.highlight.TextFragment;
+import org.apache.lucene.search.highlight.TokenSources;
 
 /**
  * Test Search task which counts number of searches.
@@ -45,8 +44,8 @@
   }
 
   @Override
-  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
-    StoredDocument document = ir.document(id);
+  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
+    Document document = ir.document(id);
     if (document != null) {
       numDocsRetrieved++;
     }
@@ -58,7 +57,7 @@
     highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(q));
     return new BenchmarkHighlighter() {
       @Override
-      public int doHighlight(IndexReader reader, int doc, String field, StoredDocument document, Analyzer analyzer, String text) throws Exception {
+      public int doHighlight(IndexReader reader, int doc, String field, Document document, Analyzer analyzer, String text) throws Exception {
         TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
         TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
         numHighlightedResults += frag != null ? frag.length : 0;
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java
index ac8277c..3d651fa 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java
@@ -33,8 +33,7 @@
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexWriter;
 
 /** Tests the functionality of {@link WriteEnwikiLineDocTask}. */
 public class WriteEnwikiLineDocTaskTest extends BenchmarkTestCase {
@@ -47,12 +46,12 @@
     AtomicInteger flip = new AtomicInteger(0);
     
     @Override
-    public Document makeDocument() throws Exception {
+    public Document makeDocument(IndexWriter w) throws Exception {
       boolean isCategory = (flip.incrementAndGet() % 2 == 0); 
-      Document doc = new Document();
-      doc.add(new StringField(BODY_FIELD, "body text", Field.Store.NO));
-      doc.add(new StringField(TITLE_FIELD, isCategory ? "Category:title text" : "title text", Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date text", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom(BODY_FIELD, "body text");
+      doc.addAtom(TITLE_FIELD, isCategory ? "Category:title text" : "title text");
+      doc.addAtom(DATE_FIELD, "date text");
       return doc;
     }
     
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
index 6a5292a..1165e2c 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
@@ -34,8 +34,7 @@
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.StreamUtils.Type;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexWriter;
 
 /** Tests the functionality of {@link WriteLineDocTask}. */
 public class WriteLineDocTaskTest extends BenchmarkTestCase {
@@ -44,11 +43,11 @@
   public static final class WriteLineDocMaker extends DocMaker {
   
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(BODY_FIELD, "body", Field.Store.NO));
-      doc.add(new StringField(TITLE_FIELD, "title", Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(BODY_FIELD, "body");
+      doc.addAtom(TITLE_FIELD, "title");
+      doc.addAtom(DATE_FIELD, "date");
       return doc;
     }
     
@@ -58,11 +57,11 @@
   public static final class NewLinesDocMaker extends DocMaker {
   
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(BODY_FIELD, "body\r\ntext\ttwo", Field.Store.NO));
-      doc.add(new StringField(TITLE_FIELD, "title\r\ntext", Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date\r\ntext", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(BODY_FIELD, "body\r\ntext\ttwo");
+      doc.addAtom(TITLE_FIELD, "title\r\ntext");
+      doc.addAtom(DATE_FIELD, "date\r\ntext");
       return doc;
     }
     
@@ -71,10 +70,10 @@
   // class has to be public so that Class.forName.newInstance() will work
   public static final class NoBodyDocMaker extends DocMaker {
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(TITLE_FIELD, "title", Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(TITLE_FIELD, "title");
+      doc.addAtom(DATE_FIELD, "date");
       return doc;
     }
   }
@@ -82,10 +81,10 @@
   // class has to be public so that Class.forName.newInstance() will work
   public static final class NoTitleDocMaker extends DocMaker {
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(BODY_FIELD, "body", Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(BODY_FIELD, "body");
+      doc.addAtom(DATE_FIELD, "date");
       return doc;
     }
   }
@@ -93,9 +92,9 @@
   // class has to be public so that Class.forName.newInstance() will work
   public static final class JustDateDocMaker extends DocMaker {
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(DATE_FIELD, "date");
       return doc;
     }
   }
@@ -104,9 +103,9 @@
   // same as JustDate just that this one is treated as legal
   public static final class LegalJustDateDocMaker extends DocMaker {
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
-      doc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
+      doc.addAtom(DATE_FIELD, "date");
       return doc;
     }
   }
@@ -114,8 +113,8 @@
   // class has to be public so that Class.forName.newInstance() will work
   public static final class EmptyDocMaker extends DocMaker {
     @Override
-    public Document makeDocument() throws Exception {
-      return new Document();
+    public Document makeDocument(IndexWriter w) throws Exception {
+      return w.newDocument();
     }
   }
   
@@ -123,15 +122,14 @@
   public static final class ThreadingDocMaker extends DocMaker {
   
     @Override
-    public Document makeDocument() throws Exception {
-      Document doc = new Document();
+    public Document makeDocument(IndexWriter w) throws Exception {
+      Document doc = w.newDocument();
       String name = Thread.currentThread().getName();
-      doc.add(new StringField(BODY_FIELD, "body_" + name, Field.Store.NO));
-      doc.add(new StringField(TITLE_FIELD, "title_" + name, Field.Store.NO));
-      doc.add(new StringField(DATE_FIELD, "date_" + name, Field.Store.NO));
+      doc.addAtom(BODY_FIELD, "body_" + name);
+      doc.addAtom(TITLE_FIELD, "title_" + name);
+      doc.addAtom(DATE_FIELD, "date_" + name);
       return doc;
     }
-    
   }
 
   private static final CompressorStreamFactory csFactory = new CompressorStreamFactory();
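Every test DocMaker above migrates the same way, which makes the API shift easy to read side by side: per-field types no longer travel with Field instances, the writer's schema supplies them. A minimal before/after sketch drawn from the hunks above:

    // Before (trunk): the Field instance carries its own type.
    Document oldDoc = new Document();
    oldDoc.add(new StringField(DATE_FIELD, "date", Field.Store.NO));

    // After (this branch): the document comes from the writer, and
    // addAtom indexes the value as a single un-analyzed token.
    Document newDoc = w.newDocument();
    newDoc.addAtom(DATE_FIELD, "date");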
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
index 7b35a06..6c2c61c 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
@@ -25,10 +25,10 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -171,7 +171,7 @@
     // run the search and use stored field values
     for (ScoreDoc scoreDoc : indexSearcher.search(q,
         Integer.MAX_VALUE).scoreDocs) {
-      StoredDocument doc = indexSearcher.doc(scoreDoc.doc);
+      Document doc = indexSearcher.doc(scoreDoc.doc);
 
       // assign class to the doc
       ClassificationResult<Boolean> classificationResult = assignClass(doc
@@ -179,7 +179,7 @@
       Boolean assignedClass = classificationResult.getAssignedClass();
 
       // get the expected result
-      StorableField field = doc.getField(classFieldName);
+      IndexableField field = doc.getField(classFieldName);
 
       Boolean correctClass = Boolean.valueOf(field.stringValue());
       long modifier = correctClass.compareTo(assignedClass);
@@ -262,4 +262,4 @@
     return null;
   }
 
-}
\ No newline at end of file
+}
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
index 4f0ca17..184fd06 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
@@ -21,13 +21,11 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -72,51 +70,48 @@
     IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(analyzer));
     IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(analyzer));
 
+    for (IndexWriter w : new IndexWriter[] {testWriter, cvWriter, trainingWriter}) {
+      FieldTypes fieldTypes = w.getFieldTypes();
+      for (String fieldName : fieldNames) {
+        fieldTypes.enableTermVectors(fieldName);
+        fieldTypes.enableTermVectorPositions(fieldName);
+        fieldTypes.enableTermVectorOffsets(fieldName);
+      }
+    }
+
     try {
       int size = originalIndex.maxDoc();
 
       IndexSearcher indexSearcher = new IndexSearcher(originalIndex);
       TopDocs topDocs = indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE);
 
-      // set the type to be indexed, stored, with term vectors
-      FieldType ft = new FieldType(TextField.TYPE_STORED);
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(true);
-      ft.setStoreTermVectorPositions(true);
-
       int b = 0;
 
       // iterate over existing documents
       for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+        IndexWriter w;
+        if (b % 2 == 0 && testWriter.maxDoc() < size * testRatio) {
+          w = testWriter;
+        } else if (cvWriter.maxDoc() < size * crossValidationRatio) {
+          w = cvWriter;
+        } else {
+          w = trainingWriter;
+        }
 
         // create a new document for indexing
-        Document doc = new Document();
+        Document doc = w.newDocument();
         if (fieldNames != null && fieldNames.length > 0) {
           for (String fieldName : fieldNames) {
-            doc.add(new Field(fieldName, originalIndex.document(scoreDoc.doc).getField(fieldName).stringValue(), ft));
+            doc.addLargeText(fieldName, originalIndex.document(scoreDoc.doc).getField(fieldName).stringValue());
           }
         } else {
-          for (StorableField storableField : originalIndex.document(scoreDoc.doc).getFields()) {
-            if (storableField.readerValue() != null) {
-              doc.add(new Field(storableField.name(), storableField.readerValue(), ft));
-            } else if (storableField.binaryValue() != null) {
-              doc.add(new Field(storableField.name(), storableField.binaryValue(), ft));
-            } else if (storableField.stringValue() != null) {
-              doc.add(new Field(storableField.name(), storableField.stringValue(), ft));
-            } else if (storableField.numericValue() != null) {
-              doc.add(new Field(storableField.name(), storableField.numericValue().toString(), ft));
-            }
+          for (IndexableField storableField : originalIndex.document(scoreDoc.doc).getFields()) {
+            doc.addLargeText(storableField.name(), storableField.stringValue());
           }
         }
 
         // add it to one of the IDXs
-        if (b % 2 == 0 && testWriter.maxDoc() < size * testRatio) {
-          testWriter.addDocument(doc);
-        } else if (cvWriter.maxDoc() < size * crossValidationRatio) {
-          cvWriter.addDocument(doc);
-        } else {
-          trainingWriter.addDocument(doc);
-        }
+        w.addDocument(doc);
         b++;
       }
     } catch (Exception e) {
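Two changes travel together in this hunk: term vectors are enabled up front through each writer's FieldTypes (replacing the shared FieldType instance), and the destination writer is chosen before the document is built, because newDocument() binds a document to the writer that created it. A condensed sketch of the new ordering; chooseWriter is a hypothetical helper standing in for the inline if/else above:

    // Pick the destination writer first, then build the document from it.
    IndexWriter w = chooseWriter(b, testWriter, cvWriter, trainingWriter); // hypothetical
    Document doc = w.newDocument();      // bound to w's FieldTypes schema
    doc.addLargeText(fieldName, value);  // value: the copied field's stringValue()
    w.addDocument(doc);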
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java b/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java
index b8334af..0559959 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java
@@ -21,9 +21,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -50,7 +49,6 @@
 
   private RandomIndexWriter indexWriter;
   private Directory dir;
-  private FieldType ft;
 
   String textFieldName;
   String categoryFieldName;
@@ -65,10 +63,12 @@
     textFieldName = "text";
     categoryFieldName = "cat";
     booleanFieldName = "bool";
-    ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    for(String fieldName : new String[] {textFieldName, categoryFieldName, booleanFieldName}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+    }
   }
 
   @Override
@@ -99,6 +99,7 @@
         leafReader.close();
     }
   }
+
   protected void checkOnlineClassification(Classifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName) throws Exception {
     checkOnlineClassification(classifier, inputDoc, expectedResult, analyzer, textFieldName, classFieldName, null);
   }
@@ -130,73 +131,80 @@
     indexWriter = new RandomIndexWriter(random(), dir, newIndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
     indexWriter.commit();
 
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    for(String fieldName : new String[] {textFieldName, categoryFieldName, booleanFieldName}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+    }
+
     String text;
 
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     text = "The traveling press secretary for Mitt Romney lost his cool and cursed at reporters " +
         "who attempted to ask questions of the Republican presidential candidate in a public plaza near the Tomb of " +
         "the Unknown Soldier in Warsaw Tuesday.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
 
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Mitt Romney seeks to assure Israel and Iran, as well as Jewish voters in the United" +
         " States, that he will be tougher against Iran's nuclear ambitions than President Barack Obama.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "And there's a threshold question that he has to answer for the American people and " +
         "that's whether he is prepared to be commander-in-chief,\" she continued. \"As we look to the past events, we " +
         "know that this raises some questions about his preparedness and we'll see how the rest of his trip goes.\"";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Still, when it comes to gun policy, many congressional Democrats have \"decided to " +
         "keep quiet and not go there,\" said Alan Lizotte, dean and professor at the State University of New York at " +
         "Albany's School of Criminal Justice.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Standing amongst the thousands of people at the state Capitol, Jorstad, director of " +
         "technology at the University of Wisconsin-La Crosse, documented the historic moment and shared it with the " +
         "world through the Internet.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "So, about all those experts and analysts who've spent the past year or so saying " +
         "Facebook was going to make a phone. A new expert has stepped forward to say it's not going to happen.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "More than 400 million people trust Google with their e-mail, and 50 million store files" +
         " in the cloud using the Dropbox service. People manage their bank accounts, pay bills, trade stocks and " +
         "generally transfer or store huge volumes of personal data online.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "unlabeled doc";
-    doc.add(new Field(textFieldName, text, ft));
+    doc.addLargeText(textFieldName, text);
     indexWriter.addDocument(doc);
 
     indexWriter.commit();
@@ -223,18 +231,21 @@
     indexWriter = new RandomIndexWriter(random(), dir, newIndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
     indexWriter.commit();
 
-    FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    for(String fieldName : new String[] {textFieldName, categoryFieldName, booleanFieldName}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+    }
+
     int docs = 1000;
     Random random = random();
     for (int i = 0; i < docs; i++) {
       boolean b = random.nextBoolean();
-      Document doc = new Document();
-      doc.add(new Field(textFieldName, createRandomString(random), ft));
-      doc.add(new Field(categoryFieldName, b ? "technology" : "politics", ft));
-      doc.add(new Field(booleanFieldName, String.valueOf(b), ft));
+      Document doc = indexWriter.newDocument();
+      doc.addLargeText(textFieldName, createRandomString(random));
+      doc.addLargeText(categoryFieldName, b ? "technology" : "politics");
+      doc.addLargeText(booleanFieldName, String.valueOf(b));
       indexWriter.addDocument(doc);
     }
     indexWriter.commit();
@@ -253,59 +264,59 @@
 
     String text;
 
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     text = "Warren Bennis says John F. Kennedy grasped a key lesson about the presidency that few have followed.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
 
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Julian Zelizer says Bill Clinton is still trying to shape his party, years after the White House, while George W. Bush opts for a much more passive role.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Crossfire: Sen. Tim Scott passes on Sen. Lindsey Graham endorsement";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Illinois becomes 16th state to allow same-sex marriage.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "politics", ft));
-    doc.add(new Field(booleanFieldName, "true", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "politics");
+    doc.addLargeText(booleanFieldName, "true");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Apple is developing iPhones with curved-glass screens and enhanced sensors that detect different levels of pressure, according to a new report.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "The Xbox One is Microsoft's first new gaming console in eight years. It's a quality piece of hardware but it's also noteworthy because Microsoft is using it to make a statement.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "Google says it will replace a Google Maps image after a California father complained it shows the body of his teen-age son, who was shot to death in 2009.";
-    doc.add(new Field(textFieldName, text, ft));
-    doc.add(new Field(categoryFieldName, "technology", ft));
-    doc.add(new Field(booleanFieldName, "false", ft));
+    doc.addLargeText(textFieldName, text);
+    doc.addLargeText(categoryFieldName, "technology");
+    doc.addLargeText(booleanFieldName, "false");
     indexWriter.addDocument(doc);
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     text = "second unlabeled doc";
-    doc.add(new Field(textFieldName, text, ft));
+    doc.addLargeText(textFieldName, text);
     indexWriter.addDocument(doc);
 
     indexWriter.commit();
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
index 9b64c84..2207112 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
@@ -17,28 +17,25 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.Analyzer;
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.Random;
-
 /**
  * Testcase for {@link org.apache.lucene.classification.utils.DatasetSplitter}
  */
@@ -59,18 +56,25 @@
     dir = newDirectory();
     indexWriter = new RandomIndexWriter(random(), dir);
 
-    FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(idFieldName);
+    fieldTypes.enableTermVectorPositions(idFieldName);
+    fieldTypes.enableTermVectorOffsets(idFieldName);
 
-    Document doc;
+    fieldTypes.enableTermVectors(textFieldName);
+    fieldTypes.enableTermVectorPositions(textFieldName);
+    fieldTypes.enableTermVectorOffsets(textFieldName);
+
+    fieldTypes.enableTermVectors(classFieldName);
+    fieldTypes.enableTermVectorPositions(classFieldName);
+    fieldTypes.enableTermVectorOffsets(classFieldName);
+
     Random rnd = random();
     for (int i = 0; i < 100; i++) {
-      doc = new Document();
-      doc.add(new Field(idFieldName, Integer.toString(i), ft));
-      doc.add(new Field(textFieldName, TestUtil.randomUnicodeString(rnd, 1024), ft));
-      doc.add(new Field(classFieldName, TestUtil.randomUnicodeString(rnd, 10), ft));
+      Document doc = indexWriter.newDocument();
+      doc.addAtom(idFieldName, Integer.toString(i));
+      doc.addLargeText(textFieldName, TestUtil.randomUnicodeString(rnd, 1024));
+      doc.addLargeText(classFieldName, TestUtil.randomUnicodeString(rnd, 10));
       indexWriter.addDocument(doc);
     }
 
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
index 41b09c6..f9fc184 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
@@ -16,12 +16,8 @@
  */
 package org.apache.lucene.classification.utils;
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -49,17 +45,19 @@
     super.setUp();
     dir = newDirectory();
     RandomIndexWriter indexWriter = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors("id");
+    fieldTypes.enableTermVectorPositions("id");
+    fieldTypes.enableTermVectorOffsets("id");
 
-    FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
+    fieldTypes.enableTermVectors("text");
+    fieldTypes.enableTermVectorPositions("text");
+    fieldTypes.enableTermVectorOffsets("text");
 
-    Document doc;
     for (int i = 0; i < 10; i++) {
-      doc = new Document();
-      doc.add(new Field("id", Integer.toString(i), ft));
-      doc.add(new Field("text", random().nextInt(10) + " " + random().nextInt(10) + " " + random().nextInt(10), ft));
+      Document doc = indexWriter.newDocument();
+      doc.addAtom("id", Integer.toString(i));
+      doc.addLargeText("text", random().nextInt(10) + " " + random().nextInt(10) + " " + random().nextInt(10));
       indexWriter.addDocument(doc);
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
index c578009..de4d6d9 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
@@ -169,6 +169,8 @@
 
   // private final String segment;
 
+  // TODO: support auto-prefix?
+
   /** Create a new writer.  The number of items (terms or
    *  sub-blocks) per block will aim to be between
    *  minItemsPerBlock and maxItemsPerBlock, though in some
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
index 14a2691..2bc9920 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
@@ -164,7 +164,8 @@
   @Override
   public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
     if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
-      throw new IllegalArgumentException("please use CompiledAutomaton.getTermsEnum instead");
+      // Let super handle RANGE, PREFIX:
+      return super.intersect(compiled, startTerm);
     }
     return new OrdsIntersectTermsEnum(this, compiled, startTerm);
   }
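The same guard recurs in DirectPostingsFormat, FSTOrdTermsReader and FSTTermsReader below: instead of rejecting non-NORMAL compiled automata, these Terms implementations now fall back to the default Terms.intersect. The shared pattern, with the presumed rationale as a comment (only NORMAL automata carry the runnable automaton these specialized enums walk):

    @Override
    public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
      if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
        // RANGE/PREFIX don't provide the automaton this enum needs;
        // the default implementation handles them by seeking instead.
        return super.intersect(compiled, startTerm);
      }
      return new IntersectTermsEnum(compiled, startTerm); // format-specific enum
    }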
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesFormat.java
index bda5e4f..a7e35e8 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesFormat.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.document.SortedSetDocValuesField; // javadocs
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.util.ArrayUtil;
@@ -45,7 +44,7 @@
  *        document's set of values cannot exceed about 2.1 B
  *        values (see #MAX_SORTED_SET_ORDS).  For example,
  *        if every document has 10 values (10 instances of
- *        {@link SortedSetDocValuesField}) added, then no
+ *        {@link Document2#addAtom}) added, then no
  *        more than ~210 M documents can be added to one
  *        segment. </li>
  *  </ul> */
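A quick check of the figure quoted in the javadoc above, assuming the cap referred to by #MAX_SORTED_SET_ORDS is Integer.MAX_VALUE (about 2.1 B):

    long maxOrds = Integer.MAX_VALUE;       // assumed per-segment ord cap, ~2.1 B
    int valuesPerDoc = 10;
    long maxDocs = maxOrds / valuesPerDoc;  // 214,748,364 docs, i.e. the "~210 M" above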
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index 562c9dc..68ffa2e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -669,7 +669,11 @@
     }
 
     @Override
-    public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) {
+    public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException {
+      if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
+        // Let super handle RANGE, PREFIX:
+        return super.intersect(compiled, startTerm);
+      }
       return new DirectIntersectTermsEnum(compiled, startTerm);
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index b5030ce..7db82ed 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -272,6 +272,10 @@
 
     @Override
     public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
+      if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
+        // Let super handle RANGE, PREFIX:
+        return super.intersect(compiled, startTerm);
+      }
       return new IntersectTermsEnum(compiled, startTerm);
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index 23065b2..c55fc44 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -54,8 +54,8 @@
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.automaton.ByteRunAutomaton;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
-import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
+import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.Outputs;
 import org.apache.lucene.util.fst.Util;
@@ -252,6 +252,10 @@
 
     @Override
     public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
+      if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
+        // Let super handle RANGE, PREFIX:
+        return super.intersect(compiled, startTerm);
+      }
       return new IntersectTermsEnum(compiled, startTerm);
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index c7ce7e1..37d27d8 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -850,14 +850,11 @@
 
     @Override
     public BytesRef next() throws IOException {
-      //System.out.println("te.next");
       current = fstEnum.next();
       if (current == null) {
-        //System.out.println("  END");
         return null;
       }
       didDecode = false;
-      //System.out.println("  term=" + field.name + ":" + current.input.utf8ToString());
       return current.input;
     }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java
index bad528f..2abcff5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
@@ -85,7 +85,7 @@
   }
 
   @Override
-  public void writeField(FieldInfo info, StorableField field) throws IOException {
+  public void writeField(FieldInfo info, IndexableField field) throws IOException {
     write(FIELD);
     write(Integer.toString(info.number));
     newLine();
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
index 341b8a3..407fccf 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BasePostingsFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -49,8 +49,8 @@
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     TermsEnum te = MultiFields.getTerms(r, "field").iterator(null);
@@ -90,23 +90,23 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     List<String> terms = new ArrayList<>();
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addLargeText("field", term);
       w.addDocument(doc);
     }
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "m" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addLargeText("field", term);
       w.addDocument(doc);
     }
     if (VERBOSE) {
@@ -159,33 +159,33 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     List<String> terms = new ArrayList<>();
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addLargeText("field", term);
       w.addDocument(doc);
     }
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "m" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addLargeText("field", term);
       w.addDocument(doc);
     }
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "mo" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addLargeText("field", term);
       w.addDocument(doc);
     }
     w.forceMerge(1);
@@ -215,7 +215,7 @@
     Collections.sort(terms);
     for(int i=terms.size()-1;i>=0;i--) {
       if (VERBOSE) {
-        System.out.println("TEST: seek to ord=" + i);
+        System.out.println("TEST: seek to ord=" + i + " term=" + terms.get(i));
       }
       te.seekExact(i);
       assertEquals(i, te.ord());
@@ -240,12 +240,12 @@
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<128;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "" + (char) i;
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term + " bytes=" + new BytesRef(term));
       }
-      doc.add(newStringField("field", term, Field.Store.NO));
+      doc.addAtom("field", term);
       w.addDocument(doc);
     }
     w.forceMerge(1);
@@ -279,23 +279,23 @@
     IndexWriter w = new IndexWriter(dir, iwc);
     List<String> terms = new ArrayList<>();
     for(int i=0;i<36;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "" + (char) (97+i);
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term);
       }
-      doc.add(newTextField("field", term, Field.Store.NO));
+      doc.addAtom("field", term);
       w.addDocument(doc);
     }
     for(int i=0;i<128;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String term = "m" + (char) i;
       terms.add(term);
       if (VERBOSE) {
         System.out.println("i=" + i + " term=" + term + " bytes=" + new BytesRef(term));
       }
-      doc.add(newStringField("field", term, Field.Store.NO));
+      doc.addAtom("field", term);
       w.addDocument(doc);
     }
     w.forceMerge(1);
@@ -326,13 +326,13 @@
     List<String> terms = new ArrayList<>();
     for(int i=0;i<30;i++) {
       for(int j=0;j<30;j++) {
-        Document doc = new Document();
+        Document doc = w.newDocument();
         String term = "" + (char) (97+i) + (char) (97+j);
         terms.add(term);
         if (VERBOSE) {
           System.out.println("term=" + term);
         }
-        doc.add(newTextField("body", term, Field.Store.NO));
+        doc.addLargeText("body", term);
         w.addDocument(doc);
       }
     }
@@ -364,16 +364,19 @@
   public void testSeekCeilNotFound() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
     // Get empty string in there!
-    doc.add(newStringField("field", "", Field.Store.NO));
+    doc.addAtom("field", "");
     w.addDocument(doc);
     
     for(int i=0;i<36;i++) {
-      doc = new Document();
+      doc = w.newDocument();
       String term = "" + (char) (97+i);
       String term2 = "a" + (char) (97+i);
-      doc.add(newTextField("field", term + " " + term2, Field.Store.NO));
+      doc.addAtom("field", term);
+      doc.addAtom("field", term2);
       w.addDocument(doc);
     }
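The last hunk replaces a two-token analyzed text field with an explicitly multi-valued atom field, so the test still indexes two terms per document without relying on tokenization. A condensed sketch of the pattern, assuming the branch's FieldTypes.setMultiValued (the field values here are illustrative):

    // Declare the field multi-valued once, then add several atoms;
    // each addAtom value is indexed as exactly one term.
    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.setMultiValued("field");

    Document doc = w.newDocument();
    doc.addAtom("field", "b");
    doc.addAtom("field", "ab");
    w.addDocument(doc);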
 
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 478ead5..1a402b9 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -164,7 +164,10 @@
   <property name="javac.debug" value="on"/>
   <property name="javac.source" value="1.8"/>
   <property name="javac.target" value="1.8"/>
+  <!-- nocommit put back
   <property name="javac.args" value="-Xlint -Xlint:-deprecation -Xlint:-serial -Xlint:-options -Xdoclint:all/protected -Xdoclint:-html -Xdoclint:-missing"/>
+  -->
+  <property name="javac.args" value=""/>
   <property name="javadoc.link" value="http://download.oracle.com/javase/8/docs/api/"/>
   <property name="javadoc.link.junit" value="http://junit.sourceforge.net/javadoc/"/>
   <property name="javadoc.packagelist.dir" location="${common.dir}/tools/javadoc"/>
@@ -373,7 +376,10 @@
   </target>
 
   <!-- for now enable only some doclint: -->
+  <!-- nocommit put back
   <property name="javadoc.args" value="-Xdoclint:all -Xdoclint:-html -Xdoclint:-missing"/>
+  -->
+  <property name="javadoc.args" value="-Xdoclint:none"/>
 
   <!-- Import custom ANT tasks. -->
   <import file="${common.dir}/tools/custom-tasks.xml" />
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java b/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
index 5e2934c..fb97b43 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
@@ -89,4 +89,4 @@
     }
   };
   
-}
\ No newline at end of file
+}
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/NumericTokenStream.java b/lucene/core/src/java/org/apache/lucene/analysis/NumericTokenStream.java
deleted file mode 100644
index aa1baff..0000000
--- a/lucene/core/src/java/org/apache/lucene/analysis/NumericTokenStream.java
+++ /dev/null
@@ -1,362 +0,0 @@
-package org.apache.lucene.analysis;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Objects;
-
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.document.DoubleField; // for javadocs
-import org.apache.lucene.document.FloatField; // for javadocs
-import org.apache.lucene.document.IntField; // for javadocs
-import org.apache.lucene.document.LongField; // for javadocs
-import org.apache.lucene.search.NumericRangeFilter; // for javadocs
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeFactory;
-import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.AttributeReflector;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <b>Expert:</b> This class provides a {@link TokenStream}
- * for indexing numeric values that can be used by {@link
- * NumericRangeQuery} or {@link NumericRangeFilter}.
- *
- * <p>Note that for simple usage, {@link IntField}, {@link
- * LongField}, {@link FloatField} or {@link DoubleField} is
- * recommended.  These fields disable norms and
- * term freqs, as they are not usually needed during
- * searching.  If you need to change these settings, you
- * should use this class.
- *
- * <p>Here's an example usage, for an <code>int</code> field:
- *
- * <pre class="prettyprint">
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value), fieldType);
- *  document.add(field);
- * </pre>
- *
- * <p>For optimal performance, re-use the TokenStream and Field instance
- * for more than one document:
- *
- * <pre class="prettyprint">
- *  NumericTokenStream stream = new NumericTokenStream(precisionStep);
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, stream, fieldType);
- *  Document document = new Document();
- *  document.add(field);
- *
- *  for(all documents) {
- *    stream.setIntValue(value)
- *    writer.addDocument(document);
- *  }
- * </pre>
- *
- * <p>This stream is not intended to be used in analyzers;
- * it's more for iterating the different precisions during
- * indexing a specific numeric value.</p>
-
- * <p><b>NOTE</b>: as token streams are only consumed once
- * the document is added to the index, if you index more
- * than one numeric field, use a separate <code>NumericTokenStream</code>
- * instance for each.</p>
- *
- * <p>See {@link NumericRangeQuery} for more details on the
- * <a
- * href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * parameter as well as how numeric fields work under the hood.</p>
- *
- * @since 2.9
- */
-public final class NumericTokenStream extends TokenStream {
-
-  /** The full precision token gets this token type assigned. */
-  public static final String TOKEN_TYPE_FULL_PREC  = "fullPrecNumeric";
-
-  /** The lower precision tokens gets this token type assigned. */
-  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
-  
-  /** <b>Expert:</b> Use this attribute to get the details of the currently generated token.
-   * @lucene.experimental
-   * @since 4.0
-   */
-  public interface NumericTermAttribute extends Attribute {
-    /** Returns current shift value, undefined before first token */
-    int getShift();
-    /** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
-    long getRawValue();
-    /** Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) */
-    int getValueSize();
-    
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void init(long value, int valSize, int precisionStep, int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void setShift(int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    int incShift();
-  }
-  
-  // just a wrapper to prevent adding CTA
-  private static final class NumericAttributeFactory extends AttributeFactory {
-    private final AttributeFactory delegate;
-
-    NumericAttributeFactory(AttributeFactory delegate) {
-      this.delegate = delegate;
-    }
-  
-    @Override
-    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
-      if (CharTermAttribute.class.isAssignableFrom(attClass))
-        throw new IllegalArgumentException("NumericTokenStream does not support CharTermAttribute.");
-      return delegate.createAttributeInstance(attClass);
-    }
-  }
-
-  /** Implementation of {@link NumericTermAttribute}.
-   * @lucene.internal
-   * @since 4.0
-   */
-  public static final class NumericTermAttributeImpl extends AttributeImpl implements NumericTermAttribute,TermToBytesRefAttribute {
-    private long value = 0L;
-    private int valueSize = 0, shift = 0, precisionStep = 0;
-    private BytesRefBuilder bytes = new BytesRefBuilder();
-    
-    /** 
-     * Creates, but does not yet initialize this attribute instance
-     * @see #init(long, int, int, int)
-     */
-    public NumericTermAttributeImpl() {}
-
-    @Override
-    public BytesRef getBytesRef() {
-      return bytes.get();
-    }
-    
-    @Override
-    public void fillBytesRef() {
-      assert valueSize == 64 || valueSize == 32;
-      if (valueSize == 64) {
-        NumericUtils.longToPrefixCoded(value, shift, bytes);
-      } else {
-        NumericUtils.intToPrefixCoded((int) value, shift, bytes);
-      }
-    }
-
-    @Override
-    public int getShift() { return shift; }
-    @Override
-    public void setShift(int shift) { this.shift = shift; }
-    @Override
-    public int incShift() {
-      return (shift += precisionStep);
-    }
-
-    @Override
-    public long getRawValue() { return value  & ~((1L << shift) - 1L); }
-    @Override
-    public int getValueSize() { return valueSize; }
-
-    @Override
-    public void init(long value, int valueSize, int precisionStep, int shift) {
-      this.value = value;
-      this.valueSize = valueSize;
-      this.precisionStep = precisionStep;
-      this.shift = shift;
-    }
-
-    @Override
-    public void clear() {
-      // this attribute has no contents to clear!
-      // we keep it untouched as it's fully controlled by outer class.
-    }
-    
-    @Override
-    public void reflectWith(AttributeReflector reflector) {
-      fillBytesRef();
-      reflector.reflect(TermToBytesRefAttribute.class, "bytes", bytes.toBytesRef());
-      reflector.reflect(NumericTermAttribute.class, "shift", shift);
-      reflector.reflect(NumericTermAttribute.class, "rawValue", getRawValue());
-      reflector.reflect(NumericTermAttribute.class, "valueSize", valueSize);
-    }
-  
-    @Override
-    public void copyTo(AttributeImpl target) {
-      final NumericTermAttribute a = (NumericTermAttribute) target;
-      a.init(value, valueSize, precisionStep, shift);
-    }
-    
-    @Override
-    public NumericTermAttributeImpl clone() {
-      NumericTermAttributeImpl t = (NumericTermAttributeImpl)super.clone();
-      // Do a deep clone
-      t.bytes = new BytesRefBuilder();
-      t.bytes.copyBytes(bytes.get());
-      return t;
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(precisionStep, shift, value, valueSize);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) return true;
-      if (obj == null) return false;
-      if (getClass() != obj.getClass()) return false;
-      NumericTermAttributeImpl other = (NumericTermAttributeImpl) obj;
-      if (precisionStep != other.precisionStep) return false;
-      if (shift != other.shift) return false;
-      if (value != other.value) return false;
-      if (valueSize != other.valueSize) return false;
-      return true;
-    }
-  }
-  
-  /**
-   * Creates a token stream for numeric values using the default <code>precisionStep</code>
-   * {@link NumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public NumericTokenStream() {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, NumericUtils.PRECISION_STEP_DEFAULT);
-  }
-  
-  /**
-   * Creates a token stream for numeric values with the specified
-   * <code>precisionStep</code>. The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public NumericTokenStream(final int precisionStep) {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
-  }
-
-  /**
-   * Expert: Creates a token stream for numeric values with the specified
-   * <code>precisionStep</code> using the given
-   * {@link org.apache.lucene.util.AttributeFactory}.
-   * The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public NumericTokenStream(AttributeFactory factory, final int precisionStep) {
-    super(new NumericAttributeFactory(factory));
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    numericAtt.setShift(-precisionStep);
-  }
-
-  /**
-   * Initializes the token stream with the supplied <code>long</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new NumericTokenStream(precisionStep).setLongValue(value))</code>
-   */
-  public NumericTokenStream setLongValue(final long value) {
-    numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>int</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))</code>
-   */
-  public NumericTokenStream setIntValue(final int value) {
-    numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>double</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new NumericTokenStream(precisionStep).setDoubleValue(value))</code>
-   */
-  public NumericTokenStream setDoubleValue(final double value) {
-    numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>float</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))</code>
-   */
-  public NumericTokenStream setFloatValue(final float value) {
-    numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  @Override
-  public void reset() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    numericAtt.setShift(-precisionStep);
-  }
-
-  @Override
-  public boolean incrementToken() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    
-    // this will only clear all other attributes in this TokenStream
-    clearAttributes();
-
-    final int shift = numericAtt.incShift();
-    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
-    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
-    return (shift < valSize);
-  }
-
-  /** Returns the precision step. */
-  public int getPrecisionStep() {
-    return precisionStep;
-  }
-
-  @Override
-  public String toString() {
-    // We override default because it can throw cryptic "illegal shift value":
-    return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
-  }
-  
-  // members
-  private final NumericTermAttribute numericAtt = addAttribute(NumericTermAttribute.class);
-  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
-  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
-  
-  private int valSize = 0; // valSize==0 means not initialized
-  private final int precisionStep;
-}
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
index ea6d696..284941a 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
@@ -23,8 +23,6 @@
 
 import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeFactory;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
index e6d7a92..55e90ba 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader; // javadocs
 import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.TermState;
@@ -24,6 +25,8 @@
  * Holds all state required for {@link PostingsReaderBase}
  * to produce a {@link DocsEnum} without re-seeking the
  * terms dict.
+ *
+ * @lucene.internal
  */
 public class BlockTermState extends OrdTermState {
   /** how many docs have this term */
@@ -37,6 +40,11 @@
   // TODO: update BTR to nuke this
   public long blockFilePointer;
 
+  /** True if this term is "real" (i.e., not an auto-prefix term or
+   *  some other "secret" term; currently only {@link BlockTreeTermsReader}
+   *  sets this). */
+  public boolean isRealTerm;
+
   /** Sole constructor. (For invocation by subclass 
    *  constructors, typically implicit.) */
   protected BlockTermState() {
@@ -51,10 +59,11 @@
     totalTermFreq = other.totalTermFreq;
     termBlockOrd = other.termBlockOrd;
     blockFilePointer = other.blockFilePointer;
+    isRealTerm = other.isRealTerm;
   }
 
   @Override
   public String toString() {
-    return "docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq + " termBlockOrd=" + termBlockOrd + " blockFP=" + blockFilePointer;
+    return "docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq + " termBlockOrd=" + termBlockOrd + " blockFP=" + blockFilePointer + " isRealTerm=" + isRealTerm;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
index 7832726..c983447 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
@@ -99,9 +99,9 @@
       docBase += maxDoc;
     }
 
-    Fields mergedFields = new MappedMultiFields(mergeState, 
-                                                new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
-                                                                slices.toArray(ReaderSlice.EMPTY_ARRAY)));
+    MultiFields mf = new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
+                                     slices.toArray(ReaderSlice.EMPTY_ARRAY));
+    Fields mergedFields = new MappedMultiFields(mergeState, mf);
     write(mergedFields);
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
index 9d85f12..d0ea256 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
@@ -20,12 +20,13 @@
 import java.io.IOException;
 import java.io.Reader;
 
-import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DocumentStoredFieldVisitor;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.index.MergeState;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -36,7 +37,7 @@
  * <ol>
  *   <li>For every document, {@link #startDocument()} is called,
  *       informing the Codec that a new document has started.
- *   <li>{@link #writeField(FieldInfo, StorableField)} is called for 
+ *   <li>{@link #writeField(FieldInfo, IndexableField)} is called for 
  *       each field in the document.
  *   <li>After all documents have been written, {@link #finish(FieldInfos, int)} 
  *       is called for verification/sanity-checks.
@@ -62,7 +63,7 @@
   public void finishDocument() throws IOException {}
 
   /** Writes a single stored field. */
-  public abstract void writeField(FieldInfo info, StorableField field) throws IOException;
+  public abstract void writeField(FieldInfo info, IndexableField field) throws IOException;
   
   /** Called before {@link #close()}, passing in the number
    *  of documents that were written. Note that this is 
@@ -101,6 +102,13 @@
     finish(mergeState.mergeFieldInfos, docCount);
     return docCount;
   }
+
+  static final IndexableFieldType STORED_TYPE = new IndexableFieldType() {
+    @Override
+    public boolean stored() {
+      return true;
+    }
+  };
   
   /** 
    * A visitor that adds every field it sees.
@@ -115,7 +123,7 @@
    * }
    * </pre>
    */
-  protected class MergeVisitor extends StoredFieldVisitor implements StorableField {
+  protected class MergeVisitor extends StoredFieldVisitor implements IndexableField {
     BytesRef binaryValue;
     String stringValue;
     Number numericValue;
@@ -192,7 +200,7 @@
 
     @Override
     public IndexableFieldType fieldType() {
-      return StoredField.TYPE;
+      return STORED_TYPE;
     }
 
     @Override
@@ -210,11 +218,6 @@
       return numericValue;
     }
 
-    @Override
-    public Reader readerValue() {
-      return null;
-    }
-    
     void reset(FieldInfo field) {
       if (remapper != null) {
         // field numbers are not aligned, we need to remap to the new field number
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/AutoPrefixTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/AutoPrefixTermsWriter.java
new file mode 100644
index 0000000..0a7d2f0
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/AutoPrefixTermsWriter.java
@@ -0,0 +1,416 @@
+package org.apache.lucene.codecs.blocktree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.StringHelper;
+
+// TODO: instead of inlining auto-prefix terms with normal terms,
+// we could write them into their own virtual/private field.  This
+// would make search time a bit more complex, since we'd need to
+// merge sort between two TermEnums, but it would also make stats
+// API (used by CheckIndex -verbose) easier to implement since we could
+// just walk this virtual field and gather its stats)
+
+/** Used in the first pass when writing a segment to locate
+ *  "appropriate" auto-prefix terms to pre-compile into the index.
+ *  This visits every term in the index to find prefixes that
+ *  match at least min and at most max terms. */
+
+class AutoPrefixTermsWriter {
+
+  //static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
+  //static boolean DEBUG2 = BlockTreeTermsWriter.DEBUG2;
+  //static boolean DEBUG = true;
+  //static boolean DEBUG2 = true;
+
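+  // Usage sketch (hypothetical caller; BlockTreeTermsWriter.write is the real
+  // call site): gather the auto-prefix terms for one field's Terms, then walk
+  // them in sorted order:
+  //
+  //   AutoPrefixTermsWriter apw = new AutoPrefixTermsWriter(segment, terms, 25, 48);
+  //   for (PrefixTerm pt : apw.prefixes) {
+  //     TermsEnum matching = pt.getTermsEnum(terms.iterator(null));
+  //     ...
+  //   }
+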
+  /** Describes a range of term-space to match, either a simple prefix
+   *  (foo*) or a floor-block range of a prefix (e.g. foo[a-m]*,
+   *  foo[n-z]*) when there are too many terms starting with foo*. */
+  public static final class PrefixTerm implements Comparable<PrefixTerm> {
+    /** Common prefix */
+    public final byte[] prefix;
+
+    /** If this is -2, this is a normal prefix (foo*), else it's the minimum lead byte of the suffix (e.g. 'd' in foo[d-m]*). */
+    public final int floorLeadStart;
+
+    /** The lead byte (inclusive) of the suffix for the term range we match (e.g. 'm' in foo[d-m]*); this is ignored when
+     *  floorLeadStart is -2. */
+    public final int floorLeadEnd;
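+    // Example (hypothetical values): prefix="foo", floorLeadStart='d',
+    // floorLeadEnd='m' covers the term range food* through foom*, while
+    // floorLeadStart == -2 marks the simple prefix case foo*.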
+
+    public final BytesRef term;
+
+    /** Sole constructor. */
+    public PrefixTerm(byte[] prefix, int floorLeadStart, int floorLeadEnd) {
+      this.prefix = prefix;
+      this.floorLeadStart = floorLeadStart;
+      this.floorLeadEnd = floorLeadEnd;
+      this.term = toBytesRef(prefix, floorLeadStart);
+
+      assert floorLeadEnd >= floorLeadStart;
+      assert floorLeadEnd >= 0;
+      assert floorLeadStart == -2 || floorLeadStart >= 0;
+
+      // We should never create an empty-string prefix term:
+      assert prefix.length > 0 || floorLeadStart != -2 || floorLeadEnd != 0xff;
+    }
+
+    @Override
+    public String toString() {
+      String s = brToString(new BytesRef(prefix));
+      if (floorLeadStart == -2) {
+        s += "[-" + Integer.toHexString(floorLeadEnd) + "]";
+      } else {
+        s += "[" + Integer.toHexString(floorLeadStart) + "-" + Integer.toHexString(floorLeadEnd) + "]";
+      }
+      return s;
+    }
+
+    @Override
+    public int compareTo(PrefixTerm other) {
+      int cmp = term.compareTo(other.term);
+      if (cmp == 0) {
+        if (prefix.length != other.prefix.length) {
+          return prefix.length - other.prefix.length;
+        }
+
+        // On tie, sort the bigger floorLeadEnd earlier, since it
+        // spans more terms; during intersect we want to encounter it
+        // first so we can use it if the automaton accepts the larger range:
+        cmp = other.floorLeadEnd - floorLeadEnd;
+      }
+
+      return cmp;
+    }
+
+    /** Returns the leading term for this prefix term, e.g. "foo" (for
+     *  the foo* prefix) or "foom" (for the foo[m-z]* case). */
+    private static BytesRef toBytesRef(byte[] prefix, int floorLeadStart) {
+      BytesRef br;
+      if (floorLeadStart != -2) {
+        assert floorLeadStart >= 0;
+        br = new BytesRef(prefix.length+1);
+      } else {
+        br = new BytesRef(prefix.length);
+      }
+      System.arraycopy(prefix, 0, br.bytes, 0, prefix.length);
+      br.length = prefix.length;
+      if (floorLeadStart != -2) {
+        assert floorLeadStart >= 0;
+        br.bytes[br.length++] = (byte) floorLeadStart;
+      }
+
+      return br;
+    }
+
+    public int compareTo(BytesRef term) {
+      return this.term.compareTo(term);
+    }
+
+    public TermsEnum getTermsEnum(TermsEnum in) {
+
+      final BytesRef prefixRef = new BytesRef(prefix);
+
+      return new FilteredTermsEnum(in) {
+          {
+            setInitialSeekTerm(term);
+          }
+
+          @Override
+          protected AcceptStatus accept(BytesRef term) {
+            if (StringHelper.startsWith(term, prefixRef) &&
+                (floorLeadEnd == -1 || term.length == prefixRef.length || (term.bytes[term.offset + prefixRef.length] & 0xff) <= floorLeadEnd)) {
+              return AcceptStatus.YES;
+            } else {
+              return AcceptStatus.END;
+            }
+          }
+        };
+    }
+  }
+
+  // for debugging
+  static String brToString(BytesRef b) {
+    try {
+      return b.utf8ToString() + " " + b + " len=" + b.length;
+    } catch (Throwable t) {
+      // If BytesRef isn't actually UTF8, or it's eg a
+      // prefix of UTF8 that ends mid-unicode-char, we
+      // fallback to hex:
+      return b.toString();
+    }
+  }
+
+  final List<PrefixTerm> prefixes = new ArrayList<>();
+  private final int minItemsInPrefix;
+  private final int maxItemsInPrefix;
+
+  private final BytesRefBuilder lastTerm = new BytesRefBuilder();
+
+  // Records the index into pending where the current prefix at that
+  // length "started"; for example, if the current term starts with 't',
+  // prefixStarts[0] is the index into pending of the first
+  // term/sub-block starting with 't'.  We use this to figure out when
+  // to write a new block:
+  private int[] prefixStarts = new int[8];
+  private List<Object> pending = new ArrayList<>();
+
+  private final String segment;
+
+  public AutoPrefixTermsWriter(String segment, Terms terms, int minItemsInPrefix, int maxItemsInPrefix) throws IOException {
+    this.minItemsInPrefix = minItemsInPrefix;
+    this.maxItemsInPrefix = maxItemsInPrefix;
+    this.segment = segment;
+    //if (DEBUG) System.out.println("autoprefix terms=" + terms);
+    TermsEnum termsEnum = terms.iterator(null);
+    while (true) {
+      BytesRef term = termsEnum.next();
+      if (term == null) {
+        break;
+      }
+      //if (DEBUG) System.out.println("pushTerm: " + brToString(term));
+      pushTerm(term);
+    }
+    //if (DEBUG) System.out.println("done push terms");
+
+    if (pending.size() > 1) {
+      pushTerm(BlockTreeTermsWriter.EMPTY_BYTES_REF);
+
+      // Also maybe save floor prefixes in root block; this can be a biggish perf gain for large ranges:
+      /*
+      System.out.println("root block pending.size=" + pending.size());
+      for(Object o : pending) {
+        System.out.println("  " + o);
+      }
+      */
+      while (pending.size() >= minItemsInPrefix) {
+        savePrefixes(0, pending.size());
+      }
+    }
+
+    Collections.sort(prefixes);
+  }
+
+  /** Pushes the new term to the top of the stack, and writes new blocks. */
+  private void pushTerm(BytesRef text) throws IOException {
+    int limit = Math.min(lastTerm.length(), text.length);
+
+    // Find common prefix between last term and current term:
+    int pos = 0;
+    while (pos < limit && lastTerm.byteAt(pos) == text.bytes[text.offset+pos]) {
+      pos++;
+    }
+
+    //if (DEBUG) System.out.println("  shared=" + pos + "  lastTerm.length=" + lastTerm.length());
+
+    // Close the "abandoned" suffix now:
+    for(int i=lastTerm.length()-1;i>=pos;i--) {
+
+      // How many items on top of the stack share the current suffix
+      // we are closing:
+      int prefixTopSize = pending.size() - prefixStarts[i];
+
+      while (prefixTopSize >= minItemsInPrefix) {       
+        //if (DEBUG) System.out.println("pushTerm i=" + i + " prefixTopSize=" + prefixTopSize + " minItemsInPrefix=" + minItemsInPrefix);
+        savePrefixes(i+1, prefixTopSize);
+        //prefixStarts[i] -= prefixTopSize;
+        //System.out.println("    after savePrefixes: " + (pending.size() - prefixStarts[i]) + " pending.size()=" + pending.size() + " start=" + prefixStarts[i]);
+
+        // For large floor blocks, it's possible we should now re-run on the new prefix terms we just created:
+        prefixTopSize = pending.size() - prefixStarts[i];
+      }
+    }
+
+    if (prefixStarts.length < text.length) {
+      prefixStarts = ArrayUtil.grow(prefixStarts, text.length);
+    }
+
+    // Init new tail:
+    for(int i=pos;i<text.length;i++) {
+      prefixStarts[i] = pending.size();
+    }
+
+    lastTerm.copyBytes(text);
+
+    // Only append the first (optional) empty string, not the fake last one used to close all prefixes:
+    if (text.length > 0 || pending.isEmpty()) {
+      byte[] termBytes = new byte[text.length];
+      System.arraycopy(text.bytes, text.offset, termBytes, 0, text.length);
+      pending.add(termBytes);
+    }
+  }
+  
+  void savePrefixes(int prefixLength, int count) throws IOException {
+
+    assert count > 0;
+
+    //if (DEBUG2) {
+    //  BytesRef br = new BytesRef(lastTerm.bytes());
+    //  br.length = prefixLength;
+    //  System.out.println("  savePrefixes: seg=" + segment + " " + brToString(br) + " count=" + count + " pending.size()=" + pending.size());
+    //}
+
+    int lastSuffixLeadLabel = -2;
+
+    int start = pending.size()-count;
+    assert start >= 0;
+
+    int end = pending.size();
+    int nextBlockStart = start;
+    int nextFloorLeadLabel = -1;
+    int prefixCount = 0;
+    int pendingCount = 0;
+    PrefixTerm lastPTEntry = null;
+    for (int i=start; i<end; i++) {
+
+      byte[] termBytes;
+      Object o = pending.get(i);
+      PrefixTerm ptEntry;
+      if (o instanceof byte[]) {
+        ptEntry = null;
+        termBytes = (byte[]) o;
+      } else {
+        ptEntry = (PrefixTerm) o;
+        termBytes = ptEntry.term.bytes;
+        if (ptEntry.prefix.length != prefixLength) {
+          assert ptEntry.prefix.length > prefixLength;
+          ptEntry = null;
+        }
+      }
+      pendingCount++;
+
+      //if (DEBUG) System.out.println("    check term=" + brToString(new BytesRef(termBytes)) + " pt=" + ptEntry);
+
+      int suffixLeadLabel;
+
+      if (termBytes.length == prefixLength) {
+        // Suffix is 0, i.e. prefix 'foo' and term is
+        // 'foo' so the term has empty string suffix
+        // in this block
+        assert lastSuffixLeadLabel == -2;
+        suffixLeadLabel = -2;
+      } else {
+        suffixLeadLabel = termBytes[prefixLength] & 0xff;
+      }
+
+      // if (DEBUG) System.out.println("  i=" + i + " ent=" + ent + " suffixLeadLabel=" + suffixLeadLabel);
+
+      if (suffixLeadLabel != lastSuffixLeadLabel) {
+        // This is a boundary, a chance to make an auto-prefix term if we want:
+
+        // When we are "recursing" (generating auto-prefix terms on a block of
+        // floor'd auto-prefix terms), this assert is non-trivial because it
+        // ensures the floorLeadEnd of the previous terms is in fact less
+        // than the lead start of the current entry:
+        assert suffixLeadLabel > lastSuffixLeadLabel: "suffixLeadLabel=" + suffixLeadLabel + " vs lastSuffixLeadLabel=" + lastSuffixLeadLabel;
+
+        // NOTE: must check nextFloorLeadLabel in case minItemsInPrefix is 2 and prefix is 'a' and we've seen 'a' and then 'aa'
+        if (pendingCount >= minItemsInPrefix && end-nextBlockStart > maxItemsInPrefix && nextFloorLeadLabel != -1) {
+          // The count is too large for one block, so we must break it into "floor" blocks, where we record
+          // the leading label of the suffix of the first term in each floor block, so at search time we can
+          // jump to the right floor block.  We just use a naive greedy segmenter here: make a new floor
+          // block as soon as we have at least minItemsInPrefix.  This is not always best: it often produces
+          // a too-small block as the final block:
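+          // (e.g., hypothetically: 100 pending items under prefix 'f' with
+          // maxItemsInPrefix=48 would be cut into floor ranges like f[a-m]*
+          // and f[n-z]*, possibly leaving a small final range)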
+
+          // If the last entry was another prefix term of the same length, then it represents a range of terms, so we must use its ending
+          // prefix label as our ending label:
+          if (lastPTEntry != null) {
+            lastSuffixLeadLabel = lastPTEntry.floorLeadEnd;
+          }
+
+          savePrefix(prefixLength, nextFloorLeadLabel, lastSuffixLeadLabel);
+          pendingCount = 0;
+
+          prefixCount++;
+          nextFloorLeadLabel = suffixLeadLabel;
+          nextBlockStart = i;
+        }
+
+        if (nextFloorLeadLabel == -1) {
+          nextFloorLeadLabel = suffixLeadLabel;
+          //if (DEBUG) System.out.println("set first lead label=" + nextFloorLeadLabel);
+        }
+
+        lastSuffixLeadLabel = suffixLeadLabel;
+      }
+      lastPTEntry = ptEntry;
+    }
+
+    // Write last block, if any:
+    if (nextBlockStart < end) {
+      //System.out.println("  lastPTEntry=" + lastPTEntry + " lastSuffixLeadLabel=" + lastSuffixLeadLabel);
+      if (lastPTEntry != null) {
+        lastSuffixLeadLabel = lastPTEntry.floorLeadEnd;
+      }
+      assert lastSuffixLeadLabel >= nextFloorLeadLabel: "lastSuffixLeadLabel=" + lastSuffixLeadLabel + " nextFloorLeadLabel=" + nextFloorLeadLabel;
+      if (prefixCount == 0) {
+        if (prefixLength > 0) {
+          savePrefix(prefixLength, -2, 0xff);
+          prefixCount++;
+        } else {
+          // Don't add a prefix term for all terms in the index!
+        }
+      } else {
+        if (lastSuffixLeadLabel == -2) {
+          // Special case when closing the empty string root block:
+          lastSuffixLeadLabel = 0xff;
+        }
+        savePrefix(prefixLength, nextFloorLeadLabel, lastSuffixLeadLabel);
+        prefixCount++;
+      }
+    }
+
+    // Remove slice from the top of the pending stack, that we just wrote:
+    int sizeToClear = count;
+    if (prefixCount > 1) {
+      Object o = pending.get(pending.size()-count);
+      if (o instanceof byte[] && ((byte[]) o).length == prefixLength) {
+        // If we were just asked to write all f* terms, but there were too many and so we made floor blocks, the exact term 'f' will remain
+        // as its own item, followed by floor block terms like f[a-m]*, f[n-z]*, so in this case we leave 3 (not 2) items on the pending stack:
+        sizeToClear--;
+      }
+    }
+    pending.subList(pending.size()-sizeToClear, pending.size()).clear();
+
+    // Append prefix terms for each prefix, since these count like real terms that also need to be "rolled up":
+    for(int i=0;i<prefixCount;i++) {
+      PrefixTerm pt = prefixes.get(prefixes.size()-(prefixCount-i));
+      pending.add(pt);
+    }
+  }
+
+  private void savePrefix(int prefixLength, int floorLeadStart, int floorLeadEnd) {
+    byte[] prefix = new byte[prefixLength];
+    System.arraycopy(lastTerm.bytes(), 0, prefix, 0, prefixLength);
+    assert floorLeadStart != -1;
+    assert floorLeadEnd != -1;
+
+    PrefixTerm pt = new PrefixTerm(prefix, floorLeadStart, floorLeadEnd); 
+    //if (DEBUG2) System.out.println("    savePrefix: seg=" + segment + " " + pt);
+    prefixes.add(pt);
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
index 777356e..2bed20d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
@@ -90,8 +90,11 @@
   /** Initial terms format. */
   public static final int VERSION_START = 0;
 
+  /** Auto-prefix terms. */
+  public static final int VERSION_AUTO_PREFIX_TERMS = 1;
+
   /** Current terms format. */
-  public static final int VERSION_CURRENT = VERSION_START;
+  public static final int VERSION_CURRENT = VERSION_AUTO_PREFIX_TERMS;
 
   /** Extension of terms index file */
   static final String TERMS_INDEX_EXTENSION = "tip";
@@ -116,7 +119,7 @@
 
   final String segment;
   
-  private final int version;
+  final int version;
 
   /** Sole constructor. */
   public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
index 366be20..6fd9292 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
@@ -25,6 +25,8 @@
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.PostingsWriterBase;
+import org.apache.lucene.codecs.blocktree.AutoPrefixTermsWriter.PrefixTerm;
+import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
@@ -87,6 +89,16 @@
  * stride) each term's metadata for each set of terms
  * between two index terms.
  * <p>
+ *
+ * If {@code minItemsInAutoPrefix} is not zero, then for
+ * {@link IndexOptions#DOCS} fields we detect prefixes that match
+ * "enough" terms and insert auto-prefix terms into the index, which are
+ * used by {@link Terms#intersect} at search time to speed up prefix
+ * and range queries.  Apart from {@link Terms#intersect}, these
+ * auto-prefix terms are invisible to all other APIs (they don't change
+ * term stats, don't show up in {@link TermsEnum}, etc.).
+ * <p>
+ *
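+ * For example, here is a sketch of constructing a writer with auto-prefix
+ * terms enabled (the 25/48 values are arbitrary, and {@code state} and
+ * {@code postingsWriter} are assumed to already exist):
+ *
+ * <pre class="prettyprint">
+ *   // blocks of 25..48 items; auto-prefix terms matching 25..48 items:
+ *   FieldsConsumer termsDict = new BlockTreeTermsWriter(state, postingsWriter,
+ *                                                       25, 48, 25, 48);
+ * </pre>
+ * <p>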
  * Files:
  * <ul>
  *   <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
@@ -200,7 +212,9 @@
    *  #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. */
   public final static int DEFAULT_MAX_BLOCK_SIZE = 48;
 
-  // public final static boolean DEBUG = false;
+  //public static boolean DEBUG = true;
+  //public static boolean DEBUG2 = true;
+
   //private final static boolean SAVE_DOT_FILES = false;
 
   private final IndexOutput termsOut;
@@ -208,6 +222,8 @@
   final int maxDoc;
   final int minItemsInBlock;
   final int maxItemsInBlock;
+  final int minItemsInAutoPrefix;
+  final int maxItemsInAutoPrefix;
 
   final PostingsWriterBase postingsWriter;
   final FieldInfos fieldInfos;
@@ -243,34 +259,70 @@
 
   private final List<FieldMetaData> fields = new ArrayList<>();
 
-  // private final String segment;
+  private final String segment;
+
+  final FixedBitSet prefixDocs;
+
+  /** Reused in getAutoPrefixTermsEnum: */
+  final FixedBitsTermsEnum prefixFixedBitsTermsEnum;
+
+  /** Reused in getAutoPrefixTermsEnum: */
+  private TermsEnum prefixTermsEnum;
+
+  /** Reused in getAutoPrefixTermsEnum: */
+  private DocsEnum prefixDocsEnum;
+
+  /** Create a new writer, with auto-prefix terms disabled (the default). */
+  public BlockTreeTermsWriter(
+                              SegmentWriteState state,
+                              PostingsWriterBase postingsWriter,
+                              int minItemsInBlock,
+                              int maxItemsInBlock) throws IOException {
+    this(state, postingsWriter, minItemsInBlock, maxItemsInBlock, 0, 0);
+  }
+
 
   /** Create a new writer.  The number of items (terms or
    *  sub-blocks) per block will aim to be between
    *  minItemsPerBlock and maxItemsPerBlock, though in some
-   *  cases the blocks may be smaller than the min. */
+   *  cases the blocks may be smaller than the min.
+   *  For {@link IndexOptions#DOCS} fields, this terms dictionary will
+   *  insert automatically generated prefix terms for common
+   *  prefixes, as long as each prefix matches at least
+   *  {@code minItemsInAutoPrefix} other terms or prefixes,
+   *  and at most {@code maxItemsInAutoPrefix} other terms
+   *  or prefixes.  Set {@code minItemsInAutoPrefix} to 0
+   *  to disable auto-prefix terms. */
   public BlockTreeTermsWriter(SegmentWriteState state,
                               PostingsWriterBase postingsWriter,
                               int minItemsInBlock,
-                              int maxItemsInBlock)
+                              int maxItemsInBlock,
+                              int minItemsInAutoPrefix,
+                              int maxItemsInAutoPrefix)
     throws IOException
   {
     validateSettings(minItemsInBlock, maxItemsInBlock);
+    validateAutoPrefixSettings(minItemsInAutoPrefix, maxItemsInAutoPrefix);
 
-    if (minItemsInBlock <= 1) {
-      throw new IllegalArgumentException("minItemsInBlock must be >= 2; got " + minItemsInBlock);
-    }
-    if (minItemsInBlock > maxItemsInBlock) {
-      throw new IllegalArgumentException("maxItemsInBlock must be >= minItemsInBlock; got maxItemsInBlock=" + maxItemsInBlock + " minItemsInBlock=" + minItemsInBlock);
-    }
-    if (maxItemsInBlock < 2*(minItemsInBlock-1)) {
-      throw new IllegalArgumentException("maxItemsInBlock must be at least 2*(minItemsInBlock-1); got maxItemsInBlock=" + maxItemsInBlock + " minItemsInBlock=" + minItemsInBlock);
-    }
+    this.minItemsInBlock = minItemsInBlock;
+    this.maxItemsInBlock = maxItemsInBlock;
 
     this.maxDoc = state.segmentInfo.getDocCount();
     this.fieldInfos = state.fieldInfos;
-    this.minItemsInBlock = minItemsInBlock;
-    this.maxItemsInBlock = maxItemsInBlock;
+    this.segment = state.segmentInfo.name;
+
+    if (minItemsInAutoPrefix != 0) {
+      // TODO: can we use a compressed bitset instead?  one that auto-upgrades if it's dense enough...
+      prefixDocs = new FixedBitSet(state.segmentInfo.getDocCount());
+      prefixFixedBitsTermsEnum = new FixedBitsTermsEnum(prefixDocs);
+    } else {
+      prefixDocs = null;
+      prefixFixedBitsTermsEnum = null;
+    }
+
+    this.minItemsInAutoPrefix = minItemsInAutoPrefix;
+    this.maxItemsInAutoPrefix = maxItemsInAutoPrefix;
+
     this.postingsWriter = postingsWriter;
 
     final String termsName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BlockTreeTermsReader.TERMS_EXTENSION);
@@ -321,33 +373,108 @@
     }
   }
 
+  public static void validateAutoPrefixSettings(int minItemsInAutoPrefix,
+                                                int maxItemsInAutoPrefix) {
+    if (minItemsInAutoPrefix != 0) {
+      if (minItemsInAutoPrefix < 2) {
+        throw new IllegalArgumentException("minItemsInAutoPrefix must be at least 2; got minItemsInAutoPrefix=" + minItemsInAutoPrefix);
+      }
+      if (minItemsInAutoPrefix > maxItemsInAutoPrefix) {
+        throw new IllegalArgumentException("maxItemsInAutoPrefix must be >= minItemsInAutoPrefix; got maxItemsInAutoPrefix=" + maxItemsInAutoPrefix + " minItemsInAutoPrefix=" + minItemsInAutoPrefix);
+      }
+      if (2*(minItemsInAutoPrefix-1) > maxItemsInAutoPrefix) {
+        throw new IllegalArgumentException("maxItemsInAutoPrefix must be at least 2*(minItemsInAutoPrefix-1); got maxItemsInAutoPrefix=" + maxItemsInAutoPrefix + " minItemsInAutoPrefix=" + minItemsInAutoPrefix);
+      }
+    } else if (maxItemsInAutoPrefix != 0) {
+      throw new IllegalArgumentException("maxItemsInAutoPrefix must be 0 (disabled) when minItemsInAutoPrefix is 0");
+    }
+  }
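+  // For example: (0, 0) disables auto-prefix terms entirely, while (2, 2) is
+  // the smallest legal non-zero setting, since maxItemsInAutoPrefix must be
+  // at least 2*(minItemsInAutoPrefix-1).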
+
   @Override
   public void write(Fields fields) throws IOException {
+    //if (DEBUG) System.out.println("\nBTTW.write seg=" + segment);
 
     String lastField = null;
     for(String field : fields) {
       assert lastField == null || lastField.compareTo(field) < 0;
       lastField = field;
 
+      //if (DEBUG) System.out.println("\nBTTW.write seg=" + segment + " field=" + field);
       Terms terms = fields.terms(field);
       if (terms == null) {
         continue;
       }
+      FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
+
+      // First pass to find all prefix terms we should compile into the index:
+      List<PrefixTerm> prefixTerms;
+      if (minItemsInAutoPrefix != 0) {
+        //if (DEBUG) System.out.println("now write auto-prefix terms field=" + fieldInfo.name);
+
+        if (fieldInfo.getIndexOptions() != IndexOptions.DOCS) {
+          throw new IllegalStateException("ranges can only be indexed with IndexOptions.DOCS (field: " + fieldInfo.name + ")");
+        }
+        prefixTerms = new AutoPrefixTermsWriter(segment, terms, minItemsInAutoPrefix, maxItemsInAutoPrefix).prefixes;
+        //if (DEBUG) {
+        //for(PrefixTerm term : prefixTerms) {
+        //System.out.println("field=" + fieldInfo.name + " PREFIX TERM: " + term);
+        //}
+        //}
+      } else {
+        prefixTerms = null;
+      }
 
       TermsEnum termsEnum = terms.iterator(null);
-
       TermsWriter termsWriter = new TermsWriter(fieldInfos.fieldInfo(field));
+      int prefixTermUpto = 0;
       while (true) {
         BytesRef term = termsEnum.next();
+        //if (DEBUG) System.out.println("BTTW: next term " + term);
+
+        // Insert (merge sort) next prefix term(s):
+        if (prefixTerms != null) {
+          while (prefixTermUpto < prefixTerms.size() && (term == null || prefixTerms.get(prefixTermUpto).compareTo(term) <= 0)) {
+            PrefixTerm prefixTerm = prefixTerms.get(prefixTermUpto);
+            //if (DEBUG) System.out.println("seg=" + segment + " field=" + fieldInfo.name + " NOW INSERT prefix=" + prefixTerm);
+            termsWriter.write(prefixTerm.term, getAutoPrefixTermsEnum(terms, prefixTerm), prefixTerm);
+            prefixTermUpto++;
+          }
+        }
+
         if (term == null) {
           break;
         }
-        termsWriter.write(term, termsEnum);
+
+        //if (DEBUG) System.out.println("write field=" + fieldInfo.name + " term=" + brToString(term));
+        termsWriter.write(term, termsEnum, null);
       }
 
+      assert prefixTerms == null || prefixTermUpto == prefixTerms.size();
+
       termsWriter.finish();
+
+      //if (DEBUG) System.out.println("\nBTTW.write done seg=" + segment + " field=" + field);
     }
   }
+
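+  // Builds the union of all docIDs matching this auto-prefix term by walking
+  // the matching "real" terms, then exposes that bitset via the reused
+  // FixedBitsTermsEnum so the prefix term can be written like an ordinary term: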
+  private TermsEnum getAutoPrefixTermsEnum(Terms terms, final PrefixTerm prefix) throws IOException {
+    assert prefixDocs != null;
+    prefixDocs.clear(0, prefixDocs.length());
+
+    prefixTermsEnum = prefix.getTermsEnum(terms.iterator(prefixTermsEnum));
+
+    //System.out.println("BTTW.getAutoPrefixTE: prefix=" + prefix);
+    while (prefixTermsEnum.next() != null) {
+      //System.out.println("    got term=" + prefixTermsEnum.term().utf8ToString());
+      //termCount++;
+      prefixDocsEnum = prefixTermsEnum.docs(null, prefixDocsEnum, 0);
+      //System.out.println("got: " + prefixDocsEnum + " doc=" + prefixDocsEnum.docID() + " term=" + prefixTermsEnum.term());
+      prefixDocs.or(prefixDocsEnum);
+    }
+
+    //System.out.println("  done terms: " + prefixDocs.cardinality() + " doc seen; " + termCount + " terms seen");
+    return prefixFixedBitsTermsEnum;
+  }
   
   static long encodeOutput(long fp, boolean hasTerms, boolean isFloor) {
     assert fp < (1L << 62);
@@ -366,30 +493,38 @@
     public final byte[] termBytes;
     // stats + metadata
     public final BlockTermState state;
+    // Non-null if this is an auto-prefix term:
+    public final PrefixTerm prefixTerm;
+    public PendingTerm other;
 
-    public PendingTerm(BytesRef term, BlockTermState state) {
+    public PendingTerm(BytesRef term, BlockTermState state, PrefixTerm prefixTerm) {
       super(true);
       this.termBytes = new byte[term.length];
       System.arraycopy(term.bytes, term.offset, termBytes, 0, term.length);
       this.state = state;
+      this.prefixTerm = prefixTerm;
     }
 
     @Override
     public String toString() {
-      return brToString(termBytes);
+      return "TERM: " + brToString(termBytes);
     }
   }
 
   // for debugging
   @SuppressWarnings("unused")
   static String brToString(BytesRef b) {
-    try {
-      return b.utf8ToString() + " " + b;
-    } catch (Throwable t) {
-      // If BytesRef isn't actually UTF8, or it's eg a
-      // prefix of UTF8 that ends mid-unicode-char, we
-      // fallback to hex:
-      return b.toString();
+    if (b == null) {
+      return "(null)";
+    } else {
+      try {
+        return b.utf8ToString() + " " + b;
+      } catch (Throwable t) {
+        // If BytesRef isn't actually UTF8, or it's eg a
+        // prefix of UTF8 that ends mid-unicode-char, we
+        // fallback to hex:
+        return b.toString();
+      }
     }
   }
 
@@ -420,7 +555,7 @@
 
     @Override
     public String toString() {
-      return "BLOCK: " + brToString(prefix);
+      return "BLOCK: prefix=" + brToString(prefix);
     }
 
     public void compileIndex(List<PendingBlock> blocks, RAMOutputStream scratchBytes, IntsRefBuilder scratchIntsRef) throws IOException {
@@ -503,6 +638,8 @@
   private final RAMOutputStream scratchBytes = new RAMOutputStream();
   private final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();
 
+  static final BytesRef EMPTY_BYTES_REF = new BytesRef();
+
   class TermsWriter {
     private final FieldInfo fieldInfo;
     private final int longsSize;
@@ -539,14 +676,11 @@
 
       assert count > 0;
 
-      /*
-      if (DEBUG) {
-        BytesRef br = new BytesRef(lastTerm.bytes);
-        br.offset = lastTerm.offset;
-        br.length = prefixLength;
-        System.out.println("writeBlocks: " + br.utf8ToString() + " count=" + count);
-      }
-      */
+      //if (DEBUG2) {
+      //  BytesRef br = new BytesRef(lastTerm.bytes());
+      //  br.length = prefixLength;
+      //  System.out.println("writeBlocks: seg=" + segment + " prefix=" + brToString(br) + " count=" + count);
+      //}
 
       // Root block better write all remaining pending entries:
       assert prefixLength > 0 || count == pending.size();
@@ -557,6 +691,7 @@
       // only points to sub-blocks in the terms index so we can avoid seeking
       // to it when we are looking for a term):
       boolean hasTerms = false;
+      boolean hasPrefixTerms = false;
       boolean hasSubBlocks = false;
 
       int start = pending.size()-count;
@@ -576,7 +711,7 @@
             // Suffix is 0, i.e. prefix 'foo' and term is
             // 'foo' so the term has empty string suffix
             // in this block
-            assert lastSuffixLeadLabel == -1;
+            assert lastSuffixLeadLabel == -1: "i=" + i + " lastSuffixLeadLabel=" + lastSuffixLeadLabel;
             suffixLeadLabel = -1;
           } else {
             suffixLeadLabel = term.termBytes[prefixLength] & 0xff;
@@ -597,10 +732,11 @@
             // block as soon as we have at least minItemsInBlock.  This is not always best: it often produces
             // a too-small block as the final block:
             boolean isFloor = itemsInBlock < count;
-            newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, i, hasTerms, hasSubBlocks));
+            newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, i, hasTerms, hasPrefixTerms, hasSubBlocks));
 
             hasTerms = false;
             hasSubBlocks = false;
+            hasPrefixTerms = false;
             nextFloorLeadLabel = suffixLeadLabel;
             nextBlockStart = i;
           }
@@ -610,6 +746,7 @@
 
         if (ent.isTerm) {
           hasTerms = true;
+          hasPrefixTerms |= ((PendingTerm) ent).prefixTerm != null;
         } else {
           hasSubBlocks = true;
         }
@@ -619,7 +756,7 @@
       if (nextBlockStart < end) {
         int itemsInBlock = end - nextBlockStart;
         boolean isFloor = itemsInBlock < count;
-        newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, end, hasTerms, hasSubBlocks));
+        newBlocks.add(writeBlock(prefixLength, isFloor, nextFloorLeadLabel, nextBlockStart, end, hasTerms, hasPrefixTerms, hasSubBlocks));
       }
 
       assert newBlocks.isEmpty() == false;
@@ -644,7 +781,8 @@
      *  were too many (more than maxItemsInBlock) entries sharing the
      *  same prefix, and so we broke it into multiple floor blocks where
      *  we record the starting label of the suffix of each floor block. */
-    private PendingBlock writeBlock(int prefixLength, boolean isFloor, int floorLeadLabel, int start, int end, boolean hasTerms, boolean hasSubBlocks) throws IOException {
+    private PendingBlock writeBlock(int prefixLength, boolean isFloor, int floorLeadLabel, int start, int end,
+                                    boolean hasTerms, boolean hasPrefixTerms, boolean hasSubBlocks) throws IOException {
 
       assert end > start;
 
@@ -656,6 +794,8 @@
       System.arraycopy(lastTerm.get().bytes, 0, prefix.bytes, 0, prefixLength);
       prefix.length = prefixLength;
 
+      //if (DEBUG2) System.out.println("  writeBlock seg=" + segment + " prefixLength=" + prefixLength + " entCount=" + (end-start) + " field=" + fieldInfo.name + " prefix=" + brToString(prefix) + " fp=" + startFP + " isFloor=" + isFloor + " isLastInFloor=" + (end == pending.size()) + " floorLeadLabel=" + floorLeadLabel + " start=" + start + " end=" + end + " hasTerms=" + hasTerms + " hasSubBlocks=" + hasSubBlocks + " hasPrefixTerms=" + hasPrefixTerms + " pending.size()=" + pending.size());
+
       // Write block header:
       int numEntries = end - start;
       int code = numEntries << 1;
@@ -665,42 +805,39 @@
       }
       termsOut.writeVInt(code);
 
-      /*
-      if (DEBUG) {
-        System.out.println("  writeBlock " + (isFloor ? "(floor) " : "") + "seg=" + segment + " pending.size()=" + pending.size() + " prefixLength=" + prefixLength + " indexPrefix=" + brToString(prefix) + " entCount=" + (end-start+1) + " startFP=" + startFP + (isFloor ? (" floorLeadLabel=" + Integer.toHexString(floorLeadLabel)) : ""));
-      }
-      */
-
       // 1st pass: pack term suffix bytes into byte[] blob
       // TODO: cutover to bulk int codec... simple64?
 
       // We optimize the leaf block case (block has only terms), writing a more
       // compact format in this case:
-      boolean isLeafBlock = hasSubBlocks == false;
+      boolean isLeafBlock = hasSubBlocks == false && hasPrefixTerms == false;
+
+      //System.out.println("  isLeaf=" + isLeafBlock);
 
       final List<FST<BytesRef>> subIndices;
 
       boolean absolute = true;
 
       if (isLeafBlock) {
-        // Only terms:
+        // Block contains only ordinary terms:
         subIndices = null;
         for (int i=start;i<end;i++) {
           PendingEntry ent = pending.get(i);
           assert ent.isTerm: "i=" + i;
 
           PendingTerm term = (PendingTerm) ent;
+          assert term.prefixTerm == null;
+
           assert StringHelper.startsWith(term.termBytes, prefix): "term.term=" + term.termBytes + " prefix=" + prefix;
           BlockTermState state = term.state;
           final int suffix = term.termBytes.length - prefixLength;
-          /*
-          if (DEBUG) {
-            BytesRef suffixBytes = new BytesRef(suffix);
-            System.arraycopy(term.termBytes, prefixLength, suffixBytes.bytes, 0, suffix);
-            suffixBytes.length = suffix;
-            System.out.println("    write term suffix=" + brToString(suffixBytes));
-          }
-          */
+          //if (DEBUG2) {
+          //  BytesRef suffixBytes = new BytesRef(suffix);
+          //  System.arraycopy(term.termBytes, prefixLength, suffixBytes.bytes, 0, suffix);
+          //  suffixBytes.length = suffix;
+          //  System.out.println("    write term suffix=" + brToString(suffixBytes));
+          //}
+
           // For leaf block we write suffix straight
           suffixWriter.writeVInt(suffix);
           suffixWriter.writeBytes(term.termBytes, prefixLength, suffix);
@@ -724,27 +861,52 @@
           absolute = false;
         }
       } else {
-        // Mixed terms and sub-blocks:
+        // Block has at least one auto-prefix term or sub-block:
         subIndices = new ArrayList<>();
+        boolean sawAutoPrefixTerm = false;
         for (int i=start;i<end;i++) {
           PendingEntry ent = pending.get(i);
           if (ent.isTerm) {
             PendingTerm term = (PendingTerm) ent;
+
             assert StringHelper.startsWith(term.termBytes, prefix): "term.term=" + term.termBytes + " prefix=" + prefix;
             BlockTermState state = term.state;
             final int suffix = term.termBytes.length - prefixLength;
-            /*
-            if (DEBUG) {
-              BytesRef suffixBytes = new BytesRef(suffix);
-              System.arraycopy(term.termBytes, prefixLength, suffixBytes.bytes, 0, suffix);
-              suffixBytes.length = suffix;
-              System.out.println("    write term suffix=" + brToString(suffixBytes));
-            }
-            */
+            //if (DEBUG2) {
+            //  BytesRef suffixBytes = new BytesRef(suffix);
+            //  System.arraycopy(term.termBytes, prefixLength, suffixBytes.bytes, 0, suffix);
+            //  suffixBytes.length = suffix;
+            //  System.out.println("      write term suffix=" + brToString(suffixBytes));
+            //  if (term.prefixTerm != null) {
+            //    System.out.println("        ** auto-prefix term: " + term.prefixTerm);
+            //  }
+            //}
+
             // For non-leaf block we borrow 1 bit to record
-            // if entry is term or sub-block
-            suffixWriter.writeVInt(suffix<<1);
+            // if entry is term or sub-block, and 1 bit to record if
+            // it's a prefix term.  Terms cannot be larger than ~32 KB
+            // so we won't run out of bits:
+            code = suffix<<2;
+            int floorLeadEnd = -1;
+            if (term.prefixTerm != null) {
+              sawAutoPrefixTerm = true;
+              PrefixTerm prefixTerm = term.prefixTerm;
+              floorLeadEnd = prefixTerm.floorLeadEnd;
+              assert floorLeadEnd != -1;
+
+              if (prefixTerm.floorLeadStart == -2) {
+                // Starts with empty string
+                code |= 2;
+              } else {
+                code |= 3;
+              }
+            }
+            //if (DEBUG) System.out.println("    write suffix @ pos=" + suffixWriter.getFilePointer());
+            suffixWriter.writeVInt(code);
             suffixWriter.writeBytes(term.termBytes, prefixLength, suffix);
+            if (floorLeadEnd != -1) {
+              suffixWriter.writeByte((byte) floorLeadEnd);
+            }
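+
+            // Worked example of this encoding (illustrative values, not
+            // from this patch): with suffix length 3, an ordinary term
+            // is written as code = 3<<2 = 12, a sub-block as (3<<2)|1 =
+            // 13, a prefix term whose floor lead starts with the empty
+            // string as (3<<2)|2 = 14, and a floor'd prefix term with a
+            // real lead byte as (3<<2)|3 = 15.  The reader recovers
+            // suffix = code >>> 2 and dispatches on code & 3.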
             assert floorLeadLabel == -1 || (term.termBytes[prefixLength] & 0xff) >= floorLeadLabel;
 
             // Write term stats, to separate byte[] blob:
@@ -775,33 +937,32 @@
             PendingBlock block = (PendingBlock) ent;
             assert StringHelper.startsWith(block.prefix, prefix);
             final int suffix = block.prefix.length - prefixLength;
 
             assert suffix > 0;
 
             // For non-leaf block we borrow 1 bit to record
-            // if entry is term or sub-block
-            suffixWriter.writeVInt((suffix<<1)|1);
+            // if entry is term or sub-block, and 1 bit (unset here) to
+            // record if it's a prefix term:
+            suffixWriter.writeVInt((suffix<<2)|1);
             suffixWriter.writeBytes(block.prefix.bytes, prefixLength, suffix);
 
-            assert floorLeadLabel == -1 || (block.prefix.bytes[prefixLength] & 0xff) >= floorLeadLabel;
+            //if (DEBUG2) {
+            //  BytesRef suffixBytes = new BytesRef(suffix);
+            //  System.arraycopy(block.prefix.bytes, prefixLength, suffixBytes.bytes, 0, suffix);
+            //  suffixBytes.length = suffix;
+            //  System.out.println("      write sub-block suffix=" + brToString(suffixBytes) + " subFP=" + block.fp + " subCode=" + (startFP-block.fp) + " floor=" + block.isFloor);
+            //}
 
+            assert floorLeadLabel == -1 || (block.prefix.bytes[prefixLength] & 0xff) >= floorLeadLabel: "floorLeadLabel=" + floorLeadLabel + " suffixLead=" + (block.prefix.bytes[prefixLength] & 0xff);
             assert block.fp < startFP;
 
-            /*
-            if (DEBUG) {
-              BytesRef suffixBytes = new BytesRef(suffix);
-              System.arraycopy(block.prefix.bytes, prefixLength, suffixBytes.bytes, 0, suffix);
-              suffixBytes.length = suffix;
-              System.out.println("    write sub-block suffix=" + brToString(suffixBytes) + " subFP=" + block.fp + " subCode=" + (startFP-block.fp) + " floor=" + block.isFloor);
-            }
-            */
-
             suffixWriter.writeVLong(startFP - block.fp);
             subIndices.add(block.index);
           }
         }
 
-        assert subIndices.size() != 0;
+        assert subIndices.size() != 0 || sawAutoPrefixTerm;
       }
 
       // TODO: we could block-write the term suffix pointers;
@@ -845,30 +1006,36 @@
     }
     
     /** Writes one term's worth of postings. */
-    public void write(BytesRef text, TermsEnum termsEnum) throws IOException {
-      /*
-      if (DEBUG) {
-        int[] tmp = new int[lastTerm.length];
-        System.arraycopy(prefixStarts, 0, tmp, 0, tmp.length);
-        System.out.println("BTTW: write term=" + brToString(text) + " prefixStarts=" + Arrays.toString(tmp) + " pending.size()=" + pending.size());
-      }
-      */
+    public void write(BytesRef text, TermsEnum termsEnum, PrefixTerm prefixTerm) throws IOException {
+      //if (DEBUG) {
+      //  int[] tmp = new int[lastTerm.length()];
+      //  System.arraycopy(prefixStarts, 0, tmp, 0, tmp.length);
+      //  System.out.println("BTTW: write term=" + brToString(text) + " prefixTerm=" + prefixTerm + " prefixStarts=" + Arrays.toString(tmp) + " pending.size()=" + pending.size());
+      //}
 
       BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
       if (state != null) {
+
         assert state.docFreq != 0;
         assert fieldInfo.getIndexOptions() == IndexOptions.DOCS || state.totalTermFreq >= state.docFreq: "postingsWriter=" + postingsWriter;
-        sumDocFreq += state.docFreq;
-        sumTotalTermFreq += state.totalTermFreq;
         pushTerm(text);
        
-        PendingTerm term = new PendingTerm(text, state);
+        PendingTerm term = new PendingTerm(text, state, prefixTerm);
         pending.add(term);
-        numTerms++;
-        if (firstPendingTerm == null) {
-          firstPendingTerm = term;
+
+        //if (DEBUG) System.out.println("    add pending term = " + text + " pending.size()=" + pending.size());
+
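+        // Auto-prefix terms are synthetic: only real terms count
+        // towards the field's stats (numTerms, sumDocFreq,
+        // sumTotalTermFreq), which would otherwise be inflated: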
+        if (prefixTerm == null) {
+          sumDocFreq += state.docFreq;
+          sumTotalTermFreq += state.totalTermFreq;
+          numTerms++;
+          if (firstPendingTerm == null) {
+            firstPendingTerm = term;
+          }
+          lastPendingTerm = term;
         }
-        lastPendingTerm = term;
+      //} else {
+      //  if (DEBUG) System.out.println("  skip: null term state");
       }
     }
 
@@ -920,6 +1087,7 @@
         // TODO: if pending.size() is already 1 with a non-zero prefix length
         // we can save writing a "degenerate" root block, but we have to
         // fix all the places that assume the root block's prefix is the empty string:
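+        // Add an empty term to force closing of all final blocks: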
+        pushTerm(new BytesRef());
         writeBlocks(0, pending.size());
 
         // We better have one final "root" block:
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
index e7b051e..54e86d7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
@@ -41,6 +41,8 @@
  */
 public final class FieldReader extends Terms implements Accountable {
 
+  // private final boolean DEBUG = BlockTreeTermsWriter.DEBUG;
+
   private static final long BASE_RAM_BYTES_USED =
       RamUsageEstimator.shallowSizeOfInstance(FieldReader.class)
       + 3 * RamUsageEstimator.shallowSizeOfInstance(BytesRef.class);
@@ -125,6 +127,7 @@
   /** For debugging -- used by CheckIndex too */
   @Override
   public Stats getStats() throws IOException {
+    // TODO: add auto-prefix terms into stats
     return new SegmentTermsEnum(this).computeBlockStats();
   }
 
@@ -175,10 +178,11 @@
 
   @Override
   public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
-    if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
-      throw new IllegalArgumentException("please use CompiledAutomaton.getTermsEnum instead");
-    }
-    return new IntersectTermsEnum(this, compiled, startTerm);
+    // if (DEBUG) System.out.println("  FieldReader.intersect startTerm=" + BlockTreeTermsWriter.brToString(startTerm));
+    //System.out.println("intersect: " + compiled.type + " a=" + compiled.automaton);
+    // TODO: we could push "it's a range" or "it's a prefix" down into IntersectTermsEnum?
+    // can we optimize knowing that...?
+    return new IntersectTermsEnum(this, compiled.automaton, compiled.runAutomaton, compiled.commonSuffixRef, startTerm, compiled.sinkState);
   }
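+
+  // Usage sketch (illustrative, not part of this patch): a prefix
+  // query could intersect its compiled automaton with this field:
+  //
+  //   CompiledAutomaton ca = new CompiledAutomaton(
+  //       PrefixQuery.toAutomaton(new BytesRef("foo")));
+  //   TermsEnum te = fieldReader.intersect(ca, null);
+  //   for (BytesRef t = te.next(); t != null; t = te.next()) {
+  //     // visits matching terms, using auto-prefix terms when possible
+  //   }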
     
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsDocsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsDocsEnum.java
new file mode 100644
index 0000000..6435410
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsDocsEnum.java
@@ -0,0 +1,66 @@
+package org.apache.lucene.codecs.blocktree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.BitSetIterator;
+import org.apache.lucene.util.FixedBitSet;
+
+/** Takes a {@link FixedBitSet} and creates a DOCS_ONLY {@link DocsEnum} from it. */
+
+class FixedBitsDocsEnum extends DocsEnum {
+  private final FixedBitSet bits;
+  private DocIdSetIterator in;
+  
+  FixedBitsDocsEnum(FixedBitSet bits) {
+    this.bits = bits;
+    reset();
+  }
+
+  @Override
+  public int freq() throws IOException {
+    return 1;
+  }
+
+  @Override
+  public int docID() {
+    return in.docID();
+  }
+
+  @Override
+  public int nextDoc() throws IOException {
+    return in.nextDoc();
+  }
+
+  @Override
+  public int advance(int target) throws IOException {
+    return in.advance(target);
+  }
+
+  @Override
+  public long cost() {
+    return in.cost();
+  }
+  
+  void reset() {
+    in = new BitSetIterator(bits, 0);
+  }
+}
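+
+// Usage sketch (illustrative, not part of this patch): collect the
+// matching docIDs in a FixedBitSet, then iterate them via this enum:
+//
+//   FixedBitSet bits = new FixedBitSet(maxDoc);
+//   bits.set(3);
+//   bits.set(7);
+//   FixedBitsDocsEnum docsEnum = new FixedBitsDocsEnum(bits);
+//   for (int doc = docsEnum.nextDoc();
+//        doc != DocIdSetIterator.NO_MORE_DOCS;
+//        doc = docsEnum.nextDoc()) {
+//     // visits doc 3, then doc 7
+//   }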
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsTermsEnum.java
new file mode 100644
index 0000000..0443784
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FixedBitsTermsEnum.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.codecs.blocktree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.codecs.PostingsWriterBase;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+
+/** Silly stub class, used only when writing an auto-prefix
+ *  term in order to expose DocsEnum over a FixedBitSet.  We
+ *  pass this to {@link PostingsWriterBase#writeTerm} so 
+ *  that it can pull .docs() multiple times for the
+ *  current term. */
+
+class FixedBitsTermsEnum extends TermsEnum {
+  private final FixedBitsDocsEnum docsEnum;
+
+  public FixedBitsTermsEnum(FixedBitSet docs) {
+    docsEnum = new FixedBitsDocsEnum(docs);
+  }
+
+  @Override
+  public SeekStatus seekCeil(BytesRef text) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void seekExact(long ord) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public BytesRef term() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public BytesRef next() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long ord() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int docFreq() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long totalTermFreq() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+    if (liveDocs != null) {
+      throw new IllegalArgumentException("cannot handle live docs");
+    }
+    docsEnum.reset();
+    return docsEnum;
+  }
+
+  @Override
+  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+    // We only work with DOCS_ONLY fields
+    return null;
+  }
+}
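+
+// Usage sketch (illustrative, not part of this patch): when writing an
+// auto-prefix term, the writer would gather the union of docIDs under
+// the prefix into a FixedBitSet (prefixDocs and prefixBytes below are
+// hypothetical) and pass this stub so the postings writer can pull
+// .docs() for the synthetic term:
+//
+//   BlockTermState state = postingsWriter.writeTerm(
+//       prefixBytes, new FixedBitsTermsEnum(prefixDocs), docsSeen);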
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 952d226..28e40ab 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
@@ -30,23 +31,38 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.automaton.CompiledAutomaton;
+import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.RunAutomaton;
+import org.apache.lucene.util.automaton.Transition;
 import org.apache.lucene.util.fst.ByteSequenceOutputs;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.Outputs;
 
-// NOTE: cannot seek!
+/** This is used to implement efficient {@link Terms#intersect} for
+ *  block-tree.  Note that it cannot seek, except for the initial term on
+ *  init.  It just "nexts" through the intersection of the automaton and
+ *  the terms.  It does not use the terms index at all: on init, it
+ *  loads the root block, and scans its way to the initial term.
+ *  Likewise, in next() it scans until it finds a term that matches the
+ *  current automaton transition.  If the index has auto-prefix terms
+ *  (currently only for DOCS_ONLY fields) it will visit those terms
+ *  when possible and then skip the real terms that the auto-prefix
+ *  term matched. */
+
 final class IntersectTermsEnum extends TermsEnum {
+
+  //static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
+
   final IndexInput in;
   final static Outputs<BytesRef> fstOutputs = ByteSequenceOutputs.getSingleton();
 
-  private IntersectTermsEnumFrame[] stack;
+  IntersectTermsEnumFrame[] stack;
       
   @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc<BytesRef>[] arcs = new FST.Arc[5];
 
   final RunAutomaton runAutomaton;
-  final CompiledAutomaton compiledAutomaton;
+  final Automaton automaton;
+  final BytesRef commonSuffix;
 
   private IntersectTermsEnumFrame currentFrame;
 
@@ -54,19 +70,33 @@
 
   private final FST.BytesReader fstReader;
 
+  private final boolean allowAutoPrefixTerms;
+
   final FieldReader fr;
 
+  /** Which state in the automaton accepts all possible suffixes. */
+  private final int sinkState;
+
   private BytesRef savedStartTerm;
       
+  /** True if we just returned the current auto-prefix term; the next
+   *  call to next() must then skip all real terms that it matched. */
+  private boolean useAutoPrefixTerm;
+
   // TODO: in some cases we can filter by length?  eg
   // regexp foo*bar must be at least length 6 bytes
-  public IntersectTermsEnum(FieldReader fr, CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
-    // if (DEBUG) {
-    //   System.out.println("\nintEnum.init seg=" + segment + " commonSuffix=" + brToString(compiled.commonSuffixRef));
-    // }
+  public IntersectTermsEnum(FieldReader fr, Automaton automaton, RunAutomaton runAutomaton, BytesRef commonSuffix, BytesRef startTerm, int sinkState) throws IOException {
+    //if (DEBUG) System.out.println("\nintEnum.init seg=" + fr.parent.segment + " commonSuffix=" + commonSuffix);
     this.fr = fr;
-    runAutomaton = compiled.runAutomaton;
-    compiledAutomaton = compiled;
+    this.sinkState = sinkState;
+
+    assert automaton != null;
+    assert runAutomaton != null;
+
+    //if (DEBUG) System.out.println("sinkState=" + sinkState + " AUTOMATON:\n" + automaton.toDot());
+    this.runAutomaton = runAutomaton;
+    this.allowAutoPrefixTerms = sinkState != -1;
+    this.automaton = automaton;
+    this.commonSuffix = commonSuffix;
     in = fr.parent.termsIn.clone();
     stack = new IntersectTermsEnumFrame[5];
     for(int idx=0;idx<stack.length;idx++) {
@@ -154,7 +184,7 @@
         
     f.fp = f.fpOrig = currentFrame.lastSubFP;
     f.prefix = currentFrame.prefix + currentFrame.suffix;
-    // if (DEBUG) System.out.println("    pushFrame state=" + state + " prefix=" + f.prefix);
+    //if (DEBUG) System.out.println("    pushFrame state=" + state + " prefix=" + f.prefix);
     f.setState(state);
 
     // Walk the arc through the index -- we only
@@ -233,7 +263,7 @@
   // arbitrary seekExact/Ceil.  Note that this is a
   // seekFloor!
   private void seekToStartTerm(BytesRef target) throws IOException {
-    //if (DEBUG) System.out.println("seek to startTerm=" + target.utf8ToString());
+    //if (DEBUG) System.out.println("seek to startTerm=" + target.utf8ToString() + " length=" + target.length);
     assert currentFrame.ord == 0;
     if (term.length < target.length) {
       term.bytes = ArrayUtil.grow(term.bytes, target.length);
@@ -242,23 +272,29 @@
     assert arc == currentFrame.arc;
 
     for(int idx=0;idx<=target.length;idx++) {
+      //if (DEBUG) System.out.println("cycle idx=" + idx);
 
       while (true) {
+        final int savNextEnt = currentFrame.nextEnt;
         final int savePos = currentFrame.suffixesReader.getPosition();
         final int saveStartBytePos = currentFrame.startBytePos;
         final int saveSuffix = currentFrame.suffix;
         final long saveLastSubFP = currentFrame.lastSubFP;
         final int saveTermBlockOrd = currentFrame.termState.termBlockOrd;
+        final boolean saveIsAutoPrefixTerm = currentFrame.isAutoPrefixTerm;
+
+        //if (DEBUG) System.out.println("    cycle isAutoPrefix=" + saveIsAutoPrefixTerm + " ent=" + currentFrame.nextEnt + " (of " + currentFrame.entCount + ") prefix=" + currentFrame.prefix + " suffix=" + currentFrame.suffix + " firstLabel=" + (currentFrame.suffix == 0 ? "" : (currentFrame.suffixBytes[currentFrame.startBytePos])&0xff));
 
         final boolean isSubBlock = currentFrame.next();
 
-        //if (DEBUG) System.out.println("    cycle ent=" + currentFrame.nextEnt + " (of " + currentFrame.entCount + ") prefix=" + currentFrame.prefix + " suffix=" + currentFrame.suffix + " isBlock=" + isSubBlock + " firstLabel=" + (currentFrame.suffix == 0 ? "" : (currentFrame.suffixBytes[currentFrame.startBytePos])&0xff));
         term.length = currentFrame.prefix + currentFrame.suffix;
         if (term.bytes.length < term.length) {
           term.bytes = ArrayUtil.grow(term.bytes, term.length);
         }
         System.arraycopy(currentFrame.suffixBytes, currentFrame.startBytePos, term.bytes, currentFrame.prefix, currentFrame.suffix);
 
+        //if (DEBUG) System.out.println("      isSubBlock=" + isSubBlock + " term/prefix=" + brToString(term) + " saveIsAutoPrefixTerm=" + saveIsAutoPrefixTerm + " allowAutoPrefixTerms=" + allowAutoPrefixTerms);
+
         if (isSubBlock && StringHelper.startsWith(target, term)) {
           // Recurse
           //if (DEBUG) System.out.println("      recurse!");
@@ -266,9 +302,11 @@
           break;
         } else {
           final int cmp = term.compareTo(target);
+          //if (DEBUG) System.out.println("      cmp=" + cmp);
           if (cmp < 0) {
             if (currentFrame.nextEnt == currentFrame.entCount) {
               if (!currentFrame.isLastInFloor) {
+                // Advance to next floor block
                 //if (DEBUG) System.out.println("  load floorBlock");
                 currentFrame.loadNextFloorBlock();
                 continue;
@@ -279,19 +317,24 @@
             }
             continue;
           } else if (cmp == 0) {
+            if (allowAutoPrefixTerms == false && currentFrame.isAutoPrefixTerm) {
+              continue;
+            }
             //if (DEBUG) System.out.println("  return term=" + brToString(term));
             return;
-          } else {
+          } else if (allowAutoPrefixTerms || currentFrame.isAutoPrefixTerm == false) {
             // Fallback to prior entry: the semantics of
             // this method is that the first call to
             // next() will return the term after the
             // requested term
-            currentFrame.nextEnt--;
+            //if (DEBUG) System.out.println("    fallback prior entry");
+            currentFrame.nextEnt = savNextEnt;
             currentFrame.lastSubFP = saveLastSubFP;
             currentFrame.startBytePos = saveStartBytePos;
             currentFrame.suffix = saveSuffix;
             currentFrame.suffixesReader.setPosition(savePos);
             currentFrame.termState.termBlockOrd = saveTermBlockOrd;
+            currentFrame.isAutoPrefixTerm = saveIsAutoPrefixTerm;
             System.arraycopy(currentFrame.suffixBytes, currentFrame.startBytePos, term.bytes, currentFrame.prefix, currentFrame.suffix);
             term.length = currentFrame.prefix + currentFrame.suffix;
             // If the last entry was a block we don't
@@ -310,77 +353,245 @@
   @Override
   public BytesRef next() throws IOException {
 
-    // if (DEBUG) {
-    //   System.out.println("\nintEnum.next seg=" + segment);
-    //   System.out.println("  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]) + " outputPrefix=" + currentFrame.outputPrefix);
-    // }
+    //if (DEBUG) {
+    //  System.out.println("\nintEnum.next seg=" + fr.parent.segment);
+    //  System.out.println("  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " outputPrefix=" + currentFrame.outputPrefix + " trans: " + currentFrame.transition + " useAutoPrefix=" + useAutoPrefixTerm);
+    //}
 
     nextTerm:
-    while(true) {
-      // Pop finished frames
-      while (currentFrame.nextEnt == currentFrame.entCount) {
-        if (!currentFrame.isLastInFloor) {
-          //if (DEBUG) System.out.println("    next-floor-block");
-          currentFrame.loadNextFloorBlock();
-          //if (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]) + " outputPrefix=" + currentFrame.outputPrefix);
-        } else {
-          //if (DEBUG) System.out.println("  pop frame");
-          if (currentFrame.ord == 0) {
-            return null;
+    while (true) {
+
+      boolean isSubBlock;
+
+      if (useAutoPrefixTerm) {
+
+        assert currentFrame.isAutoPrefixTerm;
+        useAutoPrefixTerm = false;
+        currentFrame.termState.isRealTerm = true;
+
+        //if (DEBUG) System.out.println("    now scan beyond auto-prefix term=" + brToString(term) + " floorSuffixLeadEnd=" + Integer.toHexString(currentFrame.floorSuffixLeadEnd));
+        // If we last returned an auto-prefix term, we must now skip all
+        // actual terms sharing that prefix.  At most, that skipping
+        // requires popping one frame, but it can also require simply
+        // scanning ahead within the current frame.  This scanning will
+        // skip sub-blocks that contain many terms, which is why the
+        // optimization "works":
+        int floorSuffixLeadEnd = currentFrame.floorSuffixLeadEnd;
+        if (floorSuffixLeadEnd == -1) {
+          // An ordinary prefix, e.g. foo*
+          int prefix = currentFrame.prefix;
+          int suffix = currentFrame.suffix;
+          //if (DEBUG) System.out.println("    prefix=" + prefix + " suffix=" + suffix);
+          if (suffix == 0) {
+            //if (DEBUG) System.out.println("    pop frame & nextTerm");
+
+            // Easy case: the prefix term's suffix is the empty string,
+            // meaning the prefix corresponds to all terms in the
+            // current block, so we just pop this entire block:
+            if (currentFrame.ord == 0) {
+              //if (DEBUG) System.out.println("  return null");
+              return null;
+            }
+            currentFrame = stack[currentFrame.ord-1];
+            continue nextTerm;
+          } else {
+
+            // Just next() until we hit an entry that doesn't share this
+            // prefix.  The first next should be a sub-block sharing the
+            // same prefix, because if there are enough terms matching a
+            // given prefix to warrant an auto-prefix term, then there
+            // must also be enough to make a sub-block (assuming
+            // minItemsInPrefix > minItemsInBlock):
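+            // For example, if this block's prefix is "f" and we just
+            // returned the auto-prefix term "fo", the next entry should
+            // be the sub-block fo*; we scan past it, and any further
+            // entries whose suffix leads with 'o', until an entry such
+            // as "fp..." breaks the scan: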
+            scanPrefix:
+            while (true) {
+              //if (DEBUG) System.out.println("    scan next");
+              if (currentFrame.nextEnt == currentFrame.entCount) {
+                if (currentFrame.isLastInFloor == false) {
+                  currentFrame.loadNextFloorBlock();
+                } else if (currentFrame.ord == 0) {
+                  //if (DEBUG) System.out.println("  return null0");
+                  return null;
+                } else {
+                  // Pop frame, which also means we've moved beyond this
+                  // auto-prefix term:
+                  //if (DEBUG) System.out.println("  pop; nextTerm");
+                  currentFrame = stack[currentFrame.ord-1];
+                  continue nextTerm;
+                }
+              }
+              isSubBlock = currentFrame.next();
+              //if (DEBUG) {
+              //  BytesRef suffixBytes = new BytesRef(currentFrame.suffix);
+              //  System.arraycopy(currentFrame.suffixBytes, currentFrame.startBytePos, suffixBytes.bytes, 0, currentFrame.suffix);
+              //  suffixBytes.length = currentFrame.suffix;
+              //  System.out.println("      currentFrame.suffix=" + brToString(suffixBytes));
+              //}
+              for(int i=0;i<suffix;i++) {
+                if (term.bytes[prefix+i] != currentFrame.suffixBytes[currentFrame.startBytePos+i]) {
+                  //if (DEBUG) System.out.println("      done; now stop scan");
+                  break scanPrefix;
+                }
+              }
+            }
           }
-          final long lastFP = currentFrame.fpOrig;
-          currentFrame = stack[currentFrame.ord-1];
-          assert currentFrame.lastSubFP == lastFP;
-          //if (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]) + " outputPrefix=" + currentFrame.outputPrefix);
+        } else {
+          // Floor'd auto-prefix term; in this case we must skip all
+          // terms e.g. matching foo[a-m]*.  We are currently "on" fooa,
+          // which the automaton accepted (fooa* through foom*), and
+          // floorSuffixLeadEnd is m, so we must now scan to foon:
+          int prefix = currentFrame.prefix;
+          int suffix = currentFrame.suffix;
+
+          if (currentFrame.floorSuffixLeadStart == -1) {
+            suffix++;
+          }
+
+          //if (DEBUG) System.out.println("      prefix=" + prefix + " suffix=" + suffix);
+
+          if (suffix == 0) {
+
+            //if (DEBUG) System.out.println("  pop frame");
+
+            // This means current frame is fooa*, so we have to first
+            // pop the current frame, then scan in parent frame:
+            if (currentFrame.ord == 0) {
+              //if (DEBUG) System.out.println("  return null");
+              return null;
+            }
+            currentFrame = stack[currentFrame.ord-1];
+
+            // Current (parent) frame is now foo*, so now we just scan
+            // until the lead suffix byte is > floorSuffixLeadEnd
+            //assert currentFrame.prefix == prefix-1;
+            //prefix = currentFrame.prefix;
+
+            // When we pop, the parent block's prefix is not necessarily
+            // prefix-1: e.g. if block 417*'s first entry is the floor
+            // prefix term 41[7-9], we pop all the way to block 4*:
+            prefix = currentFrame.prefix;
+
+            suffix = term.length - currentFrame.prefix;
+          } else {
+            // No need to pop; just scan in currentFrame:
+          }
+
+          //if (DEBUG) System.out.println("    start scan: prefix=" + prefix + " suffix=" + suffix);
+
+          // Now we scan until the lead suffix byte is > floorSuffixLeadEnd
+          scanFloor:
+          while (true) {
+            //if (DEBUG) System.out.println("      scan next");
+            if (currentFrame.nextEnt == currentFrame.entCount) {
+              if (currentFrame.isLastInFloor == false) {
+                //if (DEBUG) System.out.println("      next floor block");
+                currentFrame.loadNextFloorBlock();
+              } else if (currentFrame.ord == 0) {
+                //if (DEBUG) System.out.println("  return null");
+                return null;
+              } else {
+                // Pop frame, which also means we've moved beyond this
+                // auto-prefix term:
+                currentFrame = stack[currentFrame.ord-1];
+                //if (DEBUG) System.out.println("      pop, now curFrame.prefix=" + currentFrame.prefix);
+                continue nextTerm;
+              }
+            }
+            isSubBlock = currentFrame.next();
+            //if (DEBUG) {
+            //  BytesRef suffixBytes = new BytesRef(currentFrame.suffix);
+            //  System.arraycopy(currentFrame.suffixBytes, currentFrame.startBytePos, suffixBytes.bytes, 0, currentFrame.suffix);
+            //  suffixBytes.length = currentFrame.suffix;
+            //  System.out.println("      currentFrame.suffix=" + brToString(suffixBytes));
+            //}
+            for(int i=0;i<suffix-1;i++) {
+              if (term.bytes[prefix+i] != currentFrame.suffixBytes[currentFrame.startBytePos+i]) {
+                //if (DEBUG) System.out.println("      done; now stop scan");
+                break scanFloor;
+              }
+            }
+            //if (DEBUG) {
+            //  if (currentFrame.suffix >= suffix) {
+            //    System.out.println("      cmp label=" + Integer.toHexString(currentFrame.suffixBytes[currentFrame.startBytePos+suffix-1]) + " vs " + floorSuffixLeadEnd);
+            //  }
+            //}
+            if (currentFrame.suffix >= suffix && (currentFrame.suffixBytes[currentFrame.startBytePos+suffix-1]&0xff) > floorSuffixLeadEnd) {
+              // Done scanning: we are now on the first term after all
+              // terms matched by this auto-prefix term
+              //if (DEBUG) System.out.println("      done; now stop scan");
+              break;
+            }
+          }
         }
+      } else {
+        // Pop finished frames
+        while (currentFrame.nextEnt == currentFrame.entCount) {
+          if (!currentFrame.isLastInFloor) {
+            //if (DEBUG) System.out.println("    next-floor-block: trans: " + currentFrame.transition);
+            // Advance to next floor block
+            currentFrame.loadNextFloorBlock();
+            //if (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " outputPrefix=" + currentFrame.outputPrefix);
+            break;
+          } else {
+            //if (DEBUG) System.out.println("  pop frame");
+            if (currentFrame.ord == 0) {
+              //if (DEBUG) System.out.println("  return null");
+              return null;
+            }
+            final long lastFP = currentFrame.fpOrig;
+            currentFrame = stack[currentFrame.ord-1];
+            assert currentFrame.lastSubFP == lastFP;
+            //if (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " outputPrefix=" + currentFrame.outputPrefix);
+          }
+        }
+
+        isSubBlock = currentFrame.next();
       }
 
-      final boolean isSubBlock = currentFrame.next();
-      // if (DEBUG) {
-      //   final BytesRef suffixRef = new BytesRef();
-      //   suffixRef.bytes = currentFrame.suffixBytes;
-      //   suffixRef.offset = currentFrame.startBytePos;
-      //   suffixRef.length = currentFrame.suffix;
-      //   System.out.println("    " + (isSubBlock ? "sub-block" : "term") + " " + currentFrame.nextEnt + " (of " + currentFrame.entCount + ") suffix=" + brToString(suffixRef));
-      // }
+      //if (DEBUG) {
+      //  final BytesRef suffixRef = new BytesRef();
+      //  suffixRef.bytes = currentFrame.suffixBytes;
+      //  suffixRef.offset = currentFrame.startBytePos;
+      //  suffixRef.length = currentFrame.suffix;
+      //  System.out.println("    " + (isSubBlock ? "sub-block" : "term") + " " + currentFrame.nextEnt + " (of " + currentFrame.entCount + ") suffix=" + brToString(suffixRef));
+      //}
 
       if (currentFrame.suffix != 0) {
+        // Advance our position in the automaton to match the entry the
+        // terms dict just next'd to:
         final int label = currentFrame.suffixBytes[currentFrame.startBytePos] & 0xff;
+        //if (DEBUG) System.out.println("    move automaton to label=" + label + " vs curMax=" + currentFrame.curTransitionMax);
         while (label > currentFrame.curTransitionMax) {
           if (currentFrame.transitionIndex >= currentFrame.transitionCount-1) {
-            // Stop processing this frame -- no further
-            // matches are possible because we've moved
-            // beyond what the max transition will allow
-            //if (DEBUG) System.out.println("      break: trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]));
-
-            // sneaky!  forces a pop above
-            currentFrame.isLastInFloor = true;
-            currentFrame.nextEnt = currentFrame.entCount;
+            // Pop this frame: no further matches are possible because
+            // we've moved beyond what the max transition will allow
+            //if (DEBUG) System.out.println("      break: trans");
+            if (currentFrame.ord == 0) {
+              //if (DEBUG) System.out.println("  return null");
+              return null;
+            }
+            currentFrame = stack[currentFrame.ord-1];
             continue nextTerm;
           }
           currentFrame.transitionIndex++;
-          compiledAutomaton.automaton.getNextTransition(currentFrame.transition);
+          automaton.getNextTransition(currentFrame.transition);
           currentFrame.curTransitionMax = currentFrame.transition.max;
-          //if (DEBUG) System.out.println("      next trans=" + currentFrame.transitions[currentFrame.transitionIndex]);
+          //if (DEBUG) System.out.println("      next trans");
         }
       }
 
       // First test the common suffix, if set:
-      if (compiledAutomaton.commonSuffixRef != null && !isSubBlock) {
+      if (commonSuffix != null && !isSubBlock) {
         final int termLen = currentFrame.prefix + currentFrame.suffix;
-        if (termLen < compiledAutomaton.commonSuffixRef.length) {
+        if (termLen < commonSuffix.length) {
           // No match
-          // if (DEBUG) {
-          //   System.out.println("      skip: common suffix length");
-          // }
+          //if (DEBUG) System.out.println("      skip: common suffix length");
           continue nextTerm;
         }
 
         final byte[] suffixBytes = currentFrame.suffixBytes;
-        final byte[] commonSuffixBytes = compiledAutomaton.commonSuffixRef.bytes;
+        final byte[] commonSuffixBytes = commonSuffix.bytes;
 
-        final int lenInPrefix = compiledAutomaton.commonSuffixRef.length - currentFrame.suffix;
-        assert compiledAutomaton.commonSuffixRef.offset == 0;
+        final int lenInPrefix = commonSuffix.length - currentFrame.suffix;
+        assert commonSuffix.offset == 0;
         int suffixBytesPos;
         int commonSuffixBytesPos = 0;
 
@@ -394,24 +605,20 @@
           final int termBytesPosEnd = currentFrame.prefix;
           while (termBytesPos < termBytesPosEnd) {
             if (termBytes[termBytesPos++] != commonSuffixBytes[commonSuffixBytesPos++]) {
-              // if (DEBUG) {
-              //   System.out.println("      skip: common suffix mismatch (in prefix)");
-              // }
+              //if (DEBUG) System.out.println("      skip: common suffix mismatch (in prefix)");
               continue nextTerm;
             }
           }
           suffixBytesPos = currentFrame.startBytePos;
         } else {
-          suffixBytesPos = currentFrame.startBytePos + currentFrame.suffix - compiledAutomaton.commonSuffixRef.length;
+          suffixBytesPos = currentFrame.startBytePos + currentFrame.suffix - commonSuffix.length;
         }
 
         // Test overlapping suffix part:
-        final int commonSuffixBytesPosEnd = compiledAutomaton.commonSuffixRef.length;
+        final int commonSuffixBytesPosEnd = commonSuffix.length;
         while (commonSuffixBytesPos < commonSuffixBytesPosEnd) {
           if (suffixBytes[suffixBytesPos++] != commonSuffixBytes[commonSuffixBytesPos++]) {
-            // if (DEBUG) {
-            //   System.out.println("      skip: common suffix mismatch");
-            // }
+            //if (DEBUG) System.out.println("      skip: common suffix mismatch");
             continue nextTerm;
           }
         }
@@ -423,10 +630,17 @@
       // "temporarily" accepted, we just blindly .next()
       // until the limit
 
-      // See if the term prefix matches the automaton:
+      // TODO: for first iter of this loop can't we just use the current trans?  we already advanced it and confirmed it matches lead
+      // byte of the suffix
+
+      // See if the term suffix matches the automaton:
       int state = currentFrame.state;
+      int lastState = currentFrame.lastState;
+      //if (DEBUG) System.out.println("  a state=" + state + " curFrame.suffix.len=" + currentFrame.suffix + " curFrame.prefix=" + currentFrame.prefix);
       for (int idx=0;idx<currentFrame.suffix;idx++) {
-        state = runAutomaton.step(state,  currentFrame.suffixBytes[currentFrame.startBytePos+idx] & 0xff);
+        lastState = state;
+        //if (DEBUG) System.out.println("    step label=" + (char) (currentFrame.suffixBytes[currentFrame.startBytePos+idx] & 0xff));
+        state = runAutomaton.step(state, currentFrame.suffixBytes[currentFrame.startBytePos+idx] & 0xff);
         if (state == -1) {
           // No match
           //System.out.println("    no s=" + state);
@@ -436,16 +650,59 @@
         }
       }
 
+      //if (DEBUG) System.out.println("    after suffix: state=" + state + " lastState=" + lastState);
+
       if (isSubBlock) {
         // Match!  Recurse:
         //if (DEBUG) System.out.println("      sub-block match to state=" + state + "; recurse fp=" + currentFrame.lastSubFP);
         copyTerm();
         currentFrame = pushFrame(state);
-        //if (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]) + " outputPrefix=" + currentFrame.outputPrefix);
+        currentFrame.lastState = lastState;
+        //xif (DEBUG) System.out.println("\n  frame ord=" + currentFrame.ord + " prefix=" + brToString(new BytesRef(term.bytes, term.offset, currentFrame.prefix)) + " state=" + currentFrame.state + " lastInFloor?=" + currentFrame.isLastInFloor + " fp=" + currentFrame.fp + " trans=" + (currentFrame.transitions.length == 0 ? "n/a" : currentFrame.transitions[currentFrame.transitionIndex]) + " outputPrefix=" + currentFrame.outputPrefix);
+      } else if (currentFrame.isAutoPrefixTerm) {
+        // We are on an auto-prefix term, meaning this term was compiled
+        // at indexing time, matching all terms sharing this prefix (or,
+        // a floor'd subset of them if that count was too high).  A
+        // prefix term represents a range of terms, so we now need to
+        // test whether, from the current state in the automaton, it
+        // accepts all terms in that range.  As long as it does, we can
+        // use this term and then later skip ahead past all terms in
+        // this range:
+        if (allowAutoPrefixTerms) {
+
+          if (currentFrame.floorSuffixLeadEnd == -1) {
+            // Simple prefix case
+            useAutoPrefixTerm = state == sinkState;
+          } else {
+            if (currentFrame.floorSuffixLeadStart == -1) {
+              // Must also accept the empty string in this case
+              if (automaton.isAccept(state)) {
+                //if (DEBUG) System.out.println("      state is accept");
+                useAutoPrefixTerm = acceptsSuffixRange(state, 0, currentFrame.floorSuffixLeadEnd);
+              }
+            } else {
+              useAutoPrefixTerm = acceptsSuffixRange(lastState, currentFrame.floorSuffixLeadStart, currentFrame.floorSuffixLeadEnd);
+            }
+          }
+
+          //if (DEBUG) System.out.println("  useAutoPrefixTerm=" + useAutoPrefixTerm);
+
+          if (useAutoPrefixTerm) {
+            copyTerm();
+            currentFrame.termState.isRealTerm = false;
+            //if (DEBUG) System.out.println("  return auto prefix term: " + brToString(term));
+            return term;
+          } else {
+            // We move onto the next term
+          }
+        } else {
+          // We are not allowed to use auto-prefix terms, so we just skip it
+        }
       } else if (runAutomaton.isAccept(state)) {
         copyTerm();
-        //if (DEBUG) System.out.println("      term match to state=" + state + "; return term=" + brToString(term));
+        //if (DEBUG) System.out.println("      term match to state=" + state);
         assert savedStartTerm == null || term.compareTo(savedStartTerm) > 0: "saveStartTerm=" + savedStartTerm.utf8ToString() + " term=" + term.utf8ToString();
+        //if (DEBUG) System.out.println("      return term=" + brToString(term));
         return term;
       } else {
         //System.out.println("    no s=" + state);
@@ -453,6 +710,41 @@
     }
   }
 
+  private final Transition transition = new Transition();
+
+  /** Returns true if, from this state, the automaton accepts all suffixes
+   *  starting with any label between start and end, inclusive.  We just
+   *  look for a single transition spanning this range to the sink state.  */
+  private boolean acceptsSuffixRange(int state, int start, int end) {
+
+    //xif (DEBUG) System.out.println("    acceptsSuffixRange state=" + state + " start=" + start + " end=" + end);
+
+    int count = automaton.initTransition(state, transition);
+    //xif (DEBUG) System.out.println("      transCount=" + count);
+    //xif (DEBUG) System.out.println("      trans=" + transition);
+    for(int i=0;i<count;i++) {
+      automaton.getNextTransition(transition);
+      if (start >= transition.min && end <= transition.max && transition.dest == sinkState) {
+        return true;
+      }
+    }
+
+    return false;
+  }
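+
+  // Example (illustrative): for foo[a-m]*, the automaton would have a
+  // single transition over labels 'a'..'m', from the state reached
+  // after "foo", straight to the sink state; acceptsSuffixRange(state,
+  // 'a', 'm') then returns true, while a wider range such as 'a'..'z'
+  // is not contained in that transition and returns false.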
+
+  // for debugging
+  @SuppressWarnings("unused")
+  static String brToString(BytesRef b) {
+    try {
+      return b.utf8ToString() + " " + b;
+    } catch (Throwable t) {
+      // If BytesRef isn't actually UTF8, or it's e.g. a
+      // prefix of UTF8 that ends mid-unicode-char, we
+      // fall back to hex:
+      return b.toString();
+    }
+  }
+
   private void copyTerm() {
     //System.out.println("      copyTerm cur.prefix=" + currentFrame.prefix + " cur.suffix=" + currentFrame.suffix + " first=" + (char) currentFrame.suffixBytes[currentFrame.startBytePos]);
     final int len = currentFrame.prefix + currentFrame.suffix;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
index 06ab6aa..c361255 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
@@ -35,9 +35,14 @@
   long fpEnd;
   long lastSubFP;
 
+  // private static boolean DEBUG = IntersectTermsEnum.DEBUG;
+
   // State in automaton
   int state;
 
+  // State just before the last label
+  int lastState;
+
   int metaDataUpto;
 
   byte[] suffixBytes = new byte[128];
@@ -73,6 +78,8 @@
   int transitionIndex;
   int transitionCount;
 
+  final boolean versionAutoPrefix;
+
   FST.Arc<BytesRef> arc;
 
   final BlockTermState termState;
@@ -89,6 +96,17 @@
   int startBytePos;
   int suffix;
 
+  // When we are on an auto-prefix term this is the starting lead byte
+  // of the suffix (e.g. 'a' for the foo[a-m]* case):
+  int floorSuffixLeadStart;
+
+  // When we are on an auto-prefix term this is the ending lead byte
+  // of the suffix (e.g. 'm' for the foo[a-m]* case):
+  int floorSuffixLeadEnd;
+
+  // True if the term we are currently on is an auto-prefix term:
+  boolean isAutoPrefixTerm;
+
   private final IntersectTermsEnum ite;
 
   public IntersectTermsEnumFrame(IntersectTermsEnum ite, int ord) throws IOException {
@@ -97,35 +115,39 @@
     this.termState = ite.fr.parent.postingsReader.newTermState();
     this.termState.totalTermFreq = -1;
     this.longs = new long[ite.fr.longsSize];
+    this.versionAutoPrefix = ite.fr.parent.version >= BlockTreeTermsReader.VERSION_AUTO_PREFIX_TERMS;
   }
 
   void loadNextFloorBlock() throws IOException {
     assert numFollowFloorBlocks > 0;
-    //if (DEBUG) System.out.println("    loadNextFoorBlock trans=" + transitions[transitionIndex]);
+    //if (DEBUG) System.out.println("    loadNextFloorBlock transition.min=" + transition.min);
 
     do {
       fp = fpOrig + (floorDataReader.readVLong() >>> 1);
       numFollowFloorBlocks--;
-      // if (DEBUG) System.out.println("    skip floor block2!  nextFloorLabel=" + (char) nextFloorLabel + " vs target=" + (char) transitions[transitionIndex].getMin() + " newFP=" + fp + " numFollowFloorBlocks=" + numFollowFloorBlocks);
+      //if (DEBUG) System.out.println("    skip floor block2!  nextFloorLabel=" + (char) nextFloorLabel + " newFP=" + fp + " numFollowFloorBlocks=" + numFollowFloorBlocks);
       if (numFollowFloorBlocks != 0) {
         nextFloorLabel = floorDataReader.readByte() & 0xff;
       } else {
         nextFloorLabel = 256;
       }
-      // if (DEBUG) System.out.println("    nextFloorLabel=" + (char) nextFloorLabel);
+      //if (DEBUG) System.out.println("    nextFloorLabel=" + (char) nextFloorLabel);
     } while (numFollowFloorBlocks != 0 && nextFloorLabel <= transition.min);
 
+    //if (DEBUG) System.out.println("      done loadNextFloorBlock");
+
     load(null);
   }
 
   public void setState(int state) {
     this.state = state;
     transitionIndex = 0;
-    transitionCount = ite.compiledAutomaton.automaton.getNumTransitions(state);
+    transitionCount = ite.automaton.getNumTransitions(state);
     if (transitionCount != 0) {
-      ite.compiledAutomaton.automaton.initTransition(state, transition);
-      ite.compiledAutomaton.automaton.getNextTransition(transition);
+      ite.automaton.initTransition(state, transition);
+      ite.automaton.getNextTransition(transition);
       curTransitionMax = transition.max;
+      //if (DEBUG) System.out.println("    after setState state=" + state + " trans: " + transition + " transCount=" + transitionCount);
     } else {
       curTransitionMax = -1;
     }
@@ -133,7 +155,7 @@
 
   void load(BytesRef frameIndexData) throws IOException {
 
-    // if (DEBUG) System.out.println("    load fp=" + fp + " fpOrig=" + fpOrig + " frameIndexData=" + frameIndexData + " trans=" + (transitions.length != 0 ? transitions[0] : "n/a" + " state=" + state));
+    //xif (DEBUG) System.out.println("    load fp=" + fp + " fpOrig=" + fpOrig + " frameIndexData=" + frameIndexData + " trans=" + (transitions.length != 0 ? transitions[0] : "n/a" + " state=" + state));
 
     if (frameIndexData != null && transitionCount != 0) {
       // Floor frame
@@ -148,7 +170,7 @@
       if ((code & BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR) != 0) {
         numFollowFloorBlocks = floorDataReader.readVInt();
         nextFloorLabel = floorDataReader.readByte() & 0xff;
-        // if (DEBUG) System.out.println("    numFollowFloorBlocks=" + numFollowFloorBlocks + " nextFloorLabel=" + nextFloorLabel);
+        //if (DEBUG) System.out.println("    numFollowFloorBlocks=" + numFollowFloorBlocks + " nextFloorLabel=" + nextFloorLabel);
 
         // If current state is accept, we must process
         // first block in case it has empty suffix:
@@ -158,7 +180,7 @@
           while (numFollowFloorBlocks != 0 && nextFloorLabel <= transition.min) {
             fp = fpOrig + (floorDataReader.readVLong() >>> 1);
             numFollowFloorBlocks--;
-            // if (DEBUG) System.out.println("    skip floor block!  nextFloorLabel=" + (char) nextFloorLabel + " vs target=" + (char) transitions[0].getMin() + " newFP=" + fp + " numFollowFloorBlocks=" + numFollowFloorBlocks);
+            //xif (DEBUG) System.out.println("    skip floor block!  nextFloorLabel=" + (char) nextFloorLabel + " vs target=" + (char) transitions[0].getMin() + " newFP=" + fp + " numFollowFloorBlocks=" + numFollowFloorBlocks);
             if (numFollowFloorBlocks != 0) {
               nextFloorLabel = floorDataReader.readByte() & 0xff;
             } else {
@@ -179,7 +201,7 @@
     code = ite.in.readVInt();
     isLeafBlock = (code & 1) != 0;
     int numBytes = code >>> 1;
-    // if (DEBUG) System.out.println("      entCount=" + entCount + " lastInFloor?=" + isLastInFloor + " leafBlock?=" + isLeafBlock + " numSuffixBytes=" + numBytes);
+    //if (DEBUG) System.out.println("      entCount=" + entCount + " lastInFloor?=" + isLastInFloor + " leafBlock?=" + isLeafBlock + " numSuffixBytes=" + numBytes);
     if (suffixBytes.length < numBytes) {
       suffixBytes = new byte[ArrayUtil.oversize(numBytes, 1)];
     }
@@ -214,41 +236,102 @@
       // written one after another -- tail recurse:
       fpEnd = ite.in.getFilePointer();
     }
+
+    // Necessary in case the entry at this ord was previously an
+    // auto-prefix term, but we now recurse into a new leaf block
+    isAutoPrefixTerm = false;
   }
 
   // TODO: maybe add scanToLabel; should give perf boost
 
+  // Decodes next entry; returns true if it's a sub-block
   public boolean next() {
-    return isLeafBlock ? nextLeaf() : nextNonLeaf();
+    if (isLeafBlock) {
+      nextLeaf();
+      return false;
+    } else {
+      return nextNonLeaf();
+    }
   }
 
-  // Decodes next entry; returns true if it's a sub-block
-  public boolean nextLeaf() {
-    //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
+  public void nextLeaf() {
+    //if (DEBUG) System.out.println("  frame.nextLeaf ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
     assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
     nextEnt++;
     suffix = suffixesReader.readVInt();
     startBytePos = suffixesReader.getPosition();
     suffixesReader.skipBytes(suffix);
-    return false;
   }
 
   public boolean nextNonLeaf() {
-    //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
+    //if (DEBUG) System.out.println("  frame.nextNonLeaf ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount + " versionAutoPrefix=" + versionAutoPrefix + " fp=" + suffixesReader.getPosition());
     assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
     nextEnt++;
     final int code = suffixesReader.readVInt();
-    suffix = code >>> 1;
-    startBytePos = suffixesReader.getPosition();
-    suffixesReader.skipBytes(suffix);
-    if ((code & 1) == 0) {
-      // A normal term
-      termState.termBlockOrd++;
-      return false;
+    if (versionAutoPrefix == false) {
+      suffix = code >>> 1;
+      startBytePos = suffixesReader.getPosition();
+      suffixesReader.skipBytes(suffix);
+      if ((code & 1) == 0) {
+        // A normal term
+        termState.termBlockOrd++;
+        return false;
+      } else {
+        // A sub-block; make sub-FP absolute:
+        lastSubFP = fp - suffixesReader.readVLong();
+        return true;
+      }
     } else {
-      // A sub-block; make sub-FP absolute:
-      lastSubFP = fp - suffixesReader.readVLong();
-      return true;
+      suffix = code >>> 2;
+      startBytePos = suffixesReader.getPosition();
+      suffixesReader.skipBytes(suffix);
+      switch (code & 3) {
+      case 0:
+        // A normal term
+        //if (DEBUG) System.out.println("    ret: term");
+        isAutoPrefixTerm = false;
+        termState.termBlockOrd++;
+        return false;
+      case 1:
+        // A sub-block; make sub-FP absolute:
+        isAutoPrefixTerm = false;
+        lastSubFP = fp - suffixesReader.readVLong();
+        //if (DEBUG) System.out.println("    ret: sub-block");
+        return true;
+      case 2:
+        // A normal prefix term, suffix leads with empty string
+        floorSuffixLeadStart = -1;
+        termState.termBlockOrd++;
+        floorSuffixLeadEnd = suffixesReader.readByte() & 0xff;
+        if (floorSuffixLeadEnd == 0xff) {
+          floorSuffixLeadEnd = -1;
+          //System.out.println("  fill in -1");
+        }
+        //if (DEBUG) System.out.println("    ret: floor prefix term: start=-1 end=" + floorSuffixLeadEnd);
+        isAutoPrefixTerm = true;
+        return false;
+      case 3:
+        // A floor'd prefix term, suffix leads with real byte
+        if (suffix == 0) {
+          // TODO: this is messy; it's necessary because this entry is an auto-prefix term whose
+          // suffix is the empty string, so we must peek at the parent block for the lead suffix byte:
+          assert ord > 0;
+          IntersectTermsEnumFrame parent = ite.stack[ord-1];
+          floorSuffixLeadStart = parent.suffixBytes[parent.startBytePos+parent.suffix-1] & 0xff;
+          //if (DEBUG) System.out.println("    peek-parent: suffix=" + floorSuffixLeadStart);
+        } else {
+          floorSuffixLeadStart = suffixBytes[startBytePos+suffix-1] & 0xff;
+        }
+        termState.termBlockOrd++;
+        isAutoPrefixTerm = true;
+        floorSuffixLeadEnd = suffixesReader.readByte() & 0xff;
+        //if (DEBUG) System.out.println("    ret: floor prefix term start=" + floorSuffixLeadStart + " end=" + floorSuffixLeadEnd);
+        return false;
+      default:
+        // Silly javac:
+        assert false;
+        return false;
+      }
     }
   }
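
The switch above is the complete read-side entry encoding for auto-prefix blocks: each
entry's VInt packs the suffix length into the high bits and a two-bit flag into the low
bits (pre-auto-prefix blocks use a single low bit). A minimal sketch of that packing;
the demo class and values are illustrative only, not part of this patch:

    // Flags: 0 = normal term; 1 = sub-block (followed by a VLong FP delta);
    // 2 = auto-prefix term whose suffix-lead range starts at the empty string;
    // 3 = auto-prefix term carrying a real lead byte. Flags 2 and 3 are each
    // followed by one lead-end byte.
    public class AutoPrefixCodeDemo {
      public static void main(String[] args) {
        int suffixLength = 5;
        for (int flag = 0; flag < 4; flag++) {
          int code = (suffixLength << 2) | flag;  // what the writer packs
          System.out.println("code=" + code
              + " suffix=" + (code >>> 2)         // high bits: suffix length
              + " kind=" + (code & 3));           // low two bits: entry kind
        }
      }
    }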
 
@@ -277,10 +360,10 @@
 
       // stats
       termState.docFreq = statsReader.readVInt();
-      //if (DEBUG) System.out.println("    dF=" + state.docFreq);
+      //xif (DEBUG) System.out.println("    dF=" + state.docFreq);
       if (ite.fr.fieldInfo.getIndexOptions() != IndexOptions.DOCS) {
         termState.totalTermFreq = termState.docFreq + statsReader.readVLong();
-        //if (DEBUG) System.out.println("    totTF=" + state.totalTermFreq);
+        //xif (DEBUG) System.out.println("    totTF=" + state.totalTermFreq);
       }
       // metadata 
       for (int i = 0; i < ite.fr.longsSize; i++) {
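
An aside on the stats decode just above: totalTermFreq is stored as a delta on top of
docFreq, since a term's total frequency is always at least its doc freq, so the delta is
non-negative and VLong-encodes compactly. A tiny sketch (names illustrative):

    public class TermStatsDemo {
      public static void main(String[] args) {
        long docFreq = 42, totalTermFreq = 97;
        long delta = totalTermFreq - docFreq;  // what the writer stores as a VLong
        System.out.println("decoded totalTermFreq=" + (docFreq + delta));
      }
    }
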
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
index df67b07..b80665f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
@@ -36,7 +36,9 @@
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.Util;
 
-/** Iterates through terms in this field */
+/** Iterates through terms in this field.  This implementation skips
+ *  any auto-prefix terms it encounters. */
 final class SegmentTermsEnum extends TermsEnum {
 
   // Lazy init:
@@ -50,7 +52,7 @@
 
   private int targetBeforeCurrentLength;
 
-  // static boolean DEBUG = false;
+  // static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
 
   private final ByteArrayDataInput scratchReader = new ByteArrayDataInput();
 
@@ -121,6 +123,8 @@
    *  computing aggregate statistics. */
   public Stats computeBlockStats() throws IOException {
 
+    // TODO: add total auto-prefix term count
+
     Stats stats = new Stats(fr.parent.segment, fr.fieldInfo.name);
     if (fr.index != null) {
       stats.indexNodeCount = fr.index.getNodeCount();
@@ -154,8 +158,10 @@
       while (currentFrame.nextEnt == currentFrame.entCount) {
         stats.endBlock(currentFrame);
         if (!currentFrame.isLastInFloor) {
+          // Advance to next floor block
           currentFrame.loadNextFloorBlock();
           stats.startBlock(currentFrame, true);
+          break;
         } else {
           if (currentFrame.ord == 0) {
             break allTerms;
@@ -177,8 +183,6 @@
           // This is a "next" frame -- even if it's
           // floor'd we must pretend it isn't so we don't
           // try to scan to the right floor frame:
-          currentFrame.isFloor = false;
-          //currentFrame.hasTerms = true;
           currentFrame.loadBlock();
           stats.startBlock(currentFrame, !currentFrame.isLastInFloor);
         } else {
@@ -254,6 +258,7 @@
   // Pushes next'd frame or seek'd frame; we later
   // lazy-load the frame only when needed
   SegmentTermsEnumFrame pushFrame(FST.Arc<BytesRef> arc, long fp, int length) throws IOException {
+    //if (DEBUG) System.out.println("pushFrame length=" + length + " fp=" + fp);
     final SegmentTermsEnumFrame f = getFrame(1+currentFrame.ord);
     f.arc = arc;
     if (f.fpOrig == fp && f.nextEnt != -1) {
@@ -309,8 +314,14 @@
     }
   }
 
+  // for debugging
+  @SuppressWarnings("unused")
+  static String brToString(BytesRefBuilder b) {
+    return brToString(b.get());
+  }
+
   @Override
-  public boolean seekExact(final BytesRef target) throws IOException {
+  public boolean seekExact(BytesRef target) throws IOException {
 
     if (fr.index == null) {
       throw new IllegalStateException("terms index was not loaded");
@@ -567,7 +578,8 @@
   }
 
   @Override
-  public SeekStatus seekCeil(final BytesRef target) throws IOException {
+  public SeekStatus seekCeil(BytesRef target) throws IOException {
+
     if (fr.index == null) {
       throw new IllegalStateException("terms index was not loaded");
     }
@@ -577,7 +589,7 @@
     assert clearEOF();
 
     // if (DEBUG) {
-    //   System.out.println("\nBTTR.seekCeil seg=" + fr.parent.segment + " target=" + fr.fieldInfo.name + ":" + target.utf8ToString() + " " + target + " current=" + brToString(term) + " (exists?=" + termExists + ") validIndexPrefix=  " + validIndexPrefix);
+    //   System.out.println("\nBTTR.seekCeil seg=" + fr.parent.segment + " target=" + fr.fieldInfo.name + ":" + brToString(target) + " " + target + " current=" + brToString(term) + " (exists?=" + termExists + ") validIndexPrefix=  " + validIndexPrefix);
     //   printSeekState(System.out);
     // }
 
@@ -619,7 +631,7 @@
       while (targetUpto < targetLimit) {
         cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF);
         //if (DEBUG) {
-        //System.out.println("    cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")"   + " arc.output=" + arc.output + " output=" + output);
+        //System.out.println("    cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.byteAt(targetUpto)) + ")"   + " arc.output=" + arc.output + " output=" + output);
         //}
         if (cmp != 0) {
           break;
@@ -649,7 +661,7 @@
         while (targetUpto < targetLimit2) {
           cmp = (term.byteAt(targetUpto)&0xFF) - (target.bytes[target.offset + targetUpto]&0xFF);
           //if (DEBUG) {
-          //System.out.println("    cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")");
+          //System.out.println("    cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + " vs termLabel=" + (char) (term.byteAt(targetUpto)) + ")");
           //}
           if (cmp != 0) {
             break;
@@ -735,7 +747,7 @@
 
         // Index is exhausted
         // if (DEBUG) {
-        //   System.out.println("    index: index exhausted label=" + ((char) targetLabel) + " " + toHex(targetLabel));
+        //   System.out.println("    index: index exhausted label=" + ((char) targetLabel) + " " + targetLabel);
         // }
             
         validIndexPrefix = currentFrame.prefix;
@@ -745,6 +757,7 @@
 
         currentFrame.loadBlock();
 
+        //if (DEBUG) System.out.println("  now scanToTerm");
         final SeekStatus result = currentFrame.scanToTerm(target, false);
         if (result == SeekStatus.END) {
           term.copyBytes(target);
@@ -752,7 +765,7 @@
 
           if (next() != null) {
             //if (DEBUG) {
-            //System.out.println("  return NOT_FOUND term=" + brToString(term) + " " + term);
+            //System.out.println("  return NOT_FOUND term=" + brToString(term));
             //}
             return SeekStatus.NOT_FOUND;
           } else {
@@ -763,7 +776,7 @@
           }
         } else {
           //if (DEBUG) {
-          //System.out.println("  return " + result + " term=" + brToString(term) + " " + term);
+          //System.out.println("  return " + result + " term=" + brToString(term));
           //}
           return result;
         }
@@ -778,7 +791,7 @@
         }
 
         //if (DEBUG) {
-        //System.out.println("    index: follow label=" + toHex(target.bytes[target.offset + targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput);
+        //System.out.println("    index: follow label=" + (target.bytes[target.offset + targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput);
         //}
         targetUpto++;
 
@@ -804,7 +817,7 @@
       termExists = false;
       if (next() != null) {
         //if (DEBUG) {
-        //System.out.println("  return NOT_FOUND term=" + term.utf8ToString() + " " + term);
+        //System.out.println("  return NOT_FOUND term=" + term.get().utf8ToString() + " " + term);
         //}
         return SeekStatus.NOT_FOUND;
       } else {
@@ -888,10 +901,10 @@
     targetBeforeCurrentLength = currentFrame.ord;
 
     assert !eof;
-    // if (DEBUG) {
-    //   System.out.println("\nBTTR.next seg=" + fr.parent.segment + " term=" + brToString(term) + " termExists?=" + termExists + " field=" + fr.fieldInfo.name + " termBlockOrd=" + currentFrame.state.termBlockOrd + " validIndexPrefix=" + validIndexPrefix);
-    //   printSeekState(System.out);
-    // }
+    //if (DEBUG) {
+    //  System.out.println("\nBTTR.next seg=" + fr.parent.segment + " term=" + brToString(term) + " termExists?=" + termExists + " field=" + fr.fieldInfo.name + " termBlockOrd=" + currentFrame.state.termBlockOrd + " validIndexPrefix=" + validIndexPrefix);
+    //  printSeekState(System.out);
+    //}
 
     if (currentFrame == staticFrame) {
       // If seek was previously called and the term was
@@ -900,7 +913,7 @@
       // docFreq, etc.  But, if they then call next(),
       // this method catches up all internal state so next()
       // works properly:
-      //if (DEBUG) System.out.println("  re-seek to pending term=" + term.utf8ToString() + " " + term);
+      //if (DEBUG) System.out.println("  re-seek to pending term=" + brToString(term) + " " + term);
       final boolean result = seekExact(term.get());
       assert result;
     }
@@ -908,7 +921,9 @@
     // Pop finished blocks
     while (currentFrame.nextEnt == currentFrame.entCount) {
       if (!currentFrame.isLastInFloor) {
+        // Advance to next floor block
         currentFrame.loadNextFloorBlock();
+        break;
       } else {
         //if (DEBUG) System.out.println("  pop frame");
         if (currentFrame.ord == 0) {
@@ -934,9 +949,7 @@
         // Note that the seek state (last seek) has been
         // invalidated beyond this depth
         validIndexPrefix = Math.min(validIndexPrefix, currentFrame.prefix);
-        //if (DEBUG) {
-        //System.out.println("  reset validIndexPrefix=" + validIndexPrefix);
-        //}
+        //if (DEBUG) System.out.println("  reset validIndexPrefix=" + validIndexPrefix);
       }
     }
 
@@ -948,11 +961,9 @@
         // This is a "next" frame -- even if it's
         // floor'd we must pretend it isn't so we don't
         // try to scan to the right floor frame:
-        currentFrame.isFloor = false;
-        //currentFrame.hasTerms = true;
         currentFrame.loadBlock();
       } else {
-        //if (DEBUG) System.out.println("  return term=" + term.utf8ToString() + " " + term + " currentFrame.ord=" + currentFrame.ord);
+        //if (DEBUG) System.out.println("  return term=" + brToString(term) + " currentFrame.ord=" + currentFrame.ord);
         return term.get();
       }
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
index 15d6a91..fcbf9ec 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
@@ -37,6 +37,10 @@
 
   FST.Arc<BytesRef> arc;
 
+  final boolean versionAutoPrefix;
+
+  //static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
+
   // File pointer where this block was loaded from
   long fp;
   long fpOrig;
@@ -96,6 +100,8 @@
     this.state = ste.fr.parent.postingsReader.newTermState();
     this.state.totalTermFreq = -1;
     this.longs = new long[ste.fr.longsSize];
+    this.versionAutoPrefix = ste.fr.parent.version >= BlockTreeTermsReader.VERSION_AUTO_PREFIX_TERMS;
+    //System.out.println("STE.init seg=" + ste.fr.parent.segment + " versionAutoPrefix=" + versionAutoPrefix);
   }
 
   public void setFloorData(ByteArrayDataInput in, BytesRef source) {
@@ -117,9 +123,7 @@
   }
 
   void loadNextFloorBlock() throws IOException {
-    //if (DEBUG) {
-    //System.out.println("    loadNextFloorBlock fp=" + fp + " fpEnd=" + fpEnd);
-    //}
+    //if (DEBUG) System.out.println("    loadNextFloorBlock fp=" + fp + " fpEnd=" + fpEnd);
     assert arc == null || isFloor: "arc=" + arc + " isFloor=" + isFloor;
     fp = fpEnd;
     nextEnt = -1;
@@ -137,6 +141,8 @@
      use. */
   void loadBlock() throws IOException {
 
+    //if (DEBUG) System.out.println("loadBlock fp=" + fp);
+
     // Clone the IndexInput lazily, so that consumers
     // that just pull a TermsEnum to
     // seekExact(TermState) don't pay this cost:
@@ -164,6 +170,7 @@
     // term suffixes:
     code = ste.in.readVInt();
     isLeafBlock = (code & 1) != 0;
+    //if (DEBUG) System.out.println("  isLeafBlock=" + isLeafBlock);
     int numBytes = code >>> 1;
     if (suffixBytes.length < numBytes) {
       suffixBytes = new byte[ArrayUtil.oversize(numBytes, 1)];
@@ -171,13 +178,13 @@
     ste.in.readBytes(suffixBytes, 0, numBytes);
     suffixesReader.reset(suffixBytes, 0, numBytes);
 
-    /*if (DEBUG) {
-      if (arc == null) {
-      System.out.println("    loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
-      } else {
-      System.out.println("    loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
-      }
-      }*/
+    //if (DEBUG) {
+    //  if (arc == null) {
+    //    System.out.println("    loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
+    //  } else {
+    //    System.out.println("    loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
+    //  }
+    //}
 
     // stats
     numBytes = ste.in.readVInt();
@@ -205,7 +212,6 @@
     ste.in.readBytes(bytes, 0, numBytes);
     bytesReader.reset(bytes, 0, numBytes);
 
-
     // Sub-blocks of a single floor block are always
     // written one after another -- tail recurse:
     fpEnd = ste.in.getFilePointer();
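
For context, the header that loadBlock decodes packs the suffix-bytes length and the
leaf flag into a single VInt: the low bit marks a leaf block and the remaining bits
carry the byte count. A small sketch of the round trip (demo class name illustrative):

    public class BlockHeaderDemo {
      public static void main(String[] args) {
        int numBytes = 1234;
        boolean isLeaf = true;
        int code = (numBytes << 1) | (isLeaf ? 1 : 0);  // writer side
        System.out.println("isLeafBlock=" + ((code & 1) != 0)
            + " numBytes=" + (code >>> 1));             // reader side
      }
    }
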
@@ -262,50 +268,101 @@
     */
   }
 
-  public boolean next() {
-    return isLeafBlock ? nextLeaf() : nextNonLeaf();
+  // Decodes next entry; returns true if it's a sub-block
+  public boolean next() throws IOException {
+    if (isLeafBlock) {
+      nextLeaf();
+      return false;
+    } else {
+      return nextNonLeaf();
+    }
   }
 
-  // Decodes next entry; returns true if it's a sub-block
-  public boolean nextLeaf() {
-    //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
+  public void nextLeaf() {
+    //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp);
     assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
     nextEnt++;
     suffix = suffixesReader.readVInt();
+    //if (DEBUG) System.out.println("    suffix=" + suffix + " prefix=" + prefix);
     startBytePos = suffixesReader.getPosition();
     ste.term.setLength(prefix + suffix);
     ste.term.grow(ste.term.length());
     suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
-    // A normal term
     ste.termExists = true;
-    return false;
   }
 
-  public boolean nextNonLeaf() {
-    //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
-    assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
-    nextEnt++;
-    final int code = suffixesReader.readVInt();
-    suffix = code >>> 1;
-    startBytePos = suffixesReader.getPosition();
-    ste.term.setLength(prefix + suffix);
-    ste.term.grow(ste.term.length());
-    suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
-    if ((code & 1) == 0) {
-      // A normal term
-      ste.termExists = true;
-      subCode = 0;
-      state.termBlockOrd++;
-      return false;
-    } else {
-      // A sub-block; make sub-FP absolute:
-      ste.termExists = false;
-      subCode = suffixesReader.readVLong();
-      lastSubFP = fp - subCode;
-      //if (DEBUG) {
-      //System.out.println("    lastSubFP=" + lastSubFP);
-      //}
-      return true;
+  public boolean nextNonLeaf() throws IOException {
+    //if (DEBUG) System.out.println("  stef.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + suffixesReader.getPosition());
+    while (true) {
+      if (nextEnt == entCount) {
+        assert arc == null || (isFloor && isLastInFloor == false): "isFloor=" + isFloor + " isLastInFloor=" + isLastInFloor;
+        //if (DEBUG) System.out.println("    stef: loadNextFloorBlock");
+        loadNextFloorBlock();
+        if (isLeafBlock) {
+          nextLeaf();
+          return false;
+        } else {
+          continue;
+        }
+      }
+        
+      assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
+      nextEnt++;
+      //System.out.println("    stef: readSuffix @ fp=" + suffixesReader.getPosition());
+      final int code = suffixesReader.readVInt();
+      if (versionAutoPrefix == false) {
+        suffix = code >>> 1;
+      } else {
+        suffix = code >>> 2;
+      }
+      //System.out.println("  next suffix=" + suffix + " versionAutoPrefix=" + versionAutoPrefix);
+      startBytePos = suffixesReader.getPosition();
+      ste.term.setLength(prefix + suffix);
+      ste.term.grow(ste.term.length());
+      suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
+      if (versionAutoPrefix == false) {
+        if ((code & 1) == 0) {
+          // A normal term
+          ste.termExists = true;
+          subCode = 0;
+          state.termBlockOrd++;
+          return false;
+        } else {
+          // A sub-block; make sub-FP absolute:
+          ste.termExists = false;
+          subCode = suffixesReader.readVLong();
+          lastSubFP = fp - subCode;
+          //if (DEBUG) {
+          //System.out.println("    lastSubFP=" + lastSubFP);
+          //}
+          return true;
+        }
+      } else {
+
+        switch(code & 3) {
+        case 0:
+          // A normal term
+          ste.termExists = true;
+          subCode = 0;
+          state.termBlockOrd++;
+          return false;
+        case 1:
+          // A sub-block; make sub-FP absolute:
+          ste.termExists = false;
+          subCode = suffixesReader.readVLong();
+          lastSubFP = fp - subCode;
+          //if (DEBUG) {
+          //System.out.println("    lastSubFP=" + lastSubFP);
+          //}
+          return true;
+        case 2:
+        case 3:
+          // A prefix term: skip it
+          state.termBlockOrd++;
+          suffixesReader.readByte();
+          continue;
+        }
+      }
     }
   }
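
The loop above is what makes SegmentTermsEnum transparent to auto-prefix terms: entries
with flag 2 or 3 are consumed (suffix bytes plus one lead-end byte), termBlockOrd is
still incremented so per-term metadata stays aligned, and the scan continues. A
stripped-down sketch of that skip pattern over pre-decoded flags (purely illustrative):

    public class SkipAutoPrefixDemo {
      public static void main(String[] args) {
        int[] flags = {0, 2, 3, 1, 0};  // decoded (code & 3) values
        for (int flag : flags) {
          if (flag == 2 || flag == 3) {
            continue;  // auto-prefix term: consume it and keep scanning
          }
          System.out.println(flag == 1 ? "sub-block" : "normal term");
        }
      }
    }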
         
@@ -448,18 +505,38 @@
       assert nextEnt < entCount;
       nextEnt++;
       final int code = suffixesReader.readVInt();
-      suffixesReader.skipBytes(isLeafBlock ? code : code >>> 1);
-      //if (DEBUG) System.out.println("    " + nextEnt + " (of " + entCount + ") ent isSubBlock=" + ((code&1)==1));
-      if ((code & 1) != 0) {
-        final long subCode = suffixesReader.readVLong();
-        //if (DEBUG) System.out.println("      subCode=" + subCode);
-        if (targetSubCode == subCode) {
-          //if (DEBUG) System.out.println("        match!");
-          lastSubFP = subFP;
-          return;
+      if (versionAutoPrefix == false) {
+        suffixesReader.skipBytes(code >>> 1);
+        if ((code & 1) != 0) {
+          final long subCode = suffixesReader.readVLong();
+          if (targetSubCode == subCode) {
+            //if (DEBUG) System.out.println("        match!");
+            lastSubFP = subFP;
+            return;
+          }
+        } else {
+          state.termBlockOrd++;
         }
       } else {
-        state.termBlockOrd++;
+        int flag = code & 3;
+        suffixesReader.skipBytes(code >>> 2);
+        //if (DEBUG) System.out.println("    " + nextEnt + " (of " + entCount + ") ent isSubBlock=" + ((code&1)==1));
+        if (flag == 1) {
+          // Sub-block
+          final long subCode = suffixesReader.readVLong();
+          //if (DEBUG) System.out.println("      subCode=" + subCode);
+          if (targetSubCode == subCode) {
+            //if (DEBUG) System.out.println("        match!");
+            lastSubFP = subFP;
+            return;
+          }
+        } else {
+          state.termBlockOrd++;
+          if (flag == 2 || flag == 3) {
+            // Floor'd prefix term
+            suffixesReader.readByte();
+          }
+        }
       }
     }
   }
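
scanToSubBlock relies on the relative file-pointer scheme used throughout the block
tree: a sub-block entry stores fp(parent) - fp(child) as a VLong, so the reader recovers
the absolute child FP by subtraction, and matching an entry's stored delta against
targetSubCode identifies the child block we descended from. A one-line sketch
(names illustrative):

    public class SubBlockFpDemo {
      public static void main(String[] args) {
        long parentFP = 10_000, childFP = 9_200;
        long subCode = parentFP - childFP;  // what the writer stores
        System.out.println("recovered childFP=" + (parentFP - subCode));
      }
    }
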
@@ -473,6 +550,21 @@
   private int suffix;
   private long subCode;
 
+  // for debugging
+  /*
+  @SuppressWarnings("unused")
+  static String brToString(BytesRef b) {
+    try {
+      return b.utf8ToString() + " " + b;
+    } catch (Throwable t) {
+      // If BytesRef isn't actually UTF8, or it's eg a
+      // prefix of UTF8 that ends mid-unicode-char, we
+      // fallback to hex:
+      return b.toString();
+    }
+  }
+  */
+
   // Target's prefix matches this block's prefix; we
   // scan the entries check if the suffix matches.
   public SeekStatus scanToTermLeaf(BytesRef target, boolean exactOnly) throws IOException {
@@ -535,9 +627,6 @@
           // keep scanning
 
           if (nextEnt == entCount) {
-            if (exactOnly) {
-              fillTerm();
-            }
             // We are done scanning this block
             break nextTerm;
           } else {
@@ -590,7 +679,7 @@
   // scan the entries check if the suffix matches.
   public SeekStatus scanToTermNonLeaf(BytesRef target, boolean exactOnly) throws IOException {
 
-    //if (DEBUG) System.out.println("    scanToTermNonLeaf: block fp=" + fp + " prefix=" + prefix + " nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" + brToString(term));
+    //if (DEBUG) System.out.println("    scanToTermNonLeaf: block fp=" + fp + " prefix=" + prefix + " nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" + brToString(target));
 
     assert nextEnt != -1;
 
@@ -605,30 +694,60 @@
     assert prefixMatches(target);
 
     // Loop over each entry (term or sub-block) in this block:
-    //nextTerm: while(nextEnt < entCount) {
-    nextTerm: while (true) {
+    nextTerm: while(nextEnt < entCount) {
+
       nextEnt++;
 
       final int code = suffixesReader.readVInt();
-      suffix = code >>> 1;
-      // if (DEBUG) {
-      //   BytesRef suffixBytesRef = new BytesRef();
-      //   suffixBytesRef.bytes = suffixBytes;
-      //   suffixBytesRef.offset = suffixesReader.getPosition();
-      //   suffixBytesRef.length = suffix;
-      //   System.out.println("      cycle: " + ((code&1)==1 ? "sub-block" : "term") + " " + (nextEnt-1) + " (of " + entCount + ") suffix=" + brToString(suffixBytesRef));
-      // }
+      if (versionAutoPrefix == false) {
+        suffix = code >>> 1;
+      } else {
+        suffix = code >>> 2;
+      }
 
-      ste.termExists = (code & 1) == 0;
+      //if (DEBUG) {
+      //  BytesRef suffixBytesRef = new BytesRef();
+      //  suffixBytesRef.bytes = suffixBytes;
+      //  suffixBytesRef.offset = suffixesReader.getPosition();
+      //  suffixBytesRef.length = suffix;
+      //  System.out.println("      cycle: " + ((code&1)==1 ? "sub-block" : "term") + " " + (nextEnt-1) + " (of " + entCount + ") suffix=" + brToString(suffixBytesRef));
+      //}
+
       final int termLen = prefix + suffix;
       startBytePos = suffixesReader.getPosition();
       suffixesReader.skipBytes(suffix);
-      if (ste.termExists) {
-        state.termBlockOrd++;
-        subCode = 0;
+      if (versionAutoPrefix == false) {
+        ste.termExists = (code & 1) == 0;
+        if (ste.termExists) {
+          state.termBlockOrd++;
+          subCode = 0;
+        } else {
+          subCode = suffixesReader.readVLong();
+          lastSubFP = fp - subCode;
+        }
       } else {
-        subCode = suffixesReader.readVLong();
-        lastSubFP = fp - subCode;
+        switch (code & 3) {
+        case 0:
+          // Normal term
+          ste.termExists = true;
+          state.termBlockOrd++;
+          subCode = 0;
+          break;
+        case 1:
+          // Sub-block
+          ste.termExists = false;
+          subCode = suffixesReader.readVLong();
+          lastSubFP = fp - subCode;
+          break;
+        case 2:
+        case 3:
+          // Floor prefix term: skip it
+          //if (DEBUG) System.out.println("        skip floor prefix term");
+          suffixesReader.readByte();
+          ste.termExists = false;
+          state.termBlockOrd++;
+          continue;
+        }
       }
 
       final int targetLimit = target.offset + (target.length < termLen ? target.length : termLen);
@@ -637,7 +756,7 @@
       // Loop over bytes in the suffix, comparing to
       // the target
       int bytePos = startBytePos;
-      while(true) {
+      while (true) {
         final int cmp;
         final boolean stop;
         if (targetPos < targetLimit) {
@@ -652,24 +771,18 @@
         if (cmp < 0) {
           // Current entry is still before the target;
           // keep scanning
-
-          if (nextEnt == entCount) {
-            if (exactOnly) {
-              fillTerm();
-              //termExists = true;
-            }
-            // We are done scanning this block
-            break nextTerm;
-          } else {
-            continue nextTerm;
-          }
+          continue nextTerm;
         } else if (cmp > 0) {
 
           // Done!  Current entry is after target --
           // return NOT_FOUND:
           fillTerm();
 
+          //if (DEBUG) System.out.println("        maybe done exactOnly=" + exactOnly + " ste.termExists=" + ste.termExists);
+
           if (!exactOnly && !ste.termExists) {
+            //System.out.println("  now pushFrame");
             // We are on a sub-block, and caller wants
             // us to position to the next term after
             // the target, so we must recurse into the
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java
index 710acd1..4a8efd5 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java
@@ -48,6 +48,8 @@
   /** Total number of bytes (sum of term lengths) across all terms in the field. */
   public long totalTermBytes;
 
+  // TODO: add total auto-prefix term count
+
   /** The number of normal (non-floor) blocks in the terms file. */
   public int nonFloorBlockCount;
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
index dd32083..8e9944d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
@@ -28,9 +28,9 @@
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -248,7 +248,7 @@
   byte scratchBytes[] = new byte[16];
 
   @Override
-  public void writeField(FieldInfo info, StorableField field)
+  public void writeField(FieldInfo info, IndexableField field)
       throws IOException {
 
     ++numStoredFieldsInDoc;
@@ -281,7 +281,7 @@
         bits = STRING;
         string = field.stringValue();
         if (string == null) {
-          throw new IllegalArgumentException("field " + field.name() + " is stored but does not have binaryValue, stringValue nor numericValue");
+          throw new IllegalArgumentException("field \"" + field.name() + "\" is stored but does not have binaryValue, stringValue nor numericValue");
         }
       }
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
index bcb352b..997bf4b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
@@ -391,8 +391,10 @@
   final static int VERSION_START = 0;
   final static int VERSION_CURRENT = VERSION_START;
 
-  private final int minTermBlockSize;
-  private final int maxTermBlockSize;
+  private final int minItemsInBlock;
+  private final int maxItemsInBlock;
+  private final int minItemsInAutoPrefix;
+  private final int maxItemsInAutoPrefix;
 
   /**
    * Fixed packed block size, number of integers encoded in 
@@ -404,18 +406,33 @@
   /** Creates {@code Lucene50PostingsFormat} with default
    *  settings. */
   public Lucene50PostingsFormat() {
-    this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
+    this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, 0, 0);
   }
 
   /** Creates {@code Lucene50PostingsFormat} with custom
    *  values for {@code minBlockSize} and {@code
-   *  maxBlockSize} passed to block terms dictionary.
+   *  maxBlockSize} and default values for {@code minItemsInAutoPrefix} and
+   *  {@code maxItemsInAutoPrefix}, passed to the block tree terms dictionary.
    *  @see BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int) */
-  public Lucene50PostingsFormat(int minTermBlockSize, int maxTermBlockSize) {
+  public Lucene50PostingsFormat(int minItemsInBlock, int maxItemsInBlock) {
+    this(minItemsInBlock, maxItemsInBlock, 0, 0);
+  }
+
+  /** Creates {@code Lucene50PostingsFormat} with custom
+   *  values for {@code minItemsInBlock}, {@code
+   *  maxItemsInBlock}, {@code minItemsInAutoPrefix} and {@code maxItemsInAutoPrefix},
+   *  passed to the block tree terms dictionary.
+   *  @see BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int,int,int) */
+  public Lucene50PostingsFormat(int minItemsInBlock, int maxItemsInBlock, int minItemsInAutoPrefix, int maxItemsInAutoPrefix) {
     super("Lucene50");
-    BlockTreeTermsWriter.validateSettings(minTermBlockSize, maxTermBlockSize);
-    this.minTermBlockSize = minTermBlockSize;
-    this.maxTermBlockSize = maxTermBlockSize;
+    BlockTreeTermsWriter.validateSettings(minItemsInBlock,
+                                          maxItemsInBlock);
+    BlockTreeTermsWriter.validateAutoPrefixSettings(minItemsInAutoPrefix,
+                                                    maxItemsInAutoPrefix);
+    this.minItemsInBlock = minItemsInBlock;
+    this.maxItemsInBlock = maxItemsInBlock;
+    this.minItemsInAutoPrefix = minItemsInAutoPrefix;
+    this.maxItemsInAutoPrefix = maxItemsInAutoPrefix;
   }
 
   @Override
@@ -431,8 +448,10 @@
     try {
       FieldsConsumer ret = new BlockTreeTermsWriter(state, 
                                                     postingsWriter,
-                                                    minTermBlockSize, 
-                                                    maxTermBlockSize);
+                                                    minItemsInBlock,
+                                                    maxItemsInBlock,
+                                                    minItemsInAutoPrefix,
+                                                    maxItemsInAutoPrefix);
       success = true;
       return ret;
     } finally {
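
Hypothetical usage of the new four-int constructor above: 25 and 48 are the block-size
defaults the two-int constructor delegates with, and (0, 0) for the auto-prefix pair
disables auto-prefix terms, so the non-zero auto-prefix bounds below are illustrative
only and subject to validateAutoPrefixSettings:

    import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;

    public class PostingsFormatSetup {
      public static void main(String[] args) {
        // Enable auto-prefix terms with illustrative bounds; the defaults (0, 0)
        // would leave the feature off.
        Lucene50PostingsFormat pf = new Lucene50PostingsFormat(25, 48, 25, 48);
        System.out.println(pf.getName());
      }
    }
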
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
index ec0cfea..df1c710 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
@@ -127,9 +127,9 @@
       // First pass: assign field -> PostingsFormat
       for(String field : fields) {
         FieldInfo fieldInfo = writeState.fieldInfos.fieldInfo(field);
+        assert fieldInfo != null: "field=" + field;
 
         final PostingsFormat format = getPostingsFormatForField(field);
-  
         if (format == null) {
           throw new IllegalStateException("invalid null PostingsFormat for field=\"" + field + "\"");
         }
@@ -192,7 +192,6 @@
                 return group.fields.iterator();
               }
             };
-
           FieldsConsumer consumer = format.fieldsConsumer(group.state);
           toClose.add(consumer);
           consumer.write(maskedFields);
diff --git a/lucene/core/src/java/org/apache/lucene/document/BigDecimalComparator.java b/lucene/core/src/java/org/apache/lucene/document/BigDecimalComparator.java
new file mode 100644
index 0000000..4cf940d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/BigDecimalComparator.java
@@ -0,0 +1,124 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.Arrays;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.SimpleFieldComparator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+// TODO: this doesn't use the ord; we could index BinaryDV instead for the single valued case?
+class BigDecimalComparator extends SimpleFieldComparator<BigDecimal> {
+  private final String field;
+  private final BytesRef[] values;
+  private final int byteWidth;
+  private final BytesRef missingValue;
+  private final int scale;
+  private BytesRef bottom;
+  private BytesRef topValue;
+  private Bits docsWithField;
+  private SortedDocValues currentReaderValues;
+
+  public BigDecimalComparator(int numHits, String field, int byteWidth, boolean missingLast, int scale) {
+    this.scale = scale;
+    this.field = field;
+    this.byteWidth = byteWidth;
+    values = new BytesRef[numHits];
+    missingValue = new BytesRef(byteWidth);
+    missingValue.length = byteWidth;
+    if (missingLast) {
+      Arrays.fill(missingValue.bytes, (byte) 0xff);
+    }
+    for(int i=0;i<numHits;i++) {
+      values[i] = new BytesRef(byteWidth);
+      values[i].length = byteWidth;
+    }
+  }
+
+  @Override
+  public int compare(int slot1, int slot2) {
+    return values[slot1].compareTo(values[slot2]);
+  }
+
+  private BytesRef getDocValue(int doc) {
+    BytesRef v;
+    if (docsWithField != null && docsWithField.get(doc) == false) {
+      v = missingValue;
+    } else {
+      v = currentReaderValues.get(doc);
+    }
+    assert v.length == byteWidth: v.length + " vs " +  byteWidth;
+    return v;
+  }
+
+  @Override
+  public int compareBottom(int doc) {
+    return bottom.compareTo(getDocValue(doc));
+  }
+
+  @Override
+  public void copy(int slot, int doc) {
+    BytesRef v = getDocValue(doc);
+    System.arraycopy(v.bytes, v.offset, values[slot].bytes, 0, byteWidth);
+  }
+    
+  @Override
+  public void setBottom(final int bottom) {
+    this.bottom = values[bottom];
+  }
+
+  @Override
+  public void setTopValue(BigDecimal value) {
+    topValue = NumericUtils.bigIntToBytes(value.unscaledValue(), byteWidth);
+  }
+
+  @Override
+  public BigDecimal value(int slot) {
+    return new BigDecimal(NumericUtils.bytesToBigInt(values[slot]), scale);
+  }
+
+  @Override
+  public int compareTop(int doc) {
+    BytesRef v = getDocValue(doc);
+    return topValue.compareTo(v);
+  }
+
+  @Override
+  public void doSetNextReader(LeafReaderContext context) throws IOException {
+    currentReaderValues = getDocValues(context);
+    assert currentReaderValues != null;
+    docsWithField = DocValues.getDocsWithField(context.reader(), field);
+    assert docsWithField != null;
+    // optimization to remove unneeded checks on the bit interface:
+    if (docsWithField instanceof Bits.MatchAllBits) {
+      docsWithField = null;
+    }
+  }
+
+  protected SortedDocValues getDocValues(LeafReaderContext context) throws IOException {
+    return context.reader().getSortedDocValues(field);
+  }
+}
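
Both this comparator and BigIntComparator below compare fixed-width byte encodings
directly, which only works if NumericUtils.bigIntToBytes produces bytes whose unsigned
lexicographic order matches numeric order at a fixed byteWidth; that is also the
invariant the 0x00/0xff missing-value fill depends on. A round-trip sketch under that
assumption, using the helpers this branch adds:

    import java.math.BigInteger;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.NumericUtils;

    public class BigIntEncodingDemo {
      public static void main(String[] args) {
        int byteWidth = 16;
        BytesRef a = NumericUtils.bigIntToBytes(BigInteger.valueOf(-5), byteWidth);
        BytesRef b = NumericUtils.bigIntToBytes(BigInteger.valueOf(7), byteWidth);
        System.out.println(a.compareTo(b) < 0);            // true: order preserved
        System.out.println(NumericUtils.bytesToBigInt(b)); // 7
      }
    }
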
diff --git a/lucene/core/src/java/org/apache/lucene/document/BigIntComparator.java b/lucene/core/src/java/org/apache/lucene/document/BigIntComparator.java
new file mode 100644
index 0000000..03b774a
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/BigIntComparator.java
@@ -0,0 +1,122 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.Arrays;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.SimpleFieldComparator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+
+// TODO: this doesn't use the ord; we could index BinaryDV instead for the single valued case?
+class BigIntComparator extends SimpleFieldComparator<BigInteger> {
+  private final String field;
+  private final BytesRef[] values;
+  private final int byteWidth;
+  private final BytesRef missingValue;
+  private BytesRef bottom;
+  private BytesRef topValue;
+  private Bits docsWithField;
+  private SortedDocValues currentReaderValues;
+
+  public BigIntComparator(int numHits, String field, int byteWidth, boolean missingLast) {
+    this.field = field;
+    this.byteWidth = byteWidth;
+    values = new BytesRef[numHits];
+    missingValue = new BytesRef(byteWidth);
+    missingValue.length = byteWidth;
+    if (missingLast) {
+      Arrays.fill(missingValue.bytes, (byte) 0xff);
+    }
+    for(int i=0;i<numHits;i++) {
+      values[i] = new BytesRef(byteWidth);
+      values[i].length = byteWidth;
+    }
+  }
+
+  @Override
+  public int compare(int slot1, int slot2) {
+    return values[slot1].compareTo(values[slot2]);
+  }
+
+  private BytesRef getDocValue(int doc) {
+    BytesRef v;
+    if (docsWithField != null && docsWithField.get(doc) == false) {
+      v = missingValue;
+    } else {
+      v = currentReaderValues.get(doc);
+    }
+    assert v.length == byteWidth: v.length + " vs " + byteWidth;
+    return v;
+  }
+
+  @Override
+  public int compareBottom(int doc) {
+    return bottom.compareTo(getDocValue(doc));
+  }
+
+  @Override
+  public void copy(int slot, int doc) {
+    BytesRef v = getDocValue(doc);
+    System.arraycopy(v.bytes, v.offset, values[slot].bytes, 0, byteWidth);
+  }
+    
+  @Override
+  public void setBottom(final int bottom) {
+    this.bottom = values[bottom];
+  }
+
+  @Override
+  public void setTopValue(BigInteger value) {
+    topValue = NumericUtils.bigIntToBytes(value, byteWidth);
+  }
+
+  @Override
+  public BigInteger value(int slot) {
+    return NumericUtils.bytesToBigInt(values[slot]);
+  }
+
+  @Override
+  public int compareTop(int doc) {
+    BytesRef v = getDocValue(doc);
+    return topValue.compareTo(v);
+  }
+
+  @Override
+  public void doSetNextReader(LeafReaderContext context) throws IOException {
+    currentReaderValues = getDocValues(context);
+    assert currentReaderValues != null;
+    docsWithField = DocValues.getDocsWithField(context.reader(), field);
+    assert docsWithField != null;
+    // optimization to remove unneeded checks on the bit interface:
+    if (docsWithField instanceof Bits.MatchAllBits) {
+      docsWithField = null;
+    }
+  }
+
+  protected SortedDocValues getDocValues(LeafReaderContext context) throws IOException {
+    return context.reader().getSortedDocValues(field);
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java
deleted file mode 100644
index c0c9c63..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java
+++ /dev/null
@@ -1,63 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * Field that stores a per-document {@link BytesRef} value.  
- * <p>
- * The values are stored directly with no sharing, which is a good fit when
- * the fields don't share (many) values, such as a title field.  If values 
- * may be shared and sorted it's better to use {@link SortedDocValuesField}.  
- * Here's an example usage:
- * 
- * <pre class="prettyprint">
- *   document.add(new BinaryDocValuesField(name, new BytesRef("hello")));
- * </pre>
- * 
- * <p>
- * If you also need to store the value, you should add a
- * separate {@link StoredField} instance.
- * 
- * @see BinaryDocValues
- * */
-public class BinaryDocValuesField extends Field {
-  
-  /**
-   * Type for straight bytes DocValues.
-   */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setDocValuesType(DocValuesType.BINARY);
-    TYPE.freeze();
-  }
-  
-  /**
-   * Create a new binary DocValues field.
-   * @param name field name
-   * @param value binary content
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public BinaryDocValuesField(String name, BytesRef value) {
-    super(name, TYPE);
-    fieldsData = value;
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/BinaryTokenStream.java b/lucene/core/src/java/org/apache/lucene/document/BinaryTokenStream.java
new file mode 100644
index 0000000..bfaf72f
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/BinaryTokenStream.java
@@ -0,0 +1,86 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.BytesRef;
+
+final class BinaryTokenStream extends TokenStream {
+  private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);
+  private boolean available = true;
+  
+  public BinaryTokenStream() {
+  }
+
+  public void setValue(BytesRef value) {
+    bytesAtt.setBytesRef(value);
+  }
+  
+  @Override
+  public boolean incrementToken() {
+    if (available) {
+      clearAttributes();
+      available = false;
+      return true;
+    }
+    return false;
+  }
+  
+  @Override
+  public void reset() {
+    available = true;
+  }
+  
+  public interface ByteTermAttribute extends TermToBytesRefAttribute {
+    public void setBytesRef(BytesRef bytes);
+  }
+  
+  public static class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {
+    private BytesRef bytes;
+    
+    @Override
+    public void fillBytesRef() {
+      // no-op: the bytes were already filled by our owner's incrementToken
+    }
+    
+    @Override
+    public BytesRef getBytesRef() {
+      return bytes;
+    }
+
+    @Override
+    public void setBytesRef(BytesRef bytes) {
+      this.bytes = bytes;
+    }
+    
+    @Override
+    public void clear() {
+      // NOTE: we should set null bytes here but then we'd have extra BytesRef copy?
+    }
+    
+    @Override
+    public void copyTo(AttributeImpl target) {
+      ByteTermAttributeImpl other = (ByteTermAttributeImpl) target;
+      other.bytes = bytes;
+    }
+  }
+}
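
Usage sketch for the single-token stream above: after setValue, one
reset()/incrementToken() cycle emits exactly one token whose bytes are exposed through
the TermToBytesRefAttribute. The class is package-private, so this sketch would live in
org.apache.lucene.document:

    import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
    import org.apache.lucene.util.BytesRef;

    public class BinaryTokenStreamDemo {
      public static void main(String[] args) throws Exception {
        BinaryTokenStream ts = new BinaryTokenStream();
        ts.setValue(new BytesRef(new byte[] {1, 2, 3}));
        TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          termAtt.fillBytesRef();                    // no-op here; bytes already set
          System.out.println(termAtt.getBytesRef()); // the value set above
        }
        ts.end();
        ts.close();
      }
    }
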
diff --git a/lucene/core/src/java/org/apache/lucene/document/DateTools.java b/lucene/core/src/java/org/apache/lucene/document/DateTools.java
index 6df73f1..3f23495 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DateTools.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DateTools.java
@@ -17,10 +17,8 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.NumericRangeQuery; // for javadocs
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.util.NumericUtils;        // for javadocs
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
@@ -40,9 +38,6 @@
  * {@link TermRangeQuery} and {@link PrefixQuery} will require more memory and become slower.
  * 
  * <P>
- * Another approach is {@link NumericUtils}, which provides
- * a sortable binary representation (prefix encoded) of numeric values, which
- * date/time are.
  * For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
  * <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
  * index this as a numeric value with {@link LongField}
diff --git a/lucene/core/src/java/org/apache/lucene/document/Document.java b/lucene/core/src/java/org/apache/lucene/document/Document.java
index 503363c..c1456ed 100644
--- a/lucene/core/src/java/org/apache/lucene/document/Document.java
+++ b/lucene/core/src/java/org/apache/lucene/document/Document.java
@@ -17,174 +17,517 @@
  * limitations under the License.
  */
 
-import java.util.*;
+import java.io.IOException;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Set;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.FieldTypes.FieldType;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexDocument;
 import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexReader;  // for javadoc
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.search.IndexSearcher;  // for javadoc
-import org.apache.lucene.search.ScoreDoc; // for javadoc
+import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.FilterIterator;
+import org.apache.lucene.util.HalfFloat;
+import org.apache.lucene.util.NumericUtils;
 
-/** Documents are the unit of indexing and search.
- *
- * A Document is a set of fields.  Each field has a name and a textual value.
- * A field may be {@link org.apache.lucene.index.IndexableFieldType#stored() stored} with the document, in which
- * case it is returned with search hits on the document.  Thus each document
- * should typically contain one or more stored fields which uniquely identify
- * it.
- *
- * <p>Note that fields which are <i>not</i> {@link org.apache.lucene.index.IndexableFieldType#stored() stored} are
- * <i>not</i> available in documents retrieved from the index, e.g. with {@link
- * ScoreDoc#doc} or {@link IndexReader#document(int)}.
- */
+/** Holds one document, either created anew for indexing, or retrieved at search time.
+ *  When you add fields, their type and properties are tracked by an instance of
+ *  {@link FieldTypes}, obtained via {@link IndexWriter#getFieldTypes()}. */
 
-public final class Document implements IndexDocument {
+public class Document implements Iterable<IndexableField> {
 
-  private final List<Field> fields = new ArrayList<>();
+  private static final float DEFAULT_BOOST = 1.0f;
 
-  /** Constructs a new document with no fields. */
-  public Document() {}
-  
+  private final FieldTypes fieldTypes;
+  private final List<IndexableField> fields = new ArrayList<>();
+  private final boolean changeSchema;
+  private final Set<String> seenFields;
 
-  /**
-  * Creates a Document from StoredDocument so it that can be used e.g. for another
-  * round of indexing.
-  *
-  */
-  public Document(StoredDocument storedDoc) {
-    for (StorableField field : storedDoc.getFields()) {
-      Field newField = new Field(field.name(), (FieldType) field.fieldType());
-     
-      newField.fieldsData = field.stringValue();
-      if (newField.fieldsData == null) {
-        newField.fieldsData = field.numericValue();
-      }
-      if (newField.fieldsData == null) {
-        newField.fieldsData = field.binaryValue();
-      }
-      if (newField.fieldsData == null) {
-        newField.fieldsData = field.readerValue();
-      }
-     
-      add(newField);
+  private class FieldValue implements IndexableField {
+    final String fieldName;
+    final Object value;
+    final float boost;
+    final FieldType fieldType;
+
+    public FieldValue(String name, Object value) {
+      this(name, value, DEFAULT_BOOST);
     }
-  }
 
-  
-  /**
-   * <p>Adds a field to a document.  Several fields may be added with
-   * the same name.  In this case, if the fields are indexed, their text is
-   * treated as though appended for the purposes of search.</p>
-   * <p> Note that add like the removeField(s) methods only makes sense 
-   * prior to adding a document to an index. These methods cannot
-   * be used to change the content of an existing index! In order to achieve this,
-   * a document has to be deleted from an index and a new changed version of that
-   * document has to be added.</p>
-   */
-  public final void add(Field field) {
-    fields.add(field);
-  }
-  
-  /**
-   * <p>Removes field with the specified name from the document.
-   * If multiple fields exist with this name, this method removes the first field that has been added.
-   * If there is no field with the specified name, the document remains unchanged.</p>
-   * <p> Note that the removeField(s) methods like the add method only make sense 
-   * prior to adding a document to an index. These methods cannot
-   * be used to change the content of an existing index! In order to achieve this,
-   * a document has to be deleted from an index and a new changed version of that
-   * document has to be added.</p>
-   */
-  public final void removeField(String name) {
-    Iterator<Field> it = fields.iterator();
-    while (it.hasNext()) {
-      Field field = it.next();
-      if (field.name().equals(name)) {
-        it.remove();
-        return;
+    public FieldValue(String fieldName, Object value, float boost) {
+      if (fieldName == null) {
+        throw new IllegalArgumentException("field name cannot be null");
+      }
+      if (value == null) {
+        throw new IllegalArgumentException("field=\"" + fieldName + "\": value cannot be null");
+      }
+      this.fieldName = fieldName;
+      this.value = value;
+      this.boost = boost;
+      FieldType curFieldType;
+      if (changeSchema == false) {
+        if (fieldTypes != null) {
+          try {
+            curFieldType = fieldTypes.getFieldType(fieldName);
+          } catch (IllegalArgumentException iae) {
+            curFieldType = null;
+          }
+        } else {
+          curFieldType = null;
+        }
+      } else {
+        curFieldType = fieldTypes.getFieldType(fieldName);
+      }
+      this.fieldType = curFieldType;
+      if (seenFields != null && seenFields.add(fieldName) == false && fieldType.multiValued != Boolean.TRUE) {
+        throw new IllegalArgumentException("field=\"" + fieldName + "\": this field is added more than once but is not multiValued");
       }
     }
-  }
-  
-  /**
-   * <p>Removes all fields with the given name from the document.
-   * If there is no field with the specified name, the document remains unchanged.</p>
-   * <p> Note that the removeField(s) methods like the add method only make sense 
-   * prior to adding a document to an index. These methods cannot
-   * be used to change the content of an existing index! In order to achieve this,
-   * a document has to be deleted from an index and a new changed version of that
-   * document has to be added.</p>
-   */
-  public final void removeFields(String name) {
-    Iterator<Field> it = fields.iterator();
-    while (it.hasNext()) {
-      Field field = it.next();
-      if (field.name().equals(name)) {
-        it.remove();
+    
+    @Override
+    public String name() {
+      return fieldName;
+    }
+
+    @Override
+    public IndexableFieldType fieldType() {
+      return fieldType;
+    }
+
+    @Override
+    public float boost() {
+      return boost;
+    }
+
+    private TokenStream getReusedBinaryTokenStream(BytesRef value, TokenStream reuse) {
+      BinaryTokenStream bts;
+      // It might be non-null yet not a BinaryTokenStream if this is an atom field that just indexed a too-small or too-big term:
+      if (reuse instanceof BinaryTokenStream) {
+        bts = (BinaryTokenStream) reuse;
+      } else {
+        bts = new BinaryTokenStream();
+      }
+      bts.setValue(value);
+      return bts;
+    }
+
+    private TokenStream getReusedStringTokenStream(String value, TokenStream reuse) {
+      StringTokenStream sts;
+      // It might be non-null yet not a StringTokenStream if this is an atom field that just indexed a too-small or too-big term:
+      if (reuse instanceof StringTokenStream) {
+        sts = (StringTokenStream) reuse;
+      } else {
+        sts = new StringTokenStream();
+      }
+      sts.setValue(value);
+      return sts;
+    }
+    
+    @Override
+    public String toString() {
+      return fieldName + ": " + value;
+    }
+
+    @Override
+    public TokenStream tokenStream(TokenStream reuse) throws IOException {
+      Analyzer analyzer = fieldTypes.getIndexAnalyzer();
+
+      assert fieldTypes.getIndexOptions(fieldName) != IndexOptions.NONE;
+
+      FieldTypes.FieldType fieldType = fieldTypes.getFieldType(fieldName);
+
+      switch (fieldType.valueType) {
+      case INT:
+        return getReusedBinaryTokenStream(NumericUtils.intToBytes(((Number) value).intValue()), reuse);
+      case HALF_FLOAT:
+        return getReusedBinaryTokenStream(NumericUtils.halfFloatToBytes(((Number) value).floatValue()), reuse);
+      case FLOAT:
+        return getReusedBinaryTokenStream(NumericUtils.floatToBytes(((Number) value).floatValue()), reuse);
+      case LONG:
+        return getReusedBinaryTokenStream(NumericUtils.longToBytes(((Number) value).longValue()), reuse);
+      case DOUBLE:
+        return getReusedBinaryTokenStream(NumericUtils.doubleToBytes(((Number) value).doubleValue()), reuse);
+      case BIG_INT:
+        {
+          BytesRef bytes;
+          try {
+            bytes = NumericUtils.bigIntToBytes((BigInteger) value, fieldType.bigIntByteWidth.intValue());
+          } catch (IllegalArgumentException iae) {
+            FieldTypes.illegalState(fieldName, iae.getMessage());
+            // Dead code but javac disagrees:
+            bytes = null;
+          }
+          return getReusedBinaryTokenStream(bytes, reuse);
+        }
+      case BIG_DECIMAL:
+        {
+          BigDecimal dec = (BigDecimal) value;
+          if (dec.scale() != fieldType.bigDecimalScale.intValue()) {
+            FieldTypes.illegalState(fieldName, "BIG_DECIMAL was configured with scale=" + fieldType.bigDecimalScale + ", but indexed value has scale=" + dec.scale());
+          }
+          BytesRef bytes;
+          try {
+            bytes = NumericUtils.bigIntToBytes(dec.unscaledValue(), fieldType.bigIntByteWidth.intValue());
+          } catch (IllegalArgumentException iae) {
+            FieldTypes.illegalState(fieldName, iae.getMessage());
+            // Dead code but javac disagrees:
+            bytes = null;
+          }
+          return getReusedBinaryTokenStream(bytes, reuse);
+        }
+      case DATE:
+        return getReusedBinaryTokenStream(NumericUtils.longToBytes(((Date) value).getTime()), reuse);
+      case ATOM:
+        // TODO: we could/should just wrap a SingleTokenTokenizer here?:
+        if (fieldType.minTokenLength != null) {
+          if (value instanceof String) {
+            String s = (String) value;
+            if (s.length() < fieldType.minTokenLength.intValue() ||
+                s.length() > fieldType.maxTokenLength.intValue()) {
+              return EMPTY_TOKEN_STREAM;
+            }
+          } else if (value instanceof BytesRef) {
+            BytesRef b = (BytesRef) value;
+            if (b.length < fieldType.minTokenLength.intValue() ||
+                b.length > fieldType.maxTokenLength.intValue()) {
+              return EMPTY_TOKEN_STREAM;
+            }
+          }
+        }
+
+        Object indexValue;
+
+        if (fieldType.reversedTerms == Boolean.TRUE) {
+          if (value instanceof String) {
+            indexValue = new StringBuilder((String) value).reverse().toString();
+          } else {
+            BytesRef valueBR = (BytesRef) value;
+            BytesRef br = new BytesRef(valueBR.length);
+            for(int i=0;i<valueBR.length;i++) {
+              br.bytes[i] = valueBR.bytes[valueBR.offset+valueBR.length-i-1];
+            }
+            br.length = valueBR.length;
+            indexValue = br;
+          }
+        } else {
+          indexValue = value;
+        }
+        if (value instanceof String) {
+          return getReusedStringTokenStream((String) indexValue, reuse);
+        } else {
+          assert value instanceof BytesRef;
+          return getReusedBinaryTokenStream((BytesRef) indexValue, reuse);
+        }
+
+      case BINARY:
+        {
+          assert value instanceof BytesRef;
+          BinaryTokenStream bts;
+          if (reuse != null) {
+            if (reuse instanceof BinaryTokenStream == false) {
+              FieldTypes.illegalState(fieldName, "should have had BinaryTokenStream for reuse, but got " + reuse);
+            }
+            bts = (BinaryTokenStream) reuse;
+          } else {
+            bts = new BinaryTokenStream();
+          }
+          bts.setValue((BytesRef) value);
+          return bts;
+        }
+
+      case INET_ADDRESS:
+        {
+          assert value instanceof InetAddress;
+          BinaryTokenStream bts;
+          if (reuse != null) {
+            if (reuse instanceof BinaryTokenStream == false) {
+              FieldTypes.illegalState(fieldName, "should have had BinaryTokenStream for reuse, but got " + reuse);
+            }
+            bts = (BinaryTokenStream) reuse;
+          } else {
+            bts = new BinaryTokenStream();
+          }
+          bts.setValue(new BytesRef(((InetAddress) value).getAddress()));
+          return bts;
+        }
+
+      case SHORT_TEXT:
+      case TEXT:
+        if (value instanceof TokenStream) {
+          return (TokenStream) value;
+        } else if (value instanceof StringAndTokenStream) {
+          return ((StringAndTokenStream) value).tokens;
+        } else if (value instanceof Reader) {
+          return analyzer.tokenStream(name(), (Reader) value);
+        } else {
+          return analyzer.tokenStream(name(), (String) value);
+        }
+
+      case BOOLEAN:
+        byte[] token = new byte[1];
+        if (value == Boolean.TRUE) {
+          token[0] = 1;
+        }
+        return getReusedBinaryTokenStream(new BytesRef(token), reuse);
+
+      default:
+        FieldTypes.illegalState(fieldName, "valueType=" + fieldType.valueType + " cannot be indexed");
+
+        // Dead code but javac disagrees:
+        return null;
       }
     }
-  }
 
-
-  /**
-  * Returns an array of byte arrays for of the fields that have the name specified
-  * as the method parameter.  This method returns an empty
-  * array when there are no matching fields.  It never
-  * returns null.
-  *
-  * @param name the name of the field
-  * @return a <code>BytesRef[]</code> of binary field values
-  */
-  public final BytesRef[] getBinaryValues(String name) {
-    final List<BytesRef> result = new ArrayList<>();
-
-    for (Iterator<StorableField> it = storedFieldsIterator(); it.hasNext(); ) {
-      StorableField field = it.next();
-      if (field.name().equals(name)) {
-        final BytesRef bytes = field.binaryValue();
-        if (bytes != null) {
-          result.add(bytes);
+    @Override
+    public Number numericValue() {
+      if (fieldType == null) {
+        if (value instanceof Number) {
+          return (Number) value;
+        } else {
+          return null;
         }
       }
-    }
-  
-    return result.toArray(new BytesRef[result.size()]);
-  }
-  
-  /**
-  * Returns an array of bytes for the first (or only) field that has the name
-  * specified as the method parameter. This method will return <code>null</code>
-  * if no binary fields with the specified name are available.
-  * There may be non-binary fields with the same name.
-  *
-  * @param name the name of the field.
-  * @return a <code>BytesRef</code> containing the binary field value or <code>null</code>
-  */
-  public final BytesRef getBinaryValue(String name) {
-    for (Iterator<StorableField> it = storedFieldsIterator(); it.hasNext(); ) {
-      StorableField field = it.next();
-      if (field.name().equals(name)) {
-        final BytesRef bytes = field.binaryValue();
-        if (bytes != null) {
-          return bytes;
+      switch (fieldType.valueType) {
+      case INT:
+      case LONG:
+      case FLOAT:
+      case DOUBLE:
+        return (Number) value;
+      case HALF_FLOAT:
+        return Short.valueOf(NumericUtils.halfFloatToShort((Float) value));
+      case DATE:
+        return ((Date) value).getTime();
+      case BOOLEAN:
+        if (value == Boolean.TRUE) {
+          return Integer.valueOf(1);
+        } else {
+          return Integer.valueOf(0);
         }
+      default:
+        return null;
       }
     }
-    return null;
+
+    @Override
+    public Number numericDocValue() {
+      if (fieldType == null) {
+        return null;
+      }
+      switch (fieldType.valueType) {
+      case INT:
+        return (Number) value;
+      case LONG:
+        return (Number) value;
+      case HALF_FLOAT:
+        short shortBits = HalfFloat.floatToShort((Float) value);
+        shortBits = NumericUtils.sortableHalfFloatBits(shortBits);
+        return Short.valueOf(shortBits);
+      case FLOAT:
+        return Integer.valueOf(NumericUtils.floatToInt((Float) value));
+      case DOUBLE:
+        return Long.valueOf(NumericUtils.doubleToLong((Double) value));
+      case DATE:
+        return Long.valueOf(((Date) value).getTime());
+      case BOOLEAN:
+        if (value == Boolean.TRUE) {
+          return Integer.valueOf(1);
+        } else {
+          return Integer.valueOf(0);
+        }
+      default:
+        return null;
+      }
+    }
+
+    @Override
+    public String stringValue() {
+      if (fieldType == null) {
+        if (value instanceof String) {
+          return (String) value;
+        } else {
+          return null;
+        }
+      }
+
+      switch (fieldType.valueType) {
+      case SHORT_TEXT:
+      case TEXT:
+        if (value instanceof String) {
+          return (String) value;
+        } else if (value instanceof StringAndTokenStream) {
+          return ((StringAndTokenStream) value).value;
+        } else {
+          return null;
+        }
+      case ATOM:
+        if (value instanceof String) {
+          return (String) value;
+        } else {
+          return null;
+        }
+      default:
+        return null;
+      }
+    }
+
+    @Override
+    public BytesRef binaryValue() {
+      if (fieldType == null) {
+        if (value instanceof BytesRef) {
+          return (BytesRef) value;
+        } else {
+          return null;
+        }
+      }
+
+      if (fieldType.valueType == FieldTypes.ValueType.BOOLEAN) {
+        byte[] bytes = new byte[1];
+        if (value == Boolean.TRUE) {
+          bytes[0] = 1;
+        }
+        return new BytesRef(bytes);
+      } else if (fieldType.valueType == FieldTypes.ValueType.INET_ADDRESS) {
+        return new BytesRef(((InetAddress) value).getAddress());
+      } else if (fieldType.valueType == FieldTypes.ValueType.BIG_INT) { 
+        return new BytesRef(((BigInteger) value).toByteArray());
+      } else if (fieldType.valueType == FieldTypes.ValueType.BIG_DECIMAL) { 
+        BigDecimal dec = (BigDecimal) value;
+        if (dec.scale() != fieldType.bigDecimalScale) {
+          FieldTypes.illegalState(fieldName, "BIG_DECIMAL was configured with scale=" + fieldType.bigDecimalScale + ", but stored value has scale=" + dec.scale());
+        }
+        return new BytesRef(dec.unscaledValue().toByteArray());
+      } else if (value instanceof BytesRef) {
+        return (BytesRef) value;
+      } else {
+        return null;
+      }
+    }
+
+    @Override
+    public BytesRef binaryDocValue() {
+      if (value instanceof BytesRef) {
+        return (BytesRef) value;
+      } else if (fieldType == null) {
+        // Field is unknown to the schema; nothing further to derive.
+        return null;
+      } else if (fieldType.docValuesType == DocValuesType.BINARY || fieldType.docValuesType == DocValuesType.SORTED || fieldType.docValuesType == DocValuesType.SORTED_SET) {
+        if (fieldType.valueType == FieldTypes.ValueType.INET_ADDRESS) {
+          return new BytesRef(((InetAddress) value).getAddress());
+        } else if (fieldType.valueType == FieldTypes.ValueType.BIG_INT) {
+          // TODO: can we do this only once, if it's DV'd & indexed?
+          return NumericUtils.bigIntToBytes((BigInteger) value, fieldType.bigIntByteWidth);
+        } else if (fieldType.valueType == FieldTypes.ValueType.BIG_DECIMAL) { 
+          // TODO: can we do this only once, if it's DV'd & indexed?
+          return NumericUtils.bigIntToBytes(((BigDecimal) value).unscaledValue(), fieldType.bigIntByteWidth);
+        } else if (value instanceof String) {
+          String s = (String) value;
+          BytesRef br;
+          if (fieldType.sortCollator != null) {
+            // TODO: thread-local clones instead of sync'd on one instance?
+            byte[] bytes;
+            synchronized (fieldType.sortCollator) {
+              bytes = fieldType.sortCollator.getCollationKey(s).toByteArray();
+            }
+            br = new BytesRef(bytes);
+          } else {
+            // TODO: somewhat evil we utf8-encode your string?
+            br = new BytesRef(s);
+          }
+
+          return br;
+        }
+      }
+
+      return null;
+    }
   }
 
-  /** Returns a field with the given name if any exist in this document, or
-   * null.  If multiple fields exists with this name, this method returns the
-   * first value added.
-   */
-  public final Field getField(String name) {
-    for (Field field : fields) {
+  private static class StringAndTokenStream {
+    public final String value;
+    public final TokenStream tokens;
+    public StringAndTokenStream(String value, TokenStream tokens) {
+      this.value = value;
+      this.tokens = tokens;
+    }
+  }
+
+  public Document(FieldTypes fieldTypes) {
+    this(fieldTypes, true);
+  }
+
+  public Document(Document other) {
+    this.fieldTypes = other.fieldTypes;
+    this.changeSchema = other.changeSchema;
+    if (changeSchema) {
+      seenFields = new HashSet<>();
+    } else {
+      seenFields = null;
+    }
+    addAll(other);
+  }
+
+  Document(FieldTypes fieldTypes, boolean changeSchema) {
+    this.fieldTypes = fieldTypes;
+    this.changeSchema = changeSchema;
+    if (changeSchema) {
+      seenFields = new HashSet<>();
+    } else {
+      seenFields = null;
+    }
+  }
+
+  private boolean enableExistsField = true;
+  
+  /** Disables indexing of field names for this one document.
+   *  To disable globally use {@link FieldTypes#disableExistsFilters}. */
+  public void disableExistsField() {
+    enableExistsField = false;
+  }
+
+  @Override
+  public Iterator<IndexableField> iterator() {
+    if (fieldTypes != null) {
+      assert fieldTypes.getStored(FieldTypes.FIELD_NAMES_FIELD) == false;
+    }
+
+    return new Iterator<IndexableField>() {
+      int index;
+      int fieldNamesIndex;
+
+      public boolean hasNext() {
+        return index < fields.size() || (enableExistsField && changeSchema && fieldTypes != null && fieldTypes.enableExistsFilters && fieldNamesIndex < fields.size());
+      }
+
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+
+      public IndexableField next() {
+        if (index < fields.size()) {
+          return fields.get(index++);
+        } else if (enableExistsField && fieldTypes != null && changeSchema && fieldTypes.enableExistsFilters && fieldNamesIndex < fields.size()) {
+          // TODO: maybe single method call to add multiple atoms?  addAtom(String...)
+          return new FieldValue(FieldTypes.FIELD_NAMES_FIELD, fields.get(fieldNamesIndex++).name());
+        } else {
+          throw new NoSuchElementException();
+        }
+      }
+    };
+  }
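+
+  // Example (a sketch; field name hypothetical): when exists-filters are enabled,
+  // iterating the document yields every added field, then one synthetic
+  // FieldTypes.FIELD_NAMES_FIELD atom per field, which is what backs "field
+  // exists" filtering:
+  //
+  //   Document doc = new Document(fieldTypes);
+  //   doc.addAtom("id", "42");
+  //   for (IndexableField f : doc) {
+  //     System.out.println(f.name());  // "id", then FieldTypes.FIELD_NAMES_FIELD
+  //   }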
+
+  public List<IndexableField> getFields() {
+    return fields;
+  }
+
+  public IndexableField getField(String name) {
+    for (IndexableField field : fields) {
       if (field.name().equals(name)) {
         return field;
       }
@@ -192,142 +535,578 @@
     return null;
   }
 
-  /**
-   * Returns an array of {@link IndexableField}s with the given name.
-   * This method returns an empty array when there are no
-   * matching fields.  It never returns null.
-   *
-   * @param name the name of the field
-   * @return a <code>Field[]</code> array
-   */
-  public Field[] getFields(String name) {
-    List<Field> result = new ArrayList<>();
-    for (Field field : fields) {
+  public List<IndexableField> getFields(String name) {
+    List<IndexableField> result = new ArrayList<>();
+    for (IndexableField field : fields) {
       if (field.name().equals(name)) {
         result.add(field);
       }
     }
 
-    return result.toArray(new Field[result.size()]);
+    return result;
   }
-  
-  /** Returns a List of all the fields in a document.
-   * <p>Note that fields which are <i>not</i> stored are
-   * <i>not</i> available in documents retrieved from the
-   * index, e.g. {@link IndexSearcher#doc(int)} or {@link
-   * IndexReader#document(int)}.
-   * 
-   * @return an immutable <code>List&lt;Field&gt;</code> 
-   */
-  public final List<Field> getFields() {
-    return Collections.unmodifiableList(fields);
+
+  /** E.g. a "country" field.  Default: indexes this value as a single token, and disables norms and freqs, and also enables sorting (indexes doc values) and stores it. */
+  public void addAtom(String fieldName, String value) {
+    if (changeSchema) {
+      fieldTypes.recordStringAtomValueType(fieldName, false);
+    }
+    fields.add(new FieldValue(fieldName, value));
   }
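+
+  // Example (a sketch; names hypothetical): an atom is indexed verbatim as a
+  // single token, so it suits exact-match filtering:
+  //
+  //   doc.addAtom("country", "Sweden");
+  //   Query q = new TermQuery(new Term("country", "Sweden"));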
-  
-   private final static String[] NO_STRINGS = new String[0];
 
-  /**
-   * Returns an array of values of the field specified as the method parameter.
-   * This method returns an empty array when there are no
-   * matching fields.  It never returns null.
-   * For {@link IntField}, {@link LongField}, {@link
-   * FloatField} and {@link DoubleField} it returns the string value of the number. If you want
-   * the actual numeric field instances back, use {@link #getFields}.
-   * @param name the name of the field
-   * @return a <code>String[]</code> of field values
-   */
-  public final String[] getValues(String name) {
-    List<String> result = new ArrayList<>();
+  /** E.g. a binary single-token field. */
+  public void addAtom(String fieldName, byte[] value) {
+    addAtom(fieldName, new BytesRef(value));
+  }
 
-    for (Iterator<StorableField> it = storedFieldsIterator(); it.hasNext(); ) {
-      StorableField field = it.next();
-      if (field.name().equals(name) && field.stringValue() != null) {
-        result.add(field.stringValue());
+  public void addAtom(String fieldName, BytesRef value) {
+    if (changeSchema) {
+      fieldTypes.recordBinaryAtomValueType(fieldName, false);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  // nocommit explain/verify how there can be only one unique field per doc
+
+  /** E.g. a primary key field. */
+  public void addUniqueAtom(String fieldName, String value) {
+    if (changeSchema) {
+      fieldTypes.recordStringAtomValueType(fieldName, true);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** E.g. a primary key field. */
+  public void addUniqueAtom(String fieldName, byte[] value) {
+    addUniqueAtom(fieldName, new BytesRef(value));
+  }
+
+  /** E.g. a primary key field. */
+  public void addUniqueAtom(String fieldName, BytesRef value) {
+    if (changeSchema) {
+      fieldTypes.recordBinaryAtomValueType(fieldName, true);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
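+
+  // Example (a sketch; names hypothetical): a unique atom behaves like a primary
+  // key, so it pairs naturally with IndexWriter.updateDocument:
+  //
+  //   doc.addUniqueAtom("id", "customer-17");
+  //   writer.updateDocument(new Term("id", "customer-17"), doc);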
+
+  /** E.g. a "title" field.  Default: indexes this value as multiple tokens from analyzer, and disables norms and freqs, and also enables
+   *  sorting (indexes sorted doc values). */
+  public void addShortText(String fieldName, String value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.SHORT_TEXT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
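+
+  // Example (a sketch; field name hypothetical): short text is analyzed for
+  // search but also gets sorted doc values, so the same field sorts naturally:
+  //
+  //   doc.addShortText("title", "A Game of Thrones");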
+
+  /** Only store this value. */
+  public void addStoredBinary(String fieldName, BytesRef value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.BINARY);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredBinary(String fieldName, byte[] value) {
+    addStoredBinary(fieldName, new BytesRef(value));
+  }
+
+  /** Only store this value. */
+  public void addStoredString(String fieldName, String value) {
+    if (changeSchema) {
+      fieldTypes.recordLargeTextType(fieldName, true, false);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredInt(String fieldName, int value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.INT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredLong(String fieldName, long value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.LONG);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredFloat(String fieldName, float value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.FLOAT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredHalfFloat(String fieldName, float value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.HALF_FLOAT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredDouble(String fieldName, double value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.DOUBLE);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredDate(String fieldName, Date value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.DATE);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredInetAddress(String fieldName, InetAddress value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.INET_ADDRESS);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Not indexed, stored, doc values. */
+  public void addBinary(String fieldName, BytesRef value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.BINARY);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredBigInteger(String fieldName, BigInteger value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.BIG_INT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Only store this value. */
+  public void addStoredBigDecimal(String fieldName, BigDecimal value) {
+    if (changeSchema) {
+      fieldTypes.recordStoredValueType(fieldName, FieldTypes.ValueType.BIG_DECIMAL);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
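+
+  // Example (a sketch; names hypothetical): the addStoredXXX variants record
+  // stored-only types, so the values round-trip through stored fields without
+  // being indexed:
+  //
+  //   doc.addStoredLong("bytesWritten", 1024L);
+  //   doc.addStoredBigDecimal("price", new BigDecimal("9.99"));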
+
+  /** Not indexed, stored, doc values; sugar for the {@code BytesRef} variant. */
+  public void addBinary(String fieldName, byte[] value) {
+    addBinary(fieldName, new BytesRef(value));
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer and stores the value. */
+  public void addLargeText(String fieldName, String value) {
+    addLargeText(fieldName, value, DEFAULT_BOOST);
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer and stores the value. */
+  public void addLargeText(String fieldName, String value, float boost) {
+    if (changeSchema) {
+      fieldTypes.recordLargeTextType(fieldName, true, true);
+    }
+    fields.add(new FieldValue(fieldName, value, boost));
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer. */
+  public void addLargeText(String fieldName, TokenStream value) {
+    addLargeText(fieldName, value, DEFAULT_BOOST);
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer. */
+  public void addLargeText(String fieldName, TokenStream value, float boost) {
+    if (changeSchema) {
+      fieldTypes.recordLargeTextType(fieldName, false, true);
+    }
+    fields.add(new FieldValue(fieldName, value, boost));
+  }
+
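+  /** E.g. a "body" field.  Default: indexes the provided pre-analyzed tokens and stores the string value. */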
+  public void addLargeText(String fieldName, String value, TokenStream tokens, float boost) {
+    if (changeSchema) {
+      fieldTypes.recordLargeTextType(fieldName, true, true);
+    }
+    fields.add(new FieldValue(fieldName, new StringAndTokenStream(value, tokens), boost));
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer. */
+  public void addLargeText(String fieldName, Reader reader) {
+    addLargeText(fieldName, reader, DEFAULT_BOOST);
+  }
+
+  /** E.g. a "body" field.  Default: indexes this value as multiple tokens from analyzer. */
+  public void addLargeText(String fieldName, Reader value, float boost) {
+    if (changeSchema) {
+      fieldTypes.recordLargeTextType(fieldName, false, true);
+    }
+    fields.add(new FieldValue(fieldName, value, boost));
+  }
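+
+  // Example (a sketch; "log" is hypothetical): the String variants store the
+  // value by default, while the TokenStream/Reader variants are indexed only:
+  //
+  //   doc.addLargeText("body", "the quick brown fox");
+  //   doc.addLargeText("log", new StringReader(logText));  // not stored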
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addInt(String fieldName, int value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.INT);
+    }
+    fields.add(new FieldValue(fieldName, Integer.valueOf(value)));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addUniqueInt(String fieldName, int value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.INT, true);
+    }
+    fields.add(new FieldValue(fieldName, Integer.valueOf(value)));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addFloat(String fieldName, float value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.FLOAT);
+    }
+    fields.add(new FieldValue(fieldName, Float.valueOf(value)));
+  }
+
+  /** Adds a half-precision (2-byte) float.  Note that the value is encoded with 2 bytes in doc values, but in stored fields it's kept as an
+   *  ordinary 4-byte float.  Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addHalfFloat(String fieldName, float value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.HALF_FLOAT);
+    }
+    fields.add(new FieldValue(fieldName, Float.valueOf(value)));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addLong(String fieldName, long value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.LONG);
+    }
+    fields.add(new FieldValue(fieldName, Long.valueOf(value)));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addUniqueLong(String fieldName, long value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.LONG, true);
+    }
+    fields.add(new FieldValue(fieldName, Long.valueOf(value)));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using numeric doc values). */
+  public void addDouble(String fieldName, double value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.DOUBLE);
+    }
+    fields.add(new FieldValue(fieldName, Double.valueOf(value)));
+  }
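+
+  // Example (a sketch; names hypothetical): numeric adds default to numeric doc
+  // values, so the same field supports both sorting and range filtering:
+  //
+  //   doc.addInt("qty", 3);
+  //   doc.addDouble("price", 9.99);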
+
+  // TODO: addUniqueBigInteger?
+
+  /** Default: support for range filtering/querying and sorting (using sorted doc values). */
+  public void addBigInteger(String fieldName, BigInteger value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.BIG_INT);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  /** Default: support for range filtering/querying and sorting (using sorted doc values). */
+  public void addBigDecimal(String fieldName, BigDecimal value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.BIG_DECIMAL);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
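+
+  // Example (a sketch; field name hypothetical): every BIG_DECIMAL value must
+  // match the scale configured in FieldTypes; a mismatched scale is rejected at
+  // index time:
+  //
+  //   doc.addBigDecimal("balance", new BigDecimal("12.50"));  // scale=2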
+
+  public void addBoolean(String fieldName, boolean value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.BOOLEAN);
+    }
+    fields.add(new FieldValue(fieldName, Boolean.valueOf(value)));
+  }
+
+  public void addDate(String fieldName, Date value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.DATE);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
+
+  // nocommit should we map v4 addresses like this: http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_IPv6_addresses
+
+  /** Adds an {@code InetAddress} field.  This is indexed as a binary atom under the hood, enabling sorting and
+   *  range filtering, and is also stored. */
+  public void addInetAddress(String fieldName, InetAddress value) {
+    if (changeSchema) {
+      fieldTypes.recordValueType(fieldName, FieldTypes.ValueType.INET_ADDRESS);
+    }
+    fields.add(new FieldValue(fieldName, value));
+  }
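+
+  // Example (a sketch; field name hypothetical):
+  //
+  //   doc.addInetAddress("clientIP", InetAddress.getByName("10.17.4.10"));
+  //   // (getByName may throw UnknownHostException)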
+
+  /** Adds an arbitrary external, opaque {@link IndexableField} without updating the field types. */
+  public void add(IndexableField field) {
+    fields.add(field);
+  }
+
+  static {
+    assert FieldTypes.ValueType.values().length == 15: "missing case for switch statement below";
+  }
+
+  /** Note: the FieldTypes must already know about all the fields in the incoming doc. */
+  public void addAll(Document other) {
+    for (IndexableField indexableField : other.fields) {
+      String fieldName = indexableField.name();
+      if (indexableField instanceof FieldValue) {
+        FieldValue field = (FieldValue) indexableField;
+        FieldType fieldType = other.fieldTypes.getFieldType(fieldName);
+        switch (fieldType.valueType) {
+        case TEXT:
+          addLargeText(fieldName, field.stringValue());
+          break;
+        case SHORT_TEXT:
+          addShortText(fieldName, field.stringValue());
+          break;
+        case ATOM:
+          if (field.value instanceof BytesRef) {
+            if (fieldType.isUnique == Boolean.TRUE) {
+              addUniqueAtom(fieldName, (BytesRef) field.value);
+            } else {
+              addAtom(fieldName, (BytesRef) field.value);
+            }
+          } else {
+            if (fieldType.isUnique == Boolean.TRUE) {
+              addUniqueAtom(fieldName, (String) field.value);
+            } else {
+              addAtom(fieldName, (String) field.value);
+            }
+          }
+          break;
+        case INT:
+          if (fieldType.isUnique == Boolean.TRUE) {
+            addUniqueInt(fieldName, field.numericValue().intValue());
+          } else {
+            addInt(fieldName, field.numericValue().intValue());
+          }
+          break;
+        case HALF_FLOAT:
+          addHalfFloat(fieldName, field.numericValue().floatValue());
+          break;
+        case FLOAT:
+          addFloat(fieldName, field.numericValue().floatValue());
+          break;
+        case LONG:
+          if (fieldType.isUnique == Boolean.TRUE) {
+            addUniqueLong(fieldName, field.numericValue().longValue());
+          } else {
+            addLong(fieldName, field.numericValue().longValue());
+          }
+          break;
+        case DOUBLE:
+          addDouble(fieldName, field.numericValue().doubleValue());
+          break;
+        case BIG_INT:
+          addBigInteger(fieldName, (BigInteger) field.value);
+          break;
+        case BIG_DECIMAL:
+          addBigDecimal(fieldName, (BigDecimal) field.value);
+          break;
+        case BINARY:
+          addStoredBinary(fieldName, field.binaryValue());
+          break;
+        case BOOLEAN:
+          addBoolean(fieldName, ((Boolean) field.value).booleanValue());
+          break;
+        case DATE:
+          addDate(fieldName, (Date) field.value);
+          break;
+        case INET_ADDRESS:
+          addInetAddress(fieldName, (InetAddress) field.value);
+          break;
+        default:
+          // BUG:
+          throw new AssertionError("missing valueType=" + fieldType.valueType + " in switch");
+        }
+      } else {
+        add(indexableField);
       }
     }
-    
-    if (result.size() == 0) {
-      return NO_STRINGS;
-    }
-    
-    return result.toArray(new String[result.size()]);
   }
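+
+  // Example (a sketch): the copy constructor delegates here, so cloning a
+  // document whose fields the FieldTypes already knows is just:
+  //
+  //   Document copy = new Document(original);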
 
-  /** Returns the string value of the field with the given name if any exist in
-   * this document, or null.  If multiple fields exist with this name, this
-   * method returns the first value added. If only binary fields with this name
-   * exist, returns null.
-   * For {@link IntField}, {@link LongField}, {@link
-   * FloatField} and {@link DoubleField} it returns the string value of the number. If you want
-   * the actual numeric field instance back, use {@link #getField}.
-   */
-  public final String get(String name) {
-    for (Iterator<StorableField> it = storedFieldsIterator(); it.hasNext(); ) {
-      StorableField field = it.next();
-      if (field.name().equals(name) && field.stringValue() != null) {
-        return field.stringValue();
+  public Boolean getBoolean(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Boolean) fieldValue.value;
+    }
+  }
+
+  public Date getDate(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Date) fieldValue.value;
+    }
+  }
+
+  public InetAddress getInetAddress(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (InetAddress) fieldValue.value;
+    }
+  }
+
+  public BigInteger getBigInteger(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (BigInteger) fieldValue.value;
+    }
+  }
+
+  public BigDecimal getBigDecimal(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (BigDecimal) fieldValue.value;
+    }
+  }
+
+  public String getString(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (String) fieldValue.value;
+    }
+  }
+
+  public String[] getStrings(String fieldName) {
+    List<String> values = new ArrayList<>();
+    for(IndexableField fieldValue : fields) {
+      if (fieldValue.name().equals(fieldName) && fieldValue instanceof FieldValue) {
+        values.add((String) ((FieldValue) fieldValue).value);
+      }
+    }
+
+    return values.toArray(new String[values.size()]);
+  }
+
+  public BytesRef getBinary(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (BytesRef) fieldValue.value;
+    }
+  }
+
+  public Integer getInt(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Integer) fieldValue.value;
+    }
+  }
+
+  public Long getLong(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Long) fieldValue.value;
+    }
+  }
+
+  public Float getHalfFloat(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Float) fieldValue.value;
+    }
+  }
+
+  public Float getFloat(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Float) fieldValue.value;
+    }
+  }
+
+  public Double getDouble(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return (Double) fieldValue.value;
+    }
+  }
+
+  public Object get(String fieldName) {
+    FieldValue fieldValue = getFirstFieldValue(fieldName);
+    if (fieldValue == null) {
+      return null;
+    } else {
+      return fieldValue.value;
+    }
+  }
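+
+  // Example (a sketch; field name hypothetical): the typed getters cast the
+  // first matching value, so ask with the same type the field was added with:
+  //
+  //   doc.addInt("qty", 3);
+  //   Integer qty = doc.getInt("qty");  // 3
+  //   Long bad = doc.getLong("qty");    // ClassCastException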
+
+  private FieldValue getFirstFieldValue(String name) {
+    for(IndexableField fieldValue : fields) {
+      if (fieldValue.name().equals(name) && fieldValue instanceof FieldValue) {
+        return (FieldValue) fieldValue;
       }
     }
     return null;
   }
-  
-  /** Prints the fields of a document for human consumption. */
+
   @Override
-  public final String toString() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("Document<");
-    for (int i = 0; i < fields.size(); i++) {
-      IndexableField field = fields.get(i);
-      buffer.append(field.toString());
-      if (i != fields.size()-1) {
-        buffer.append(" ");
+  public String toString() {
+    StringBuilder b = new StringBuilder();
+    for(IndexableField field : fields) {
+      b.append("\n  ");
+      b.append(field.name());
+      b.append(": ");
+      String s;
+      if (field instanceof FieldValue) {
+        s = ((FieldValue) field).value.toString();
+      } else {
+        s = field.toString();
+      }
+      if (s.length() > 20) {
+        b.append(s.substring(0, 20));
+        b.append("...");
+      } else {
+        b.append(s);
       }
     }
-    buffer.append(">");
-    return buffer.toString();
+    return b.toString();
   }
 
-  /** Obtains all indexed fields in document */
-  @Override
-  public Iterable<IndexableField> indexableFields() {
-    return new Iterable<IndexableField>() {
-      @Override
-      public Iterator<IndexableField> iterator() {
-        return Document.this.indexedFieldsIterator();
-      }
-    };
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
   }
 
-  /** Obtains all stored fields in document. */
-  @Override
-  public Iterable<StorableField> storableFields() {
-    return new Iterable<StorableField>() {
+  private static final TokenStream EMPTY_TOKEN_STREAM = new TokenStream() {
       @Override
-      public Iterator<StorableField> iterator() {
-        return Document.this.storedFieldsIterator();
+      public final boolean incrementToken() {
+        return false;
       }
     };
-  }
-
-  private Iterator<StorableField> storedFieldsIterator() {
-    return new FilterIterator<StorableField, Field>(fields.iterator()) {
-      @Override
-      protected boolean predicateFunction(Field field) {
-        return field.type.stored() || field.type.docValuesType() != DocValuesType.NONE;
-      }
-    };
-  }
-  
-  private Iterator<IndexableField> indexedFieldsIterator() {
-    return new FilterIterator<IndexableField, Field>(fields.iterator()) {
-      @Override
-      protected boolean predicateFunction(Field field) {
-        return field.type.indexOptions() != IndexOptions.NONE;
-      }
-    };
-  }
-
-  /** Removes all the fields from document. */
-  public void clear() {
-    fields.clear();
-  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
index fdef4c1..eb7cc8c 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
@@ -18,13 +18,19 @@
  */
 
 import java.io.IOException;
-import java.util.Set;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.util.Date;
 import java.util.HashSet;
+import java.util.Set;
 
+import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
 
 /** A {@link StoredFieldVisitor} that creates a {@link
  *  Document} containing all stored fields, or only specific
@@ -36,19 +42,24 @@
  * @lucene.experimental */
 
 public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
-  private final StoredDocument doc = new StoredDocument();
+  private final Document doc;
   private final Set<String> fieldsToAdd;
+  private final FieldTypes fieldTypes;
 
   /** 
    * Load only fields named in the provided <code>Set&lt;String&gt;</code>. 
    * @param fieldsToAdd Set of fields to load, or <code>null</code> (all fields).
    */
-  public DocumentStoredFieldVisitor(Set<String> fieldsToAdd) {
+  public DocumentStoredFieldVisitor(FieldTypes fieldTypes, Set<String> fieldsToAdd) {
+    doc = new Document(fieldTypes, false);
+    this.fieldTypes = fieldTypes;
     this.fieldsToAdd = fieldsToAdd;
   }
 
   /** Load only fields named in the provided fields. */
-  public DocumentStoredFieldVisitor(String... fields) {
+  public DocumentStoredFieldVisitor(FieldTypes fieldTypes, String... fields) {
+    doc = new Document(fieldTypes, false);
+    this.fieldTypes = fieldTypes;
     fieldsToAdd = new HashSet<>(fields.length);
     for(String field : fields) {
       fieldsToAdd.add(field);
@@ -56,42 +67,79 @@
   }
 
   /** Load all stored fields. */
-  public DocumentStoredFieldVisitor() {
+  public DocumentStoredFieldVisitor(FieldTypes fieldTypes) {
+    doc = new Document(fieldTypes, false);
+    this.fieldTypes = fieldTypes;
     this.fieldsToAdd = null;
   }
 
+  private FieldTypes.FieldType getFieldType(String fieldName) {
+    if (fieldTypes != null) {
+      try {
+        return fieldTypes.getFieldType(fieldName);
+      } catch (IllegalArgumentException iae) {
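+        // Field is unknown to the schema; fall through and return null.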
+      }
+    }
+    return null;
+  }
+
   @Override
   public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
-    doc.add(new StoredField(fieldInfo.name, value));
+    FieldTypes.FieldType fieldType = getFieldType(fieldInfo.name);
+    if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.INET_ADDRESS) {
+      doc.addInetAddress(fieldInfo.name, InetAddress.getByAddress(value));
+    } else if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.BIG_INT) {
+      doc.addBigInteger(fieldInfo.name, new BigInteger(value));
+    } else if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.BIG_DECIMAL) {
+      doc.addBigDecimal(fieldInfo.name, new BigDecimal(new BigInteger(value), fieldTypes.getBigDecimalScale(fieldInfo.name)));
+    } else {
+      doc.addBinary(fieldInfo.name, new BytesRef(value));
+    }
   }
 
   @Override
   public void stringField(FieldInfo fieldInfo, String value) throws IOException {
-    final FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(fieldInfo.hasVectors());
-    ft.setOmitNorms(fieldInfo.omitsNorms());
-    ft.setIndexOptions(fieldInfo.getIndexOptions());
-    doc.add(new StoredField(fieldInfo.name, value, ft));
+    doc.addLargeText(fieldInfo.name, value);
   }
 
   @Override
-  public void intField(FieldInfo fieldInfo, int value) {
-    doc.add(new StoredField(fieldInfo.name, value));
+  public void intField(FieldInfo fieldInfo, int value) throws CorruptIndexException {
+    FieldTypes.FieldType fieldType = getFieldType(fieldInfo.name);
+    if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.BOOLEAN) {
+      boolean b;
+      if (value == 0) {
+        b = false;
+      } else if (value == 1) {
+        b = true;
+      } else {
+        throw new CorruptIndexException("boolean field \"" + fieldInfo.name + "\" should have 0 or 1 underlying value but got " + value, "stored fields file");
+      }
+      doc.addBoolean(fieldInfo.name, b);
+    } else if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.HALF_FLOAT) {
+      doc.addHalfFloat(fieldInfo.name, NumericUtils.shortToHalfFloat((short) value));
+    } else {
+      doc.addInt(fieldInfo.name, value);
+    }
   }
 
   @Override
   public void longField(FieldInfo fieldInfo, long value) {
-    doc.add(new StoredField(fieldInfo.name, value));
+    FieldTypes.FieldType fieldType = getFieldType(fieldInfo.name);
+    if (fieldType != null && fieldType.valueType == FieldTypes.ValueType.DATE) {
+      doc.addDate(fieldInfo.name, new Date(value));
+    } else {
+      doc.addLong(fieldInfo.name, value);
+    }
   }
 
   @Override
   public void floatField(FieldInfo fieldInfo, float value) {
-    doc.add(new StoredField(fieldInfo.name, value));
+    doc.addFloat(fieldInfo.name, value);
   }
 
   @Override
   public void doubleField(FieldInfo fieldInfo, double value) {
-    doc.add(new StoredField(fieldInfo.name, value));
+    doc.addDouble(fieldInfo.name, value);
   }
 
   @Override
@@ -101,12 +149,12 @@
 
   /**
    * Retrieve the visited document.
-   * @return {@link StoredDocument} populated with stored fields. Note that only
+   * @return {@link Document} populated with stored fields. Note that only
    *         the stored information in the field instances is valid,
    *         data such as indexing options, term vector options,
    *         etc is not set.
    */
-  public StoredDocument getDocument() {
+  public Document getDocument() {
     return doc;
   }
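+
+  // Example (a sketch, assuming an open IndexReader and its FieldTypes):
+  //
+  //   DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldTypes);
+  //   reader.document(docID, visitor);
+  //   Document doc = visitor.getDocument();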
 }
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java
deleted file mode 100644
index f0e14d5..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/DoubleDocValuesField.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Syntactic sugar for encoding doubles as NumericDocValues
- * via {@link Double#doubleToRawLongBits(double)}.
- * <p>
- * Per-document double values can be retrieved via
- * {@link org.apache.lucene.index.LeafReader#getNumericDocValues(String)}.
- * <p>
- * <b>NOTE</b>: In most all cases this will be rather inefficient,
- * requiring eight bytes per document. Consider encoding double
- * values yourself with only as much precision as you require.
- */
-public class DoubleDocValuesField extends NumericDocValuesField {
-
-  /** 
-   * Creates a new DocValues field with the specified 64-bit double value 
-   * @param name field name
-   * @param value 64-bit double value
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public DoubleDocValuesField(String name, double value) {
-    super(name, Double.doubleToRawLongBits(value));
-  }
-
-  @Override
-  public void setDoubleValue(double value) {
-    super.setLongValue(Double.doubleToRawLongBits(value));
-  }
-  
-  @Override
-  public void setLongValue(long value) {
-    throw new IllegalArgumentException("cannot change value type from Double to Long");
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoubleField.java b/lucene/core/src/java/org/apache/lucene/document/DoubleField.java
deleted file mode 100644
index cebc441..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/DoubleField.java
+++ /dev/null
@@ -1,172 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.NumericTokenStream; // javadocs
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.NumericRangeFilter; // javadocs
-import org.apache.lucene.search.NumericRangeQuery; // javadocs
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>double</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new DoubleField(name, 6.0, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>DoubleField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  DoubleField field = new DoubleField(name, 0.0, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setDoubleValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link IntField}, {@link LongField}, {@link
- * FloatField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>DoubleField</code>, use {@link NumericRangeQuery} or {@link
- * NumericRangeFilter}.  To sort according to a
- * <code>DoubleField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>DoubleField</code> 
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>DoubleField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>DoubleField</code>.</p>
- *
- * <p>A <code>DoubleField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 4, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * NumericRangeQuery} or {@link NumericRangeFilter}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link NumericRangeQuery}. The format of
- * indexed values is described in {@link NumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * NumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @since 2.9
- */
-
-public final class DoubleField extends Field {
-  
-  /** 
-   * Type for a DoubleField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.NumericType.DOUBLE);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored DoubleField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.NumericType.DOUBLE);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored DoubleField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  NumericUtils#PRECISION_STEP_DEFAULT} (16). 
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null. 
-   */
-  public DoubleField(String name, double value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Double.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link FieldType.NumericType#DOUBLE}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a DOUBLE numericType()
-   */
-  public DoubleField(String name, double value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.NumericType.DOUBLE) {
-      throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
-    }
-    fieldsData = Double.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/Field.java b/lucene/core/src/java/org/apache/lucene/document/Field.java
deleted file mode 100644
index aa0aecf..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/Field.java
+++ /dev/null
@@ -1,625 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.io.Reader;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.index.FieldInvertState; // javadocs
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexWriter; // javadocs
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.IndexableFieldType;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * Expert: directly create a field for a document.  Most
- * users should use one of the sugar subclasses: {@link
- * IntField}, {@link LongField}, {@link FloatField}, {@link
- * DoubleField}, {@link BinaryDocValuesField}, {@link
- * NumericDocValuesField}, {@link SortedDocValuesField}, {@link
- * StringField}, {@link TextField}, {@link StoredField}.
- *
- * <p>A field is a section of a Document. Each field has three
- * parts: name, type and value. Values may be text
- * (String, Reader or pre-analyzed TokenStream), binary
- * (byte[]), or numeric (a Number).  Fields are optionally stored in the
- * index, so that they may be returned with hits on the document.
- *
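- * <p>For example, here is a sketch adding a few fields to a
- * document via the sugar subclasses; the field names are
- * illustrative:
- * <pre class="prettyprint">
- * Document doc = new Document();
- * doc.add(new TextField("body", "some searchable text", Field.Store.NO));
- * doc.add(new StoredField("id", 42));
- * doc.add(new DoubleField("price", 19.95, Field.Store.YES));
- * </pre>
- *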
- * <p>
- * NOTE: the field type is an {@link IndexableFieldType}.  Making changes
- * to the state of the IndexableFieldType will impact any
- * Field it is used in.  It is strongly recommended that no
- * changes be made after Field instantiation.
- */
-public class Field implements IndexableField, StorableField {
-
-  /**
-   * Field's type
-   */
-  protected final FieldType type;
-
-  /**
-   * Field's name
-   */
-  protected final String name;
-
-  /** Field's value */
-  protected Object fieldsData;
-
-  /** Pre-analyzed tokenStream for indexed fields; this is
-   * separate from fieldsData because you are allowed to
-   * have both; e.g. maybe the field has a String value but you
-   * customize how it's tokenized */
-  protected TokenStream tokenStream;
-
-  /**
-   * Field's boost
-   * @see #boost()
-   */
-  protected float boost = 1.0f;
-
-  /**
-   * Expert: creates a field with no initial value.
-   * Intended only for custom Field subclasses.
-   * @param name field name
-   * @param type field type
-   * @throws IllegalArgumentException if either the name or type
-   *         is null.
-   */
-  protected Field(String name, FieldType type) {
-    if (name == null) {
-      throw new IllegalArgumentException("name cannot be null");
-    }
-    this.name = name;
-    if (type == null) {
-      throw new IllegalArgumentException("type cannot be null");
-    }
-    this.type = type;
-  }
-
-  /**
-   * Create field with Reader value.
-   * @param name field name
-   * @param reader reader value
-   * @param type field type
-   * @throws IllegalArgumentException if either the name or type
-   *         is null, or if the field's type is stored(), or
-   *         if tokenized() is false.
-   * @throws NullPointerException if the reader is null
-   */
-  public Field(String name, Reader reader, FieldType type) {
-    if (name == null) {
-      throw new IllegalArgumentException("name cannot be null");
-    }
-    if (type == null) {
-      throw new IllegalArgumentException("type cannot be null");
-    }
-    if (reader == null) {
-      throw new NullPointerException("reader cannot be null");
-    }
-    if (type.stored()) {
-      throw new IllegalArgumentException("fields with a Reader value cannot be stored");
-    }
-    if (type.indexOptions() != IndexOptions.NONE && !type.tokenized()) {
-      throw new IllegalArgumentException("non-tokenized fields must use String values");
-    }
-    
-    this.name = name;
-    this.fieldsData = reader;
-    this.type = type;
-  }
-
-  /**
-   * Create field with TokenStream value.
-   * @param name field name
-   * @param tokenStream TokenStream value
-   * @param type field type
-   * @throws IllegalArgumentException if either the name or type
-   *         is null, or if the field's type is stored(), or
-   *         if tokenized() is false, or if indexed() is false.
-   * @throws NullPointerException if the tokenStream is null
-   */
-  public Field(String name, TokenStream tokenStream, FieldType type) {
-    if (name == null) {
-      throw new IllegalArgumentException("name cannot be null");
-    }
-    if (tokenStream == null) {
-      throw new NullPointerException("tokenStream cannot be null");
-    }
-    if (type.indexOptions() == IndexOptions.NONE || !type.tokenized()) {
-      throw new IllegalArgumentException("TokenStream fields must be indexed and tokenized");
-    }
-    if (type.stored()) {
-      throw new IllegalArgumentException("TokenStream fields cannot be stored");
-    }
-    
-    this.name = name;
-    this.fieldsData = null;
-    this.tokenStream = tokenStream;
-    this.type = type;
-  }
-  
-  /**
-   * Create field with binary value.
-   * 
-   * <p>NOTE: the provided byte[] is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param value byte array pointing to binary content (not copied)
-   * @param type field type
-   * @throws IllegalArgumentException if the field name is null,
-   *         or the field's type is indexed()
-   * @throws NullPointerException if the type is null
-   */
-  public Field(String name, byte[] value, FieldType type) {
-    this(name, value, 0, value.length, type);
-  }
-
-  /**
-   * Create field with binary value.
-   * 
-   * <p>NOTE: the provided byte[] is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param value byte array pointing to binary content (not copied)
-   * @param offset starting position of the byte array
-   * @param length valid length of the byte array
-   * @param type field type
-   * @throws IllegalArgumentException if the field name is null,
-   *         or the field's type is indexed()
-   * @throws NullPointerException if the type is null
-   */
-  public Field(String name, byte[] value, int offset, int length, FieldType type) {
-    this(name, new BytesRef(value, offset, length), type);
-  }
-
-  /**
-   * Create field with binary value.
-   *
-   * <p>NOTE: the provided BytesRef is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param bytes BytesRef pointing to binary content (not copied)
-   * @param type field type
-   * @throws IllegalArgumentException if the field name is null,
-   *         or the field's type is indexed()
-   * @throws NullPointerException if the type is null
-   */
-  public Field(String name, BytesRef bytes, FieldType type) {
-    if (name == null) {
-      throw new IllegalArgumentException("name cannot be null");
-    }
-    if (bytes == null) {
-      throw new IllegalArgumentException("bytes cannot be null");
-    }
-    if (type.indexOptions() != IndexOptions.NONE) {
-      throw new IllegalArgumentException("Fields with BytesRef values cannot be indexed");
-    }
-    this.fieldsData = bytes;
-    this.type = type;
-    this.name = name;
-  }
-
-  // TODO: allow direct construction of int, long, float, double value too..?
-
-  /**
-   * Create field with String value.
-   * @param name field name
-   * @param value string value
-   * @param type field type
-   * @throws IllegalArgumentException if either the name or value
-   *         is null, or if the field's type is neither indexed() nor stored(), 
-   *         or if indexed() is false but storeTermVectors() is true.
-   * @throws NullPointerException if the type is null
-   */
-  public Field(String name, String value, FieldType type) {
-    if (name == null) {
-      throw new IllegalArgumentException("name cannot be null");
-    }
-    if (value == null) {
-      throw new IllegalArgumentException("value cannot be null");
-    }
-    if (!type.stored() && type.indexOptions() == IndexOptions.NONE) {
-      throw new IllegalArgumentException("it doesn't make sense to have a field that "
-        + "is neither indexed nor stored");
-    }
-    this.type = type;
-    this.name = name;
-    this.fieldsData = value;
-  }
-
-  /**
-   * The value of the field as a String, or null. If null, the Reader value or
-   * binary value is used. Exactly one of stringValue(), readerValue(), and
-   * getBinaryValue() must be set.
-   */
-  @Override
-  public String stringValue() {
-    if (fieldsData instanceof String || fieldsData instanceof Number) {
-      return fieldsData.toString();
-    } else {
-      return null;
-    }
-  }
-  
-  /**
-   * The value of the field as a Reader, or null. If null, the String value or
-   * binary value is used. Exactly one of stringValue(), readerValue(), and
-   * binaryValue() must be set.
-   */
-  @Override
-  public Reader readerValue() {
-    return fieldsData instanceof Reader ? (Reader) fieldsData : null;
-  }
-  
-  /**
-   * The TokenStream for this field to be used when indexing, or null. If null,
-   * the Reader value or String value is analyzed to produce the indexed tokens.
-   */
-  public TokenStream tokenStreamValue() {
-    return tokenStream;
-  }
-  
-  /**
-   * <p>
-   * Expert: change the value of this field. This can be used during indexing to
-   * re-use a single Field instance to improve indexing speed by avoiding GC
-   * cost of new'ing and reclaiming Field instances. Typically a single
-   * {@link Document} instance is re-used as well. This helps most on small
-   * documents.
-   * </p>
-   * 
-   * <p>
-   * Each Field instance should only be used once within a single
-   * {@link Document} instance. See <a
-   * href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed"
-   * >ImproveIndexingSpeed</a> for details.
-   * </p>
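-   * 
-   * <p>For example, here is a sketch of the reuse pattern; the
-   * <code>"id"</code> field and the <code>ids</code>/<code>writer</code>
-   * variables are illustrative:
-   * <pre class="prettyprint">
-   * Field field = new StringField("id", "", Field.Store.NO);
-   * Document doc = new Document();
-   * doc.add(field);
-   * for (String id : ids) {
-   *   field.setStringValue(id);
-   *   writer.addDocument(doc);
-   * }
-   * </pre>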
-   */
-  public void setStringValue(String value) {
-    if (!(fieldsData instanceof String)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to String");
-    }
-    if (value == null) {
-      throw new IllegalArgumentException("value cannot be null");
-    }
-    fieldsData = value;
-  }
-  
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setReaderValue(Reader value) {
-    if (!(fieldsData instanceof Reader)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Reader");
-    }
-    fieldsData = value;
-  }
-  
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setBytesValue(byte[] value) {
-    setBytesValue(new BytesRef(value));
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   *
-   * <p>NOTE: the provided BytesRef is not copied so be sure
-   * not to change it until you're done with this field.
-   */
-  public void setBytesValue(BytesRef value) {
-    if (!(fieldsData instanceof BytesRef)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to BytesRef");
-    }
-    if (type.indexOptions() != IndexOptions.NONE) {
-      throw new IllegalArgumentException("cannot set a BytesRef value on an indexed field");
-    }
-    if (value == null) {
-      throw new IllegalArgumentException("value cannot be null");
-    }
-    fieldsData = value;
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setByteValue(byte value) {
-    if (!(fieldsData instanceof Byte)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Byte");
-    }
-    fieldsData = Byte.valueOf(value);
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setShortValue(short value) {
-    if (!(fieldsData instanceof Short)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Short");
-    }
-    fieldsData = Short.valueOf(value);
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setIntValue(int value) {
-    if (!(fieldsData instanceof Integer)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Integer");
-    }
-    fieldsData = Integer.valueOf(value);
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setLongValue(long value) {
-    if (!(fieldsData instanceof Long)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Long");
-    }
-    fieldsData = Long.valueOf(value);
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setFloatValue(float value) {
-    if (!(fieldsData instanceof Float)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Float");
-    }
-    fieldsData = Float.valueOf(value);
-  }
-
-  /**
-   * Expert: change the value of this field. See 
-   * {@link #setStringValue(String)}.
-   */
-  public void setDoubleValue(double value) {
-    if (!(fieldsData instanceof Double)) {
-      throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to Double");
-    }
-    fieldsData = Double.valueOf(value);
-  }
-
-  /**
-   * Expert: sets the token stream to be used for indexing; the field's type
-   * must be indexed and tokenized. May be combined with stored
-   * values from stringValue() or binaryValue().
-   */
-  public void setTokenStream(TokenStream tokenStream) {
-    if (type.indexOptions() == IndexOptions.NONE || !type.tokenized()) {
-      throw new IllegalArgumentException("TokenStream fields must be indexed and tokenized");
-    }
-    if (type.numericType() != null) {
-      throw new IllegalArgumentException("cannot set private TokenStream on numeric fields");
-    }
-    this.tokenStream = tokenStream;
-  }
-  
-  @Override
-  public String name() {
-    return name;
-  }
-  
-  /** 
-   * {@inheritDoc}
-   * <p>
-   * The default value is <code>1.0f</code> (no boost).
-   * @see #setBoost(float)
-   */
-  @Override
-  public float boost() {
-    return boost;
-  }
-
-  /** 
-   * Sets the boost factor on this field.
-   * @throws IllegalArgumentException if this field is not indexed, 
-   *         or if it omits norms. 
-   * @see #boost()
-   */
-  public void setBoost(float boost) {
-    if (boost != 1.0f) {
-      if (type.indexOptions() == IndexOptions.NONE || type.omitNorms()) {
-        throw new IllegalArgumentException("You cannot set an index-time boost on an unindexed field, or one that omits norms");
-      }
-    }
-    this.boost = boost;
-  }
-
-  @Override
-  public Number numericValue() {
-    if (fieldsData instanceof Number) {
-      return (Number) fieldsData;
-    } else {
-      return null;
-    }
-  }
-
-  @Override
-  public BytesRef binaryValue() {
-    if (fieldsData instanceof BytesRef) {
-      return (BytesRef) fieldsData;
-    } else {
-      return null;
-    }
-  }
-  
-  /** Prints a Field for human consumption. */
-  @Override
-  public String toString() {
-    StringBuilder result = new StringBuilder();
-    result.append(type.toString());
-    result.append('<');
-    result.append(name);
-    result.append(':');
-
-    if (fieldsData != null) {
-      result.append(fieldsData);
-    }
-
-    result.append('>');
-    return result.toString();
-  }
-  
-  /** Returns the {@link FieldType} for this field. */
-  @Override
-  public FieldType fieldType() {
-    return type;
-  }
-
-  @Override
-  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
-    if (fieldType().indexOptions() == IndexOptions.NONE) {
-      // Not indexed
-      return null;
-    }
-
-    final NumericType numericType = fieldType().numericType();
-    if (numericType != null) {
-      if (!(reuse instanceof NumericTokenStream && ((NumericTokenStream)reuse).getPrecisionStep() == type.numericPrecisionStep())) {
-        // lazy init the TokenStream as it is heavy to instantiate
-        // (attributes,...) if not needed (stored field loading)
-        reuse = new NumericTokenStream(type.numericPrecisionStep());
-      }
-      final NumericTokenStream nts = (NumericTokenStream) reuse;
-      // initialize value in TokenStream
-      final Number val = (Number) fieldsData;
-      switch (numericType) {
-      case INT:
-        nts.setIntValue(val.intValue());
-        break;
-      case LONG:
-        nts.setLongValue(val.longValue());
-        break;
-      case FLOAT:
-        nts.setFloatValue(val.floatValue());
-        break;
-      case DOUBLE:
-        nts.setDoubleValue(val.doubleValue());
-        break;
-      default:
-        throw new AssertionError("Should never get here");
-      }
-      return reuse;
-    }
-
-    if (!fieldType().tokenized()) {
-      if (stringValue() == null) {
-        throw new IllegalArgumentException("Non-Tokenized Fields must have a String value");
-      }
-      if (!(reuse instanceof StringTokenStream)) {
-        // lazy init the TokenStream as it is heavy to instantiate
-        // (attributes,...) if not needed (stored field loading)
-        reuse = new StringTokenStream();
-      }
-      ((StringTokenStream) reuse).setValue(stringValue());
-      return reuse;
-    }
-
-    if (tokenStream != null) {
-      return tokenStream;
-    } else if (readerValue() != null) {
-      return analyzer.tokenStream(name(), readerValue());
-    } else if (stringValue() != null) {
-      return analyzer.tokenStream(name(), stringValue());
-    }
-
-    throw new IllegalArgumentException("Field must have either TokenStream, String, Reader or Number value; got " + this);
-  }
-  
-  static final class StringTokenStream extends TokenStream {
-    private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
-    private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
-    private boolean used = false;
-    private String value = null;
-    
-    /** Creates a new TokenStream that returns a String as a single token.
-     * <p>Warning: Does not initialize the value, you must call
-     * {@link #setValue(String)} afterwards!
-     */
-    StringTokenStream() {
-    }
-    
-    /** Sets the string value. */
-    void setValue(String value) {
-      this.value = value;
-    }
-
-    @Override
-    public boolean incrementToken() {
-      if (used) {
-        return false;
-      }
-      clearAttributes();
-      termAttribute.append(value);
-      offsetAttribute.setOffset(0, value.length());
-      used = true;
-      return true;
-    }
-
-    @Override
-    public void end() throws IOException {
-      super.end();
-      final int finalOffset = value.length();
-      offsetAttribute.setOffset(finalOffset, finalOffset);
-    }
-    
-    @Override
-    public void reset() {
-      used = false;
-    }
-
-    @Override
-    public void close() {
-      value = null;
-    }
-  }
-
-  /** Specifies whether and how a field should be stored. */
-  public static enum Store {
-
-    /** Store the original field value in the index. This is useful for short texts
-     * like a document's title which should be displayed with the results. The
-     * value is stored in its original form, i.e. no analyzer is used before it is
-     * stored.
-     */
-    YES,
-
-    /** Do not store the field value in the index. */
-    NO
-  }
-}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java b/lucene/core/src/java/org/apache/lucene/document/FieldExistsFilter.java
similarity index 61%
copy from lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
copy to lucene/core/src/java/org/apache/lucene/document/FieldExistsFilter.java
index c075984..369446d 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldExistsFilter.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.queries;
+package org.apache.lucene.document;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,55 +17,63 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.LeafReaderContext;
+import java.io.IOException;
+
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BitsFilteredDocIdSet;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 
-import java.io.IOException;
+/** A filter that matches documents that contain the specified field name.  This only
+ *  works if {@link FieldTypes#enableExistsFilters} is true (the default).
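+ *
+ *  <p>For example, here is a sketch; the <code>searcher</code> and
+ *  <code>query</code> variables are illustrative:
+ *  <pre class="prettyprint">
+ *  Filter filter = new FieldExistsFilter("title");
+ *  TopDocs hits = searcher.search(query, filter, 10);
+ *  </pre> */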
+final class FieldExistsFilter extends Filter {
 
-/**
- * A filter that includes documents that match with a specific term.
- */
-final public class TermFilter extends Filter {
-
-  private final Term term;
+  private final String fieldString;
+  private final BytesRef field;
 
   /**
-   * @param term The term documents need to have in order to be a match for this filter.
+   * @param field The field that documents need to contain in order to be a match for this filter.
    */
-  public TermFilter(Term term) {
-    if (term == null) {
-      throw new IllegalArgumentException("Term must not be null");
-    } else if (term.field() == null) {
-      throw new IllegalArgumentException("Field must not be null");
+  public FieldExistsFilter(String field) {
+    if (field == null) {
+      throw new IllegalArgumentException("field must not be null");
     }
-    this.term = term;
-  }
-
-  /**
-   * @return The term this filter includes documents with.
-   */
-  public Term getTerm() {
-    return term;
+    this.fieldString = field;
+    this.field = new BytesRef(field);
   }
 
   @Override
   public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException {
-    Terms terms = context.reader().terms(term.field());
+
+    int maxDoc = context.reader().maxDoc();
+
+    Terms terms = context.reader().terms(fieldString);
+    if (terms != null && terms.getDocCount() == maxDoc) {
+      // All docs have the field
+      return BitsFilteredDocIdSet.wrap(DocIdSet.full(maxDoc), acceptDocs);
+    }
+
+    terms = context.reader().terms(FieldTypes.FIELD_NAMES_FIELD);
     if (terms == null) {
       return null;
     }
 
     final TermsEnum termsEnum = terms.iterator(null);
-    if (!termsEnum.seekExact(term.bytes())) {
+    if (!termsEnum.seekExact(field)) {
       return null;
     }
+
+    // The Terms.getDocCount() above should have kicked in:
+    assert termsEnum.docFreq() < maxDoc;
+
     return new DocIdSet() {
       @Override
       public DocIdSetIterator iterator() throws IOException {
@@ -84,21 +92,18 @@
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
 
-    TermFilter that = (TermFilter) o;
+    FieldExistsFilter that = (FieldExistsFilter) o;
 
-    if (term != null ? !term.equals(that.term) : that.term != null) return false;
-
-    return true;
+    return field.equals(that.field);
   }
 
   @Override
   public int hashCode() {
-    return term != null ? term.hashCode() : 0;
+    return field.hashCode();
   }
 
   @Override
   public String toString() {
-    return term.field() + ":" + term.text();
+    return "FieldExistsFilter(field=" + fieldString + ")";
   }
-
 }
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
deleted file mode 100644
index b2b968e..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ /dev/null
@@ -1,456 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.Analyzer; // javadocs
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexableFieldType;
-import org.apache.lucene.search.NumericRangeQuery; // javadocs
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * Describes the properties of a field.
- */
-public class FieldType implements IndexableFieldType  {
-
-  /** Data type of the numeric value
-   * @since 3.2
-   */
-  public static enum NumericType {
-    /** 32-bit integer numeric type */
-    INT, 
-    /** 64-bit long numeric type */
-    LONG, 
-    /** 32-bit float numeric type */
-    FLOAT, 
-    /** 64-bit double numeric type */
-    DOUBLE
-  }
-
-  private boolean stored;
-  private boolean tokenized = true;
-  private boolean storeTermVectors;
-  private boolean storeTermVectorOffsets;
-  private boolean storeTermVectorPositions;
-  private boolean storeTermVectorPayloads;
-  private boolean omitNorms;
-  private IndexOptions indexOptions = IndexOptions.NONE;
-  private NumericType numericType;
-  private boolean frozen;
-  private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
-  private DocValuesType docValuesType = DocValuesType.NONE;
-
-  /**
-   * Create a new mutable FieldType with all of the properties from <code>ref</code>
-   */
-  public FieldType(FieldType ref) {
-    this.stored = ref.stored();
-    this.tokenized = ref.tokenized();
-    this.storeTermVectors = ref.storeTermVectors();
-    this.storeTermVectorOffsets = ref.storeTermVectorOffsets();
-    this.storeTermVectorPositions = ref.storeTermVectorPositions();
-    this.storeTermVectorPayloads = ref.storeTermVectorPayloads();
-    this.omitNorms = ref.omitNorms();
-    this.indexOptions = ref.indexOptions();
-    this.docValuesType = ref.docValuesType();
-    this.numericType = ref.numericType();
-    this.numericPrecisionStep = ref.numericPrecisionStep();
-    // Do not copy frozen!
-  }
-  
-  /**
-   * Create a new FieldType with default properties.
-   */
-  public FieldType() {
-  }
-
-  private void checkIfFrozen() {
-    if (frozen) {
-      throw new IllegalStateException("this FieldType is already frozen and cannot be changed");
-    }
-  }
-
-  /**
-   * Prevents future changes. Note: it is recommended that this be called once
-   * the FieldType's properties have been set, to prevent unintentional state
-   * changes.
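-   *
-   * <p>For example, a sketch:
-   * <pre class="prettyprint">
-   * FieldType ft = new FieldType();
-   * ft.setStored(true);
-   * ft.setIndexOptions(IndexOptions.DOCS);
-   * ft.freeze(); // later setter calls now throw IllegalStateException
-   * </pre>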
-   */
-  public void freeze() {
-    this.frozen = true;
-  }
-  
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>.
-   * @see #setStored(boolean)
-   */
-  @Override
-  public boolean stored() {
-    return this.stored;
-  }
-  
-  /**
-   * Set to <code>true</code> to store this field.
-   * @param value true if this field should be stored.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #stored()
-   */
-  public void setStored(boolean value) {
-    checkIfFrozen();
-    this.stored = value;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>true</code>.
-   * @see #setTokenized(boolean)
-   */
-  public boolean tokenized() {
-    return this.tokenized;
-  }
-  
-  /**
-   * Set to <code>true</code> to tokenize this field's contents via the 
-   * configured {@link Analyzer}.
-   * @param value true if this field should be tokenized.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #tokenized()
-   */
-  public void setTokenized(boolean value) {
-    checkIfFrozen();
-    this.tokenized = value;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>. 
-   * @see #setStoreTermVectors(boolean)
-   */
-  @Override
-  public boolean storeTermVectors() {
-    return this.storeTermVectors;
-  }
-  
-  /**
-   * Set to <code>true</code> if this field's indexed form should be also stored 
-   * into term vectors.
-   * @param value true if this field should store term vectors.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #storeTermVectors()
-   */
-  public void setStoreTermVectors(boolean value) {
-    checkIfFrozen();
-    this.storeTermVectors = value;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>.
-   * @see #setStoreTermVectorOffsets(boolean)
-   */
-  @Override
-  public boolean storeTermVectorOffsets() {
-    return this.storeTermVectorOffsets;
-  }
-  
-  /**
-   * Set to <code>true</code> to also store token character offsets into the term
-   * vector for this field.
-   * @param value true if this field should store term vector offsets.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #storeTermVectorOffsets()
-   */
-  public void setStoreTermVectorOffsets(boolean value) {
-    checkIfFrozen();
-    this.storeTermVectorOffsets = value;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>.
-   * @see #setStoreTermVectorPositions(boolean)
-   */
-  @Override
-  public boolean storeTermVectorPositions() {
-    return this.storeTermVectorPositions;
-  }
-  
-  /**
-   * Set to <code>true</code> to also store token positions into the term
-   * vector for this field.
-   * @param value true if this field should store term vector positions.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #storeTermVectorPositions()
-   */
-  public void setStoreTermVectorPositions(boolean value) {
-    checkIfFrozen();
-    this.storeTermVectorPositions = value;
-  }
-  
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>.
-   * @see #setStoreTermVectorPayloads(boolean) 
-   */
-  @Override
-  public boolean storeTermVectorPayloads() {
-    return this.storeTermVectorPayloads;
-  }
-  
-  /**
-   * Set to <code>true</code> to also store token payloads into the term
-   * vector for this field.
-   * @param value true if this field should store term vector payloads.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #storeTermVectorPayloads()
-   */
-  public void setStoreTermVectorPayloads(boolean value) {
-    checkIfFrozen();
-    this.storeTermVectorPayloads = value;
-  }
-  
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>false</code>.
-   * @see #setOmitNorms(boolean)
-   */
-  @Override
-  public boolean omitNorms() {
-    return this.omitNorms;
-  }
-  
-  /**
-   * Set to <code>true</code> to omit normalization values for the field.
-   * @param value true if this field should omit norms.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #omitNorms()
-   */
-  public void setOmitNorms(boolean value) {
-    checkIfFrozen();
-    this.omitNorms = value;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is {@link IndexOptions#NONE}.
-   * @see #setIndexOptions(IndexOptions)
-   */
-  @Override
-  public IndexOptions indexOptions() {
-    return this.indexOptions;
-  }
-  
-  /**
-   * Sets the indexing options for the field.
-   * @param value indexing options
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #indexOptions()
-   */
-  public void setIndexOptions(IndexOptions value) {
-    checkIfFrozen();
-    if (value == null) {
-      throw new NullPointerException("IndexOptions cannot be null");
-    }
-    this.indexOptions = value;
-  }
-
-  /**
-   * Specifies the field's numeric type.
-   * @param type numeric type, or null if the field has no numeric type.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericType()
-   */
-  public void setNumericType(NumericType type) {
-    checkIfFrozen();
-    numericType = type;
-  }
-
-  /** 
-   * NumericType: if non-null then the field's value will be indexed
-   * numerically so that {@link NumericRangeQuery} can be used at 
-   * search time. 
-   * <p>
-   * The default is <code>null</code> (no numeric type) 
-   * @see #setNumericType(NumericType)
-   */
-  public NumericType numericType() {
-    return numericType;
-  }
-
-  /**
-   * Sets the numeric precision step for the field.
-   * @param precisionStep numeric precision step for the field
-   * @throws IllegalArgumentException if precisionStep is less than 1. 
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericPrecisionStep()
-   */
-  public void setNumericPrecisionStep(int precisionStep) {
-    checkIfFrozen();
-    if (precisionStep < 1) {
-      throw new IllegalArgumentException("precisionStep must be >= 1 (got " + precisionStep + ")");
-    }
-    this.numericPrecisionStep = precisionStep;
-  }
-
-  /** 
-   * Precision step for numeric field. 
-   * <p>
-   * This has no effect if {@link #numericType()} returns null.
-   * <p>
-   * The default is {@link NumericUtils#PRECISION_STEP_DEFAULT}
-   * @see #setNumericPrecisionStep(int)
-   */
-  public int numericPrecisionStep() {
-    return numericPrecisionStep;
-  }
-
-  /** Prints a Field for human consumption. */
-  @Override
-  public final String toString() {
-    StringBuilder result = new StringBuilder();
-    if (stored()) {
-      result.append("stored");
-    }
-    if (indexOptions != IndexOptions.NONE) {
-      if (result.length() > 0)
-        result.append(",");
-      result.append("indexed");
-      if (tokenized()) {
-        result.append(",tokenized");
-      }
-      if (storeTermVectors()) {
-        result.append(",termVector");
-      }
-      if (storeTermVectorOffsets()) {
-        result.append(",termVectorOffsets");
-      }
-      if (storeTermVectorPositions()) {
-        result.append(",termVectorPosition");
-      }
-      if (storeTermVectorPayloads()) {
-        result.append(",termVectorPayloads");
-      }
-      if (omitNorms()) {
-        result.append(",omitNorms");
-      }
-      if (indexOptions != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) {
-        result.append(",indexOptions=");
-        result.append(indexOptions);
-      }
-      if (numericType != null) {
-        result.append(",numericType=");
-        result.append(numericType);
-        result.append(",numericPrecisionStep=");
-        result.append(numericPrecisionStep);
-      }
-    }
-    if (docValuesType != DocValuesType.NONE) {
-      if (result.length() > 0) {
-        result.append(",");
-      }
-      result.append("docValuesType=");
-      result.append(docValuesType);
-    }
-    
-    return result.toString();
-  }
-  
-  /* from StorableFieldType */
-  
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is {@link DocValuesType#NONE} (no docValues).
-   * @see #setDocValuesType(DocValuesType)
-   */
-  @Override
-  public DocValuesType docValuesType() {
-    return docValuesType;
-  }
-
-  /**
-   * Sets the field's DocValuesType
-   * @param type DocValues type, or null if no DocValues should be stored.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #docValuesType()
-   */
-  public void setDocValuesType(DocValuesType type) {
-    checkIfFrozen();
-    if (type == null) {
-      throw new NullPointerException("DocValuesType cannot be null");
-    }
-    docValuesType = type;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((docValuesType == null) ? 0 : docValuesType.hashCode());
-    result = prime * result + indexOptions.hashCode();
-    result = prime * result + numericPrecisionStep;
-    result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
-    result = prime * result + (omitNorms ? 1231 : 1237);
-    result = prime * result + (storeTermVectorOffsets ? 1231 : 1237);
-    result = prime * result + (storeTermVectorPayloads ? 1231 : 1237);
-    result = prime * result + (storeTermVectorPositions ? 1231 : 1237);
-    result = prime * result + (storeTermVectors ? 1231 : 1237);
-    result = prime * result + (stored ? 1231 : 1237);
-    result = prime * result + (tokenized ? 1231 : 1237);
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    FieldType other = (FieldType) obj;
-    if (docValuesType != other.docValuesType) return false;
-    if (indexOptions != other.indexOptions) return false;
-    if (numericPrecisionStep != other.numericPrecisionStep) return false;
-    if (numericType != other.numericType) return false;
-    if (omitNorms != other.omitNorms) return false;
-    if (storeTermVectorOffsets != other.storeTermVectorOffsets) return false;
-    if (storeTermVectorPayloads != other.storeTermVectorPayloads) return false;
-    if (storeTermVectorPositions != other.storeTermVectorPositions) return false;
-    if (storeTermVectors != other.storeTermVectors) return false;
-    if (stored != other.stored) return false;
-    if (tokenized != other.tokenized) return false;
-    return true;
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldTypes.java b/lucene/core/src/java/org/apache/lucene/document/FieldTypes.java
new file mode 100644
index 0000000..3e800ae
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldTypes.java
@@ -0,0 +1,4983 @@
+package org.apache.lucene.document;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.nio.charset.StandardCharsets;
+import java.text.Collator;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocValuesRangeFilter;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldComparatorSource;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.HalfFloatComparator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.SortedNumericSortField;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.search.TermFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedChecksumIndexInput;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMFile;
+import org.apache.lucene.store.RAMInputStream;
+import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.Version;
+
+// TODO
+//   - run all monster tests
+//   - write/read of field types should be up to codec?
+//   - how should Codec integrate
+//   - analyzers are not serializable
+//   - what about sparse fields... anything for us to do...
+//   - payloads just stay write once in their own way?
+//   - CheckIndex could optionally do more validation
+//     - e.g. that terms in an ipv6 field always have 16 bytes
+//   - add query time integration
+//     - query parsers
+//       - exc if invalid field name asked for
+//       - numeric range queries "just work"
+//       - default search field
+
+// nocommit: back compat
+//   - be able to search over old numeric fields
+//   - uninverting reader should parse them to DVs
+
+// docsWithField and fieldExistsFilter are redundant if field is dv'd and indexed
+
+
+// TODO: how to allow extending this?
+//   - geo
+//   - expressions
+//   - index sorting
+//   - suggesters
+//   - icu
+//   - facets (FacetsConfig, hierarchical fields)
+//   - doc blocks (nested) / joins
+
+// nocommit how will future back-compat work?  segment must store field types as of when it was written?
+
+// NO
+//   - filter caching?  parent blockdocs filter?
+//   - required
+//   - not null
+//   - "all" field
+//   - "copy" field
+//   - dynamic fields
+//   - can we somehow always store a "source"?  can be handled above
+//   - default value (value used if the field is null/missing): this seems silly wasteful, and layer above can handle it
+//   - sort proxy field ("when I sort by X you should actually sort by Y"): can be handled above
+//   - can/should we validate field names here?
+
+// LATER
+//   - if field has dvs, and field exists filter is enabled, we can skip indexing that field into FIELD_NAMES_FIELD and just use FieldValueFilter?
+//   - can we allow collated, indexed fields (for term ranges)?
+//   - can we use CustomAnalyzer?  it can be serialized...?
+//   - let codec encode/decode field types?  should we write directly into segments_N?  or into a new separately gen'd file?
+//   - time intervals
+//   - BigDecimal
+//   - fold in compressing stored fields format params...how
+//   - add array of atom values?  fieldnamesfield would use it?
+//   - sugar for more/less ranges (where one end is null)
+//   - separate analyzer for phrase queries in suggesters
+//   - more sugar for common analyzers, e.g. accent folding (to ascii), case folding, ngrams for partial matching
+//   - can we somehow detect at search time if the field types you are using doesn't match the searcher you are now searching against?
+//   - add "partial match" option, that turns on ngrams?
+//   - can we move multi-field-ness out of IW?  so IW only gets a single instance of each field
+//   - use better pf when field is unique
+//   - nested/parent/child docs?
+//   - highlight proxy field (LUCENE-6061)
+//   - sugar API to retrieve values from DVs or stored fields or whatever?
+//   - we could track here which fields are actually searched/filtered on ... and e.g. make use of this during warmers ...
+//   - index-time sorting should be here too
+//   - make LiveFieldValues easier to use?
+//   - can we have test infra that randomly reopens writer?
+//   - newTermFilter?
+//   - PH should take this and validate highlighting was enabled?  (it already checks for OFFSETS in postings)
+//   - accent removal and lowercasing for wildcards should just work
+//   - controlling compression of stored fields
+//   - fix all change methods to call validate / rollback
+//   - collator decompositionMode?
+//   - should we have a "resolution" for Date field?
+
+// nocommit -- can we sort on a field that ran through analysis (and made a single token), e.g. case / accent folding
+
+/** Records how each field is indexed, stored, etc.
+ *
+ * @lucene.experimental */
+
+public class FieldTypes {
+
+  public static final int DEFAULT_POSITION_GAP = 0;
+
+  public static final int DEFAULT_OFFSET_GAP = 1;
+
+  /** Key used to store the field types inside {@link IndexWriter#setCommitData}. */
+  public static final String FIELD_TYPES_KEY = "FieldTypes";
+  
+  public static final String FIELD_NAMES_FIELD = "$fieldnames";
+
+  public static final int VERSION_START = 0;
+
+  public static final int VERSION_CURRENT = VERSION_START;
+
+  public static final String CODEC_NAME = "FieldTypes";
+
+  public enum ValueType {
+    NONE,
+    TEXT,
+    SHORT_TEXT,
+    ATOM,
+    INT,
+    HALF_FLOAT,
+    FLOAT,
+    LONG,
+    DOUBLE,
+    BIG_INT,
+    BIG_DECIMAL,
+    BINARY,
+    BOOLEAN,
+    DATE,
+    INET_ADDRESS,
+  }
+
+  private final boolean readOnly;
+
+  /** True if field names are also indexed into {@link #FIELD_NAMES_FIELD} so that exists filters are fast. */
+  boolean enableExistsFilters = true;
+  private boolean indexedDocs;
+
+  private final Version indexCreatedVersion;
+
+  final Map<String,FieldType> fields = new HashMap<>();
+
+  // Null when we are readOnly:
+  private final Analyzer defaultIndexAnalyzer;
+
+  // Null when we are not readOnly:
+  private Analyzer defaultQueryAnalyzer;
+
+  private Similarity defaultSimilarity;
+
+  /** Used only in memory to record when something changed. */
+  private long changeCount;
+
+  private volatile boolean closed;
+
+  /** Just like current oal.document.FieldType, except for each setting it can also record "not-yet-set". */
+  class FieldType implements IndexableFieldType, Cloneable {
+    private final String name;
+
+    // Lucene version when we were created:
+    private final Version createdVersion;
+
+    volatile ValueType valueType = ValueType.NONE;
+    volatile DocValuesType docValuesType = DocValuesType.NONE;
+    private volatile boolean docValuesTypeSet;
+
+    // True if the term is unique across all documents (e.g. a primary key field):
+    volatile Boolean isUnique;
+
+    // True when Document.addStoredXXX was used:
+    volatile Boolean storedOnly;
+
+    // Only used for ATOM:
+    volatile Boolean isBinary;
+
+    // Expert: settings we pass to BlockTree to control how many terms are allowed in each block and in each auto-prefix term
+    volatile Integer blockTreeMinItemsInBlock;
+    volatile Integer blockTreeMaxItemsInBlock;
+    volatile Integer blockTreeMinItemsInAutoPrefix;
+    volatile Integer blockTreeMaxItemsInAutoPrefix;
+
+    // Gaps to add between multiple values of the same field; if these are not set, we fall back to the Analyzer for that field.
+    volatile Integer analyzerPositionGap;
+    volatile Integer analyzerOffsetGap;
+
+    // Min/max token length, or null if there are no limits:
+    volatile Integer minTokenLength;
+    volatile Integer maxTokenLength;
+
+    // Limit on number of tokens to index for this field
+    volatile Integer maxTokenCount;
+    volatile Boolean consumeAllTokens;
+
+    // Whether this field's values are stored, or null if it's not yet set:
+    private volatile Boolean stored;
+
+    // Whether this field's values should be indexed for sorting (using doc values):
+    private volatile Boolean sortable;
+    private volatile Boolean sortReversed;
+    private volatile Boolean sortMissingLast = Boolean.TRUE;
+    private volatile SortedNumericSelector.Type numericSelector = SortedNumericSelector.Type.MIN;
+    private volatile SortedSetSelector.Type sortedSetSelector = SortedSetSelector.Type.MIN;
+
+    // Whether this field's values should be indexed for fast ranges (using numeric field for now):
+    private volatile Boolean fastRanges;
+
+    // Whether this field may appear more than once per document:
+    volatile Boolean multiValued;
+
+    // Whether this field's norms are indexed:
+    private volatile Boolean indexNorms;
+
+    // Byte width for a big int field:
+    volatile Integer bigIntByteWidth;
+
+    // Scale for big decimal field:
+    volatile Integer bigDecimalScale;
+
+    private volatile Boolean storeTermVectors;
+    private volatile Boolean storeTermVectorPositions;
+    private volatile Boolean storeTermVectorOffsets;
+    private volatile Boolean storeTermVectorPayloads;
+
+    // Field is indexed if this != IndexOptions.NONE:
+    private volatile IndexOptions indexOptions = IndexOptions.NONE;
+    private volatile boolean indexOptionsSet;
+
+    // TODO: not great that we can't also set other formats:
+    private volatile String postingsFormat;
+    private volatile String docValuesFormat;
+
+    private volatile Boolean highlighted;
+
+    // NOTE: not persisted, because we don't have API for persisting arbitrary analyzers, or maybe we require AnalysisFactory is always used
+    // (which we can serialize)?
+    private volatile Analyzer queryAnalyzer;
+    private volatile Analyzer indexAnalyzer;
+    private volatile Similarity similarity;
+
+    private volatile Analyzer wrappedIndexAnalyzer;
+    private volatile Analyzer wrappedQueryAnalyzer;
+
+    volatile Boolean reversedTerms;
+
+    Locale sortLocale;
+    int sortCollatorStrength;
+    Collator sortCollator;
+    SortKey sortKey;
+
+    public FieldType(String name) {
+      this(name, Version.LATEST);
+    }
+
+    public FieldType(String name, Version version) {
+      this.name = name;
+      this.createdVersion = version;
+    }
+
+    /** Copy constructor. */
+    FieldType(FieldType other) {
+      this.name = other.name;
+      this.createdVersion = other.createdVersion;
+      this.valueType = other.valueType;
+      this.docValuesType = other.docValuesType;
+      this.docValuesTypeSet = other.docValuesTypeSet;
+      this.isUnique = other.isUnique;
+      this.storedOnly = other.storedOnly;
+      this.isBinary = other.isBinary;
+      this.blockTreeMinItemsInBlock = other.blockTreeMinItemsInBlock;
+      this.blockTreeMaxItemsInBlock = other.blockTreeMaxItemsInBlock;
+      this.blockTreeMinItemsInAutoPrefix = other.blockTreeMinItemsInAutoPrefix;
+      this.blockTreeMaxItemsInAutoPrefix = other.blockTreeMaxItemsInAutoPrefix;
+      this.analyzerPositionGap = other.analyzerPositionGap;
+      this.analyzerOffsetGap = other.analyzerOffsetGap;
+      this.minTokenLength = other.minTokenLength;
+      this.maxTokenLength = other.maxTokenLength;
+      this.maxTokenCount = other.maxTokenCount;
+      this.consumeAllTokens = other.consumeAllTokens;
+      this.stored = other.stored;
+      this.sortable = other.sortable;
+      this.sortReversed = other.sortReversed;
+      this.sortMissingLast = other.sortMissingLast;
+      this.numericSelector = other.numericSelector;
+      this.sortedSetSelector = other.sortedSetSelector;
+      this.fastRanges = other.fastRanges;
+      this.multiValued = other.multiValued;
+      this.indexNorms = other.indexNorms;
+      this.bigIntByteWidth = other.bigIntByteWidth;
+      this.bigDecimalScale = other.bigDecimalScale;
+      this.storeTermVectors = other.storeTermVectors;
+      this.storeTermVectorPositions = other.storeTermVectorPositions;
+      this.storeTermVectorOffsets = other.storeTermVectorOffsets;
+      this.storeTermVectorPayloads = other.storeTermVectorPayloads;
+      this.indexOptions = other.indexOptions;
+      this.indexOptionsSet = other.indexOptionsSet;
+      this.postingsFormat = other.postingsFormat;
+      this.docValuesFormat = other.docValuesFormat;
+      this.highlighted = other.highlighted;
+      this.queryAnalyzer = other.queryAnalyzer;
+      this.indexAnalyzer = other.indexAnalyzer;
+      this.similarity = other.similarity;
+      this.wrappedIndexAnalyzer = other.wrappedIndexAnalyzer;
+      this.wrappedQueryAnalyzer = other.wrappedQueryAnalyzer;
+      this.reversedTerms = other.reversedTerms;
+      this.sortLocale = other.sortLocale;
+      this.sortCollatorStrength = other.sortCollatorStrength;
+      this.sortCollator = other.sortCollator;
+      this.sortKey = other.sortKey;
+    }
+
+    /** Merges all changed settings from {@code other} into this type.  If this throws an exception, the caller must restore the previous state. */
+    public synchronized void merge(FieldType other) {
+      assert name.equals(other.name);
+
+      if (other.valueType != ValueType.NONE) {
+        if (valueType == ValueType.NONE) {
+          valueType = other.valueType;
+        } else if (other.valueType != valueType) {
+          illegalState(name, "cannot change value type from " + valueType + " to " + other.valueType);
+        }
+      }
+      if (other.docValuesTypeSet) {
+        if (docValuesTypeSet == false) {
+          docValuesType = other.docValuesType;  
+          docValuesTypeSet = true;
+        } else if (other.docValuesType != docValuesType) {
+          illegalState(name, "cannot change docValuesType from " + docValuesType + " to " + other.docValuesType);
+        }
+      }
+      if (other.isUnique != null) {
+        if (isUnique == null) {
+          isUnique = other.isUnique;
+        } else if (other.isUnique != isUnique) {
+          illegalState(name, "cannot change isUnique from " + isUnique + " to " + other.isUnique);
+        }
+      }
+      if (other.storedOnly != null) {
+        if (storedOnly == null) {
+          storedOnly = other.storedOnly;
+        } else if (other.storedOnly != storedOnly) {
+          illegalState(name, "cannot change storedOnly from " + storedOnly + " to " + other.storedOnly);
+        }
+      }
+      if (other.isBinary != null) {
+        if (isBinary == null) {
+          isBinary = other.isBinary;
+        } else if (other.isBinary != isBinary) {
+          illegalState(name, "cannot change isBinary from " + isBinary + " to " + other.isBinary);
+        }
+      }
+      if (other.blockTreeMinItemsInBlock != null) {
+        blockTreeMinItemsInBlock = other.blockTreeMinItemsInBlock;
+      }
+      if (other.blockTreeMaxItemsInBlock != null) {
+        blockTreeMaxItemsInBlock = other.blockTreeMaxItemsInBlock;
+      }
+      if (other.blockTreeMinItemsInAutoPrefix != null) {
+        blockTreeMinItemsInAutoPrefix = other.blockTreeMinItemsInAutoPrefix;
+      }
+      if (other.blockTreeMaxItemsInAutoPrefix != null) {
+        blockTreeMaxItemsInAutoPrefix = other.blockTreeMaxItemsInAutoPrefix;
+      }
+
+      if (other.analyzerPositionGap != null) {
+        if (analyzerPositionGap == null) {
+          analyzerPositionGap = other.analyzerPositionGap;
+        } else if (other.analyzerPositionGap.equals(analyzerPositionGap) == false) {
+          illegalState(name, "cannot change analyzerPositionGap from " + analyzerPositionGap + " to " + other.analyzerPositionGap);
+        }
+      }
+      if (other.analyzerOffsetGap != null) {
+        if (analyzerOffsetGap == null) {
+          analyzerOffsetGap = other.analyzerOffsetGap;
+        } else if (other.analyzerOffsetGap.equals(analyzerOffsetGap) == false) {
+          illegalState(name, "cannot change analyzerOffsetGap from " + analyzerOffsetGap + " to " + other.analyzerOffsetGap);
+        }
+      }
+      if (other.minTokenLength != null) {
+        minTokenLength = other.minTokenLength;
+      }
+      if (other.maxTokenLength != null) {
+        maxTokenLength = other.maxTokenLength;
+      }
+      if (other.maxTokenCount != null) {
+        maxTokenCount = other.maxTokenCount;
+      }
+      if (other.consumeAllTokens != null) {
+        consumeAllTokens = other.consumeAllTokens;
+      }
+      if (other.stored != null) {
+        stored = other.stored;
+      }
+      if (other.sortable != null) {
+        if (sortable == null) {
+          sortable = other.sortable;
+        } else if (other.sortable == Boolean.FALSE) {
+          sortable = other.sortable;
+        } else if (sortable == Boolean.FALSE) {
+          illegalState(name, "sorting was already disabled");
+        }
+        if (other.sortReversed != null) {
+          sortReversed = other.sortReversed;
+        }
+        if (other.sortMissingLast != null) {
+          sortMissingLast = other.sortMissingLast;
+        }
+      }
+
+      if (other.numericSelector != null) {
+        numericSelector = other.numericSelector;
+      }
+      if (other.sortedSetSelector != null) {
+        sortedSetSelector = other.sortedSetSelector;
+      }
+      if (other.fastRanges != null) {
+        if (fastRanges == null) {
+          fastRanges = other.fastRanges;
+        } else if (other.fastRanges == Boolean.FALSE) {
+          fastRanges = Boolean.FALSE;
+        } else if (fastRanges == Boolean.FALSE) {
+          illegalState(name, "fastRanges was already disabled");
+        }
+      }
+      if (other.multiValued != null) {
+        if (multiValued == null) {
+          multiValued = other.multiValued;
+        } else if (other.multiValued != multiValued) {
+          illegalState(name, "cannot change multiValued from " + multiValued + " to " + other.multiValued);
+        }
+      }
+
+      if (other.indexNorms != null) {
+        if (indexNorms == null) {
+          indexNorms = other.indexNorms;
+        } else if (other.indexNorms == Boolean.FALSE) {
+          indexNorms = Boolean.FALSE;
+        } else if (indexNorms == Boolean.FALSE) {
+          illegalState(name, "norms were already disabled");
+        }
+      }
+
+      if (other.bigIntByteWidth != null) {
+        if (bigIntByteWidth == null) {
+          bigIntByteWidth = other.bigIntByteWidth;
+        } else if (bigIntByteWidth.equals(other.bigIntByteWidth) == false) {
+          illegalState(name, "cannot change bigIntByteWidth from " + bigIntByteWidth + " to " + other.bigIntByteWidth);
+        }
+      }
+
+      if (other.bigDecimalScale != null) {
+        if (bigDecimalScale == null) {
+          bigDecimalScale = other.bigDecimalScale;
+        } else if (bigDecimalScale.equals(other.bigDecimalScale) == false) {
+          illegalState(name, "cannot change bigDecimalScale from " + bigDecimalScale + " to " + other.bigDecimalScale);
+        }
+      }
+
+      if (other.storeTermVectors != null) {
+        storeTermVectors = other.storeTermVectors;
+
+        if (other.storeTermVectorPositions != null) {
+          storeTermVectorPositions = other.storeTermVectorPositions;
+        }
+        if (other.storeTermVectorOffsets != null) {
+          storeTermVectorOffsets = other.storeTermVectorOffsets;
+        }
+        if (other.storeTermVectorPayloads != null) {
+          storeTermVectorPayloads = other.storeTermVectorPayloads;
+        }
+      }
+
+      if (other.indexOptionsSet) {
+        if (indexOptionsSet == false) {
+          indexOptions = other.indexOptions;
+          indexOptionsSet = true;
+        } else if (indexOptions.compareTo(other.indexOptions) >= 0) {
+          indexOptions = other.indexOptions;
+        } else {
+          illegalState(name, "cannot upgrade indexOptions from " + indexOptions + " to " + other.indexOptions);
+        }
+      }
+
+      if (other.postingsFormat != null) {
+        postingsFormat = other.postingsFormat;
+      }
+
+      if (other.docValuesFormat != null) {
+        docValuesFormat = other.docValuesFormat;
+      }
+
+      if (other.highlighted != null) {
+        if (highlighted == null) {
+          highlighted = other.highlighted;
+        } else if (other.highlighted == Boolean.FALSE) {
+          highlighted = Boolean.FALSE;
+        } else if (highlighted == Boolean.FALSE) {
+          illegalState(name, "highlighting was already disabled");
+        }
+      }
+
+      if (other.queryAnalyzer != null) {
+        if (queryAnalyzer == null) {
+          queryAnalyzer = other.queryAnalyzer;
+        } else if (queryAnalyzer != other.queryAnalyzer) {
+          illegalState(name, "queryAnalyzer was already set");
+        }
+      }
+
+      if (other.indexAnalyzer != null) {
+        if (indexAnalyzer == null) {
+          indexAnalyzer = other.indexAnalyzer;
+        } else if (indexAnalyzer != other.indexAnalyzer) {
+          illegalState(name, "indexAnalyzer was already set");
+        }
+      }
+
+      if (other.similarity != null) {
+        similarity = other.similarity;
+      }
+
+      if (other.wrappedIndexAnalyzer != null) {
+        wrappedIndexAnalyzer = other.wrappedIndexAnalyzer;
+      }
+      if (other.wrappedQueryAnalyzer != null) {
+        wrappedQueryAnalyzer = other.wrappedQueryAnalyzer;
+      }
+      if (other.reversedTerms != null) {
+        if (reversedTerms == null) {
+          reversedTerms = other.reversedTerms;
+        } else if (other.reversedTerms != reversedTerms) {
+          illegalState(name, "can only setReversedTerms before the field is indexed");
+        }
+      }
+      if (other.sortLocale != null) {
+        if (sortLocale == null) {
+          sortLocale = other.sortLocale;
+          sortCollator = other.sortCollator;
+          sortCollatorStrength = other.sortCollatorStrength;
+        } else if (sortLocale.equals(other.sortLocale) == false) {
+          if (valueType == null) {
+            sortLocale = other.sortLocale;
+            sortCollator = other.sortCollator;
+            sortCollatorStrength = other.sortCollatorStrength;
+          } else {
+            illegalState(name, "sortLocale can only be set before indexing");
+          }
+        }
+      }
+
+      if (other.sortKey != null) {
+        sortKey = other.sortKey;
+      }
+
+      changed(false);
+    }
+
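+    /** Sanity-checks that the combination of options is consistent, invoking illegalState on
+     *  any conflict.  For example, sortable=true combined with an explicitly disabled
+     *  docValuesType fails with "cannot sort when DocValuesType=NONE".  Always returns true
+     *  so callers can use it inside an assert. */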
+    boolean validate() {
+      switch (valueType) {
+      case NONE:
+        break;
+      case INT:
+      case HALF_FLOAT:
+      case FLOAT:
+      case LONG:
+      case DOUBLE:
+      case BIG_INT:
+      case BIG_DECIMAL:
+      case DATE:
+        if (highlighted == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot highlight");
+        }
+        if (indexAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have an indexAnalyzer");
+        }
+        if (queryAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have a queryAnalyzer");
+        }
+        if (valueType == ValueType.BIG_INT || valueType == ValueType.BIG_DECIMAL) {
+          if (docValuesType != DocValuesType.NONE && (docValuesType != DocValuesType.SORTED && docValuesType != DocValuesType.SORTED_SET)) {
+            illegalState(name, "type " + valueType + " must use SORTED or SORTED_SET docValuesType; got: " + docValuesType);
+          }
+        } else {
+          if (docValuesType != DocValuesType.NONE && (docValuesType != DocValuesType.NUMERIC && docValuesType != DocValuesType.SORTED_NUMERIC)) {
+            illegalState(name, "type " + valueType + " must use NUMERIC or SORTED_NUMERIC docValuesType; got: " + docValuesType);
+          }
+        }
+        if (indexOptions != IndexOptions.NONE && indexOptions.compareTo(IndexOptions.DOCS) > 0) {
+          illegalState(name, "type " + valueType + " cannot use indexOptions > DOCS (got indexOptions " + indexOptions + ")");
+        }
+        if (indexNorms == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot index norms");
+        }
+        if (minTokenLength != null) {
+          illegalState(name, "type " + valueType + " cannot set min/max token length");
+        }
+        if (maxTokenCount != null) {
+          illegalState(name, "type " + valueType + " cannot set max token count");
+        }
+        break;
+      case TEXT:
+        if (sortable == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot sort");
+        }
+        if (fastRanges == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot optimize for range queries");
+        }
+        if (docValuesType != DocValuesType.NONE) {
+          illegalState(name, "type " + valueType + " cannot use docValuesType " + docValuesType);
+        }
+        break;
+      case SHORT_TEXT:
+        if (docValuesType != DocValuesType.NONE && docValuesType != DocValuesType.BINARY && docValuesType != DocValuesType.SORTED && docValuesType != DocValuesType.SORTED_SET) {
+          illegalState(name, "type " + valueType + " cannot use docValuesType " + docValuesType);
+        }
+        if (fastRanges == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot optimize for range queries");
+        }
+        break;
+      case BINARY:
+      case INET_ADDRESS:
+        if (highlighted == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot highlight");
+        }
+        if (indexAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have an indexAnalyzer");
+        }
+        if (queryAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have a queryAnalyzer");
+        }
+        if (docValuesType != DocValuesType.NONE && docValuesType != DocValuesType.BINARY && docValuesType != DocValuesType.SORTED && docValuesType != DocValuesType.SORTED_SET) {
+          illegalState(name, "type " + valueType + " must use BINARY, SORTED or SORTED_SET docValuesType; got: " + docValuesType);
+        }
+        if (indexOptions != IndexOptions.NONE && indexOptions.compareTo(IndexOptions.DOCS) > 0) {
+          illegalState(name, "type " + valueType + " can only be indexed as DOCS; got " + indexOptions);
+        }
+        if (minTokenLength != null) {
+          illegalState(name, "type " + valueType + " cannot set min/max token length");
+        }
+        if (maxTokenCount != null) {
+          illegalState(name, "type " + valueType + " cannot set max token count");
+        }
+        break;
+      case ATOM:
+        if (indexAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have an indexAnalyzer");
+        }
+        if (queryAnalyzer != null) {
+          illegalState(name, "type " + valueType + " cannot have a queryAnalyzer");
+        }
+        if (indexNorms == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot index norms");
+        }
+        if (maxTokenCount != null) {
+          illegalState(name, "type " + valueType + " cannot set max token count");
+        }
+        break;
+      case BOOLEAN:
+        if (highlighted == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot highlight");
+        }
+        if (indexNorms == Boolean.TRUE) {
+          illegalState(name, "type " + valueType + " cannot index norms");
+        }
+        if (docValuesType != DocValuesType.NONE && docValuesType != DocValuesType.NUMERIC && docValuesType != DocValuesType.SORTED_NUMERIC) {
+          illegalState(name, "type " + valueType + " must use NUMERIC or SORTED_NUMERIC docValuesType; got: " + docValuesType);
+        }
+        if (minTokenLength != null) {
+          illegalState(name, "type " + valueType + " cannot set min/max token length");
+        }
+        if (maxTokenCount != null) {
+          illegalState(name, "type " + valueType + " cannot set max token count");
+        }
+        break;
+      default:
+        throw new AssertionError("missing value type in switch");
+      }
+
+      if (sortKey != null && valueType != ValueType.ATOM) {
+        illegalState(name, "sortKey can only be set for ATOM fields; got value type=" + valueType);
+      }
+
+      if (multiValued == Boolean.TRUE &&
+          (docValuesType == DocValuesType.NUMERIC ||
+           docValuesType == DocValuesType.SORTED ||
+           docValuesType == DocValuesType.BINARY)) {
+        illegalState(name, "DocValuesType=" + docValuesType + " cannot be multi-valued");
+      }
+
+      if (storeTermVectors == Boolean.TRUE) {
+        if (indexOptionsSet && indexOptions == IndexOptions.NONE) {
+          illegalState(name, "cannot enable term vectors when indexOptions is NONE");
+        }
+      } else {
+        if (storeTermVectorOffsets == Boolean.TRUE) {
+          illegalState(name, "cannot enable term vector offsets when term vectors are not enabled");
+        }
+        if (storeTermVectorPositions == Boolean.TRUE) {
+          illegalState(name, "cannot enable term vector positions when term vectors are not enabled");
+        }
+      }
+
+      if (sortable == Boolean.TRUE && (docValuesTypeSet && docValuesType == DocValuesType.NONE)) {
+        illegalState(name, "cannot sort when DocValuesType=" + docValuesType);
+      }
+
+      if (sortable == Boolean.FALSE && sortLocale != null) {
+        illegalState(name, "cannot set sortLocale when field is not enabled for sorting");
+      }
+
+      if (indexOptionsSet) {
+        if (indexOptions == IndexOptions.NONE) {
+          if (blockTreeMinItemsInBlock != null) {
+            illegalState(name, "can only setTermsDictBlockSize if the field is indexed");
+          }
+          if (blockTreeMinItemsInAutoPrefix != null) {
+            illegalState(name, "can only setTermsDictAutoPrefixSize if the field is indexed");
+          }
+          if (indexAnalyzer != null) {
+            illegalState(name, "can only setIndexAnalyzer if the field is indexed");
+          }
+          if (queryAnalyzer != null) {
+            illegalState(name, "can only setQueryAnalyzer if the field is indexed");
+          }
+          if (fastRanges == Boolean.TRUE) {
+            illegalState(name, "can only enableFastRanges if the field is indexed");
+          }
+          if (isUnique == Boolean.TRUE) {
+            illegalState(name, "can only setIsUnique if the field is indexed");
+          }
+          if (storeTermVectors == Boolean.TRUE) {
+            illegalState(name, "can only store term vectors if the field is indexed");
+          }
+        } else {
+          if (valueType != ValueType.TEXT && valueType != ValueType.SHORT_TEXT && indexAnalyzer != null) {
+            illegalState(name, "can only setIndexAnalyzer for short text and large text fields; got value type=" + valueType);
+          }
+          if (valueType != ValueType.TEXT && valueType != ValueType.SHORT_TEXT && queryAnalyzer != null) {
+            illegalState(name, "can only setQueryAnalyzer for short text and large text fields; got value type=" + valueType);
+          }
+          if (isUnique == Boolean.TRUE && indexOptions != IndexOptions.DOCS) {
+            illegalState(name, "unique fields should be indexed with IndexOptions.DOCS; got indexOptions=" + indexOptions);
+          }
+        }
+      }
+
+      if (reversedTerms == Boolean.TRUE) {
+        if (indexOptions == IndexOptions.NONE) {
+          illegalState(name, "can only reverse terms if the field is indexed");
+        }
+        if (valueType != ValueType.SHORT_TEXT && valueType != ValueType.TEXT && valueType != ValueType.ATOM) {
+          illegalState(name, "can only reverse terms for text and short_text value type; got value type=" + valueType);
+        }
+      }
+
+      if (fastRanges == Boolean.TRUE && indexOptions != IndexOptions.DOCS) {
+        illegalState(name, "fastRanges is only possible when indexOptions=DOCS; got: " + indexOptions);
+      }
+
+      if (analyzerPositionGap != null) {
+        if (indexOptions == IndexOptions.NONE) {
+          illegalState(name, "can only setAnalyzerPositionGap if the field is indexed");
+        }
+        if (multiValued != Boolean.TRUE) {
+          illegalState(name, "can only setAnalyzerPositionGap if the field is multi-valued");
+        }
+      }
+
+      if (analyzerOffsetGap != null) {
+        if (indexOptions == IndexOptions.NONE) {
+          illegalState(name, "can only setAnalyzerOffsetGap if the field is indexed");
+        }
+        if (multiValued != Boolean.TRUE) {
+          illegalState(name, "can only setAnalyzerOffsetGap if the field is multi-valued");
+        }
+      }
+
+      if (postingsFormat != null && blockTreeMinItemsInBlock != null) {
+        illegalState(name, "cannot use both setTermsDictBlockSize and setPostingsFormat");
+      }
+
+      if (postingsFormat != null && fastRanges == Boolean.TRUE) {
+        illegalState(name, "cannot use both enableFastRanges and setPostingsFormat");
+      }
+
+      if (postingsFormat != null && blockTreeMinItemsInAutoPrefix != null) {
+        illegalState(name, "cannot use both setTermsDictAutoPrefixSize and setPostingsFormat");
+      }
+
+      if (highlighted == Boolean.TRUE) {
+        if (valueType != ValueType.TEXT && valueType != ValueType.SHORT_TEXT && valueType != ValueType.ATOM) {
+          illegalState(name, "can only enable highlighting for TEXT or SHORT_TEXT fields; got value type=" + valueType);
+        }
+        if (indexOptions != IndexOptions.NONE && indexOptions != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
+          illegalState(name, "must index with IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS when highlighting is enabled");
+        }
+      }
+
+      return true;
+    }
+
+    private boolean needsWrapping() {
+      return minTokenLength != null || maxTokenCount != null || reversedTerms == Boolean.TRUE;
+    }
+
+    void reWrapAnalyzers(Analyzer defaultIndexAnalyzer, Analyzer defaultQueryAnalyzer) {
+      if (needsWrapping()) {
+        if (indexAnalyzer != null) {
+          wrappedIndexAnalyzer = wrapAnalyzer(indexAnalyzer);
+        } else if (defaultIndexAnalyzer != null) {
+          wrappedIndexAnalyzer = wrapAnalyzer(defaultIndexAnalyzer);
+        } else {
+          wrappedIndexAnalyzer = null;
+        }
+        if (queryAnalyzer != null) {
+          wrappedQueryAnalyzer = wrapAnalyzer(queryAnalyzer);
+        } else if (defaultQueryAnalyzer != null) {
+          wrappedQueryAnalyzer = wrapAnalyzer(defaultQueryAnalyzer);
+        } else {
+          wrappedQueryAnalyzer = null;
+        }
+      } else {
+        wrappedIndexAnalyzer = indexAnalyzer;
+        wrappedQueryAnalyzer = queryAnalyzer;
+      }
+    }
+
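+    /** Fills in defaults for any options the user left unset, per value type.  For example,
+     *  an INT field with nothing customized becomes stored=true, sortable=true,
+     *  multiValued=false, indexOptions=DOCS, docValuesType=NUMERIC and fastRanges=true
+     *  (read off the switch below; this comment adds no behavior). */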
+    void setDefaults() {
+      switch (valueType) {
+      case NONE:
+        // bug
+        throw new AssertionError("valueType should not be NONE");
+      case INT:
+      case HALF_FLOAT:
+      case FLOAT:
+      case LONG:
+      case DOUBLE:
+      case BIG_INT:
+      case BIG_DECIMAL:
+      case DATE:
+        if (highlighted == null) {
+          highlighted = Boolean.FALSE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          if (valueType == ValueType.BIG_INT || valueType == ValueType.BIG_DECIMAL) {
+            if (docValuesTypeSet == false || docValuesType == DocValuesType.SORTED || docValuesType == DocValuesType.SORTED_SET) {
+              sortable = Boolean.TRUE;
+            } else {
+              sortable = Boolean.FALSE;
+            }
+          } else {
+            if (docValuesTypeSet == false || docValuesType == DocValuesType.NUMERIC || docValuesType == DocValuesType.SORTED_NUMERIC) {
+              sortable = Boolean.TRUE;
+            } else {
+              sortable = Boolean.FALSE;
+            }
+          }
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (indexOptionsSet == false) {
+          indexOptions = IndexOptions.DOCS;
+          indexOptionsSet = true;
+        }
+        if (docValuesTypeSet == false) {
+          if (sortable == Boolean.TRUE) {
+            if (valueType == ValueType.BIG_INT || valueType == ValueType.BIG_DECIMAL) {
+              if (multiValued == Boolean.TRUE) {
+                docValuesType = DocValuesType.SORTED_SET;
+              } else {
+                docValuesType = DocValuesType.SORTED;
+              }
+            } else {
+              if (multiValued == Boolean.TRUE) {
+                docValuesType = DocValuesType.SORTED_NUMERIC;
+              } else {
+                docValuesType = DocValuesType.NUMERIC;
+              }
+            }
+          }
+          docValuesTypeSet = true;
+        }
+        if (fastRanges == null) {
+          if (indexOptions != IndexOptions.NONE) {
+            fastRanges = Boolean.TRUE;
+          } else {
+            fastRanges = Boolean.FALSE;
+          }
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.FALSE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      case SHORT_TEXT:
+        if (highlighted == null) {
+          highlighted = Boolean.TRUE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          if (docValuesTypeSet == false || docValuesType == DocValuesType.SORTED || docValuesType == DocValuesType.SORTED_SET) {
+            sortable = Boolean.TRUE;
+          } else {
+            sortable = Boolean.FALSE;
+          }
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (docValuesTypeSet == false) {
+          if (sortable == Boolean.TRUE) {
+            if (multiValued == Boolean.TRUE) {
+              docValuesType = DocValuesType.SORTED_SET;
+            } else {
+              docValuesType = DocValuesType.SORTED;
+            }
+          }
+          docValuesTypeSet = true;
+        }
+        if (indexOptionsSet == false) {
+          if (highlighted) {
+            indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+          } else {
+            indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+          }
+          indexOptionsSet = true;
+        }
+        if (fastRanges == null) {
+          fastRanges = Boolean.FALSE;
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.FALSE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      case ATOM:
+      case INET_ADDRESS:
+        if (highlighted == null) {
+          highlighted = Boolean.FALSE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          if (docValuesTypeSet == false || docValuesType == DocValuesType.SORTED || docValuesType == DocValuesType.SORTED_SET) {
+            sortable = Boolean.TRUE;
+          } else {
+            sortable = Boolean.FALSE;
+          }
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (indexOptionsSet == false) { 
+          if (highlighted) {
+            indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+          } else {
+            indexOptions = IndexOptions.DOCS;
+          }
+          indexOptionsSet = true;
+        }
+        if (docValuesTypeSet == false) {
+          if (sortable == Boolean.TRUE) {
+            if (multiValued == Boolean.TRUE) {
+              docValuesType = DocValuesType.SORTED_SET;
+            } else {
+              docValuesType = DocValuesType.SORTED;
+            }
+          }
+          docValuesTypeSet = true;
+        }
+        if (fastRanges == null) {
+          if (indexOptions != IndexOptions.NONE) {
+            fastRanges = Boolean.TRUE;
+          } else {
+            fastRanges = Boolean.FALSE;
+          }
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.FALSE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      case BINARY:
+        if (highlighted == null) {
+          highlighted = Boolean.FALSE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          if (docValuesTypeSet == false || docValuesType == DocValuesType.SORTED || docValuesType == DocValuesType.SORTED_SET) {
+            sortable = Boolean.TRUE;
+          } else {
+            sortable = Boolean.FALSE;
+          }
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (indexOptionsSet == false) {
+          assert indexOptions == IndexOptions.NONE;
+          indexOptionsSet = true;
+        }
+        if (docValuesTypeSet == false) {
+          if (sortable == Boolean.TRUE) {
+            if (multiValued == Boolean.TRUE) {
+              docValuesType = DocValuesType.SORTED_SET;
+            } else {
+              docValuesType = DocValuesType.SORTED;
+            }
+          } else {
+            docValuesType = DocValuesType.BINARY;
+          }
+          docValuesTypeSet = true;
+        }
+        if (fastRanges == null) {
+          fastRanges = Boolean.FALSE;
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.FALSE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      case TEXT:
+        if (highlighted == null) {
+          highlighted = Boolean.TRUE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          sortable = Boolean.FALSE;
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (indexOptionsSet == false) {
+          if (highlighted) {
+            indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+          } else {
+            indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+          }
+          indexOptionsSet = true;
+        }
+
+        assert docValuesType == DocValuesType.NONE;
+        docValuesTypeSet = true;
+
+        if (fastRanges == null) {
+          fastRanges = Boolean.FALSE;
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.TRUE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      case BOOLEAN:
+        if (highlighted == null) {
+          highlighted = Boolean.FALSE;
+        }
+        if (storeTermVectors == null) {
+          storeTermVectors = Boolean.FALSE;
+        }
+        if (sortable == null) {
+          sortable = Boolean.TRUE;
+        }
+        if (multiValued == null) {
+          multiValued = Boolean.FALSE;
+        }
+        if (stored == null) {
+          stored = Boolean.TRUE;
+        }
+        if (indexOptionsSet == false) {
+          // validate enforces this:
+          assert highlighted == false;
+          indexOptions = IndexOptions.DOCS;
+          indexOptionsSet = true;
+        }
+        if (docValuesTypeSet == false) {
+          if (sortable == Boolean.TRUE) {
+            if (multiValued == Boolean.TRUE) {
+              docValuesType = DocValuesType.SORTED_NUMERIC;
+            } else {
+              docValuesType = DocValuesType.NUMERIC;
+            }
+          }
+          docValuesTypeSet = true;
+        }
+        if (fastRanges == null) {
+          fastRanges = Boolean.FALSE;
+        }
+        if (indexNorms == null) {
+          indexNorms = Boolean.FALSE;
+        }
+        if (isUnique == null) {
+          isUnique = Boolean.FALSE;
+        }
+        break;
+
+      default:
+        throw new AssertionError("missing value type in switch");
+      }
+
+      if (fastRanges == Boolean.TRUE) {
+        if (blockTreeMinItemsInAutoPrefix == null) {
+          blockTreeMinItemsInAutoPrefix = BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE;
+          blockTreeMaxItemsInAutoPrefix = BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE;
+        }
+      }
+
+      assert name != null;
+      assert createdVersion != null;
+      assert valueType != null;
+      assert docValuesTypeSet;
+      assert docValuesType != null;
+      assert isUnique != null;
+      assert storedOnly != null;
+      assert valueType != ValueType.ATOM || isBinary != null;
+      assert indexOptionsSet;
+      assert indexOptions != null;
+      assert stored != null;
+      assert sortable != null;
+      assert fastRanges != null;
+      assert multiValued != null;
+      assert indexOptions == IndexOptions.NONE || indexNorms != null;
+      assert highlighted != null;
+      assert storeTermVectors != null;
+
+      // setDefaults() should never create an invalid state:
+      validate();
+    } 
+
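+    /** Wraps the incoming analyzer so the per-field limits take effect.  For instance, with
+     *  min/max token length, a token count limit and reversedTerms all set, the resulting
+     *  chain is roughly: tokenizer -> LengthFilter -> LimitTokenCountFilter ->
+     *  ReverseStringFilter (the filters below are applied in that order; min and max token
+     *  length are assumed to be set together, since LengthFilter reads both). */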
+    private Analyzer wrapAnalyzer(final Analyzer in) {
+      return new AnalyzerWrapper(in.getReuseStrategy()) {
+        @Override
+        protected Analyzer getWrappedAnalyzer(String fieldName) {
+          return in;
+        }
+
+        @Override
+        protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+          TokenStream end = components.getTokenStream();
+          if (minTokenLength != null) {
+            end = new LengthFilter(end,
+                                   minTokenLength.intValue(),
+                                   maxTokenLength.intValue());
+          }
+
+          if (maxTokenCount != null) {
+            end = new LimitTokenCountFilter(end, maxTokenCount.intValue(), consumeAllTokens.booleanValue());
+          }
+
+          if (reversedTerms == Boolean.TRUE) {
+            try {
+              Class<?> c = Class.forName("org.apache.lucene.analysis.reverse.ReverseStringFilter");
+              Constructor<?> init = c.getConstructor(TokenStream.class);
+              end = (TokenStream) init.newInstance(end);
+            } catch (ReflectiveOperationException roe) {
+              throw new IllegalStateException("could not locate ReverseStringFilter; ensure Lucene's analysis module is on your CLASSPATH", roe);
+            }
+          }
+
+          return new TokenStreamComponents(components.getTokenizer(), end);
+        }
+      };
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder b = new StringBuilder();
+      b.append("field \"");
+      b.append(name);
+      b.append("\":\n");
+      b.append("  value type: ");
+      b.append(valueType);
+      if (valueType == ValueType.ATOM && isBinary == Boolean.TRUE) {
+        b.append(" (binary)");
+      }
+      b.append('\n');
+
+      if (blockTreeMinItemsInBlock != null) {
+        b.append("  term blocks: ");
+        b.append(blockTreeMinItemsInBlock);
+        b.append(" - ");
+        b.append(blockTreeMaxItemsInBlock);
+        b.append('\n');
+      }
+
+      if (analyzerPositionGap != null) {
+        b.append("  multi-valued position gap: ");
+        b.append(analyzerPositionGap);
+        b.append('\n');
+      }
+
+      if (analyzerOffsetGap != null) {
+        b.append("  multi-valued offset gap: ");
+        b.append(analyzerOffsetGap);
+        b.append('\n');
+      }
+
+      if (multiValued == Boolean.TRUE) {
+        b.append("  multiValued: true");
+        b.append('\n');
+      }
+
+      b.append("  stored: ");
+      if (stored != null) {
+        b.append(stored);
+      } else {
+        b.append("unset");
+      }
+      b.append('\n');
+
+      b.append("  sortable: ");
+      if (sortable != null) {
+        b.append(sortable);
+        if (sortable == Boolean.TRUE) {
+          if (sortReversed != null) {
+            b.append(" reversed=");
+            b.append(sortReversed);
+          }
+          if (sortMissingLast == Boolean.TRUE) {
+            b.append(" (missing: last)");
+          } else if (sortMissingLast == Boolean.FALSE) {
+            b.append(" (missing: first)");
+          }
+        }
+
+        if (multiValued == Boolean.TRUE) {
+          if (isNumericType(valueType)) {
+            if (numericSelector != null) {
+              b.append(" (numericSelector: " + numericSelector + ")");
+            }
+          } else if (sortedSetSelector != null) {
+            b.append(" (sortedSetSelector: " + sortedSetSelector + ")");
+          }
+        }
+      } else {
+        b.append("unset");
+      }
+      b.append('\n');
+
+      b.append("  fastRanges: ");
+      if (fastRanges != null) {
+        b.append(fastRanges);
+        if (fastRanges == Boolean.TRUE) {
+          b.append(" (auto-prefix blocks: ");
+          if (blockTreeMinItemsInAutoPrefix != null) {
+            b.append(blockTreeMinItemsInAutoPrefix);
+            b.append(" - ");
+            b.append(blockTreeMaxItemsInAutoPrefix);
+          } else {
+            b.append(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE);
+            b.append(" - ");
+            b.append(BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
+          }
+          b.append(")");
+        }
+      } else {
+        b.append("unset");
+      }
+      b.append('\n');
+
+      b.append("  highlighted: ");
+      if (highlighted != null) {
+        b.append(highlighted);
+      } else {
+        b.append("unset");
+      }
+      b.append('\n');
+
+      b.append("  docValuesType: ");
+      if (docValuesTypeSet == false) {
+        b.append("unset");
+      } else if (docValuesType == DocValuesType.NONE) {
+        b.append("disabled");
+      } else {
+        b.append(docValuesType);
+      }
+      b.append('\n');
+
+      b.append("  indexOptions: ");
+      if (indexOptionsSet == false) {
+        b.append("unset");
+      } else if (indexOptions == IndexOptions.NONE) {
+        b.append("disabled");
+      } else {
+        b.append(indexOptions);
+        if (isUnique != null) {
+          b.append("\n  unique: " + isUnique);
+        }
+        if (storeTermVectors == Boolean.TRUE) {
+          b.append("\n  termVectors: yes");
+          if (storeTermVectorPositions == Boolean.TRUE) {
+            b.append(" positions");
+            if (storeTermVectorPayloads == Boolean.TRUE) {
+              b.append(" payloads");
+            }
+          }
+          if (storeTermVectorOffsets == Boolean.TRUE) {
+            b.append(" offsets");
+          }
+        } else if (storeTermVectors == Boolean.FALSE) {
+          b.append("\n  termVectors: no");
+        } else {
+          b.append("\n  termVectors: unset");
+        }
+        if (minTokenLength != null) {
+          b.append("\n  token length limit min=");
+          b.append(minTokenLength);
+          b.append(" max");
+          b.append(maxTokenLength);
+        }
+        if (maxTokenCount != null) {
+          b.append("\n  token count limit=");
+          b.append(maxTokenCount);
+          b.append(" consumeAllTokens=");
+          b.append(consumeAllTokens);
+        }
+      }
+      b.append('\n');
+
+      return b.toString();
+    }
+
+    @Override
+    public boolean stored() {
+      return stored == Boolean.TRUE;
+    }
+
+    @Override
+    public boolean storeTermVectors() {
+      return storeTermVectors == Boolean.TRUE;
+    }
+
+    @Override
+    public boolean storeTermVectorOffsets() {
+      return storeTermVectorOffsets == Boolean.TRUE;
+    }
+
+    @Override
+    public boolean storeTermVectorPositions() {
+      return storeTermVectorPositions == Boolean.TRUE;
+    }
+
+    @Override
+    public boolean storeTermVectorPayloads() {
+      return storeTermVectorPayloads == Boolean.TRUE;
+    }
+
+    @Override
+    public boolean omitNorms() {
+      return indexNorms == null || indexNorms.booleanValue() == false;
+    }
+
+    @Override
+    public IndexOptions indexOptions() {
+      return indexOptions;
+    }
+
+    @Override
+    public DocValuesType docValuesType() {
+      return docValuesType;
+    }
+
+    void write(DataOutput out) throws IOException {
+      out.writeString(name);
+
+      out.writeVInt(createdVersion.major);
+      out.writeVInt(createdVersion.minor);
+      out.writeVInt(createdVersion.bugfix);
+
+      switch (valueType) {
+      case NONE:
+        out.writeByte((byte) 0);
+        break;
+      case TEXT:
+        out.writeByte((byte) 1);
+        break;
+      case SHORT_TEXT:
+        out.writeByte((byte) 2);
+        break;
+      case ATOM:
+        out.writeByte((byte) 3);
+        break;
+      case INT:
+        out.writeByte((byte) 4);
+        break;
+      case HALF_FLOAT:
+        out.writeByte((byte) 5);
+        break;
+      case FLOAT:
+        out.writeByte((byte) 6);
+        break;
+      case LONG:
+        out.writeByte((byte) 7);
+        break;
+      case DOUBLE:
+        out.writeByte((byte) 8);
+        break;
+      case BIG_INT:
+        out.writeByte((byte) 9);
+        break;
+      case BIG_DECIMAL:
+        out.writeByte((byte) 10);
+        break;
+      case BINARY:
+        out.writeByte((byte) 11);
+        break;
+      case BOOLEAN:
+        out.writeByte((byte) 12);
+        break;
+      case DATE:
+        out.writeByte((byte) 13);
+        break;
+      case INET_ADDRESS:
+        out.writeByte((byte) 14);
+        break;
+      default:
+        throw new AssertionError("missing value type in switch");
+      }
+
+      if (docValuesTypeSet == false) {
+        assert docValuesType == DocValuesType.NONE;
+        out.writeByte((byte) 0);
+      } else {
+        switch (docValuesType) {
+        case NONE:
+          out.writeByte((byte) 1);
+          break;
+        case NUMERIC:
+          out.writeByte((byte) 2);
+          break;
+        case BINARY:
+          out.writeByte((byte) 3);
+          break;
+        case SORTED:
+          out.writeByte((byte) 4);
+          break;
+        case SORTED_NUMERIC:
+          out.writeByte((byte) 5);
+          break;
+        case SORTED_SET:
+          out.writeByte((byte) 6);
+          break;
+        default:
+          throw new AssertionError("missing DocValuesType in switch");
+        }
+      }
+
+      writeNullableInteger(out, blockTreeMinItemsInBlock);
+      writeNullableInteger(out, blockTreeMaxItemsInBlock);
+      writeNullableInteger(out, blockTreeMinItemsInAutoPrefix);
+      writeNullableInteger(out, blockTreeMaxItemsInAutoPrefix);
+      writeNullableInteger(out, analyzerPositionGap);
+      writeNullableInteger(out, analyzerOffsetGap);
+      writeNullableInteger(out, minTokenLength);
+      writeNullableInteger(out, maxTokenLength);
+      writeNullableInteger(out, maxTokenCount);
+      writeNullableBoolean(out, consumeAllTokens);
+      writeNullableBoolean(out, stored);
+      writeNullableBoolean(out, sortable);
+      writeNullableBoolean(out, sortReversed);
+      writeNullableBoolean(out, sortMissingLast);
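+      // Unlike the nullable options above, numericSelector and sortedSetSelector are written
+      // unconditionally, so they are assumed to have non-null defaults; the matching reader
+      // below calls valueOf(in.readString()) with no null check.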
+      out.writeString(numericSelector.toString());
+      out.writeString(sortedSetSelector.toString());
+      writeNullableBoolean(out, multiValued);
+      writeNullableBoolean(out, indexNorms);
+      writeNullableBoolean(out, reversedTerms);
+      writeNullableInteger(out, bigIntByteWidth);
+      writeNullableInteger(out, bigDecimalScale);
+      writeNullableBoolean(out, fastRanges);
+      writeNullableBoolean(out, storeTermVectors);
+      writeNullableBoolean(out, storeTermVectorPositions);
+      writeNullableBoolean(out, storeTermVectorOffsets);
+      writeNullableBoolean(out, storeTermVectorPayloads);
+      writeNullableBoolean(out, isUnique);
+      writeNullableBoolean(out, storedOnly);
+      writeNullableBoolean(out, isBinary);
+
+      if (sortLocale != null) {
+        out.writeByte((byte) 1);
+        writeNullableString(out, sortLocale.getLanguage());
+        writeNullableString(out, sortLocale.getCountry());
+        writeNullableString(out, sortLocale.getVariant());
+        writeNullableInteger(out, sortCollatorStrength);
+      } else {
+        out.writeByte((byte) 0);
+      }
+
+      if (indexOptionsSet == false) {
+        assert indexOptions == IndexOptions.NONE;
+        out.writeByte((byte) 0);
+      } else {
+        switch(indexOptions) {
+        case NONE:
+          out.writeByte((byte) 1);
+          break;
+        case DOCS:
+          out.writeByte((byte) 2);
+          break;
+        case DOCS_AND_FREQS:
+          out.writeByte((byte) 3);
+          break;
+        case DOCS_AND_FREQS_AND_POSITIONS:
+          out.writeByte((byte) 4);
+          break;
+        case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
+          out.writeByte((byte) 5);
+          break;
+        default:
+          throw new AssertionError("missing IndexOptions in switch");
+        }
+      }
+
+      writeNullableString(out, postingsFormat);
+      writeNullableString(out, docValuesFormat);
+      writeNullableBoolean(out, highlighted);
+    }
+
+    public FieldType(DataInput in) throws IOException {
+      name = in.readString();
+      createdVersion = Version.fromBits(in.readVInt(), in.readVInt(), in.readVInt());
+
+      byte b = in.readByte();
+      switch (b) {
+      case 0:
+        valueType = ValueType.NONE;
+        break;
+      case 1:
+        valueType = ValueType.TEXT;
+        break;
+      case 2:
+        valueType = ValueType.SHORT_TEXT;
+        break;
+      case 3:
+        valueType = ValueType.ATOM;
+        break;
+      case 4:
+        valueType = ValueType.INT;
+        break;
+      case 5:
+        valueType = ValueType.HALF_FLOAT;
+        break;
+      case 6:
+        valueType = ValueType.FLOAT;
+        break;
+      case 7:
+        valueType = ValueType.LONG;
+        break;
+      case 8:
+        valueType = ValueType.DOUBLE;
+        break;
+      case 9:
+        valueType = ValueType.BIG_INT;
+        break;
+      case 10:
+        valueType = ValueType.BIG_DECIMAL;
+        break;
+      case 11:
+        valueType = ValueType.BINARY;
+        break;
+      case 12:
+        valueType = ValueType.BOOLEAN;
+        break;
+      case 13:
+        valueType = ValueType.DATE;
+        break;
+      case 14:
+        valueType = ValueType.INET_ADDRESS;
+        break;
+      default:
+        throw new CorruptIndexException("invalid byte for value type: " + b, in);
+      }
+
+      b = in.readByte();
+      switch (b) {
+      case 0:
+        docValuesTypeSet = false;
+        docValuesType = DocValuesType.NONE;
+        break;
+      case 1:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.NONE;
+        break;
+      case 2:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.NUMERIC;
+        break;
+      case 3:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.BINARY;
+        break;
+      case 4:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.SORTED;
+        break;
+      case 5:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.SORTED_NUMERIC;
+        break;
+      case 6:
+        docValuesTypeSet = true;
+        docValuesType = DocValuesType.SORTED_SET;
+        break;
+      default:
+        throw new CorruptIndexException("invalid byte for DocValuesType: " + b, in);
+      }
+
+      blockTreeMinItemsInBlock = readNullableInteger(in);
+      blockTreeMaxItemsInBlock = readNullableInteger(in);
+      blockTreeMinItemsInAutoPrefix = readNullableInteger(in);
+      blockTreeMaxItemsInAutoPrefix = readNullableInteger(in);
+      analyzerPositionGap = readNullableInteger(in);
+      analyzerOffsetGap = readNullableInteger(in);
+      minTokenLength = readNullableInteger(in);
+      maxTokenLength = readNullableInteger(in);
+      maxTokenCount = readNullableInteger(in);
+      consumeAllTokens = readNullableBoolean(in);
+      stored = readNullableBoolean(in);
+      sortable = readNullableBoolean(in);
+      sortReversed = readNullableBoolean(in);
+      sortMissingLast = readNullableBoolean(in);
+      numericSelector = SortedNumericSelector.Type.valueOf(in.readString());
+      sortedSetSelector = SortedSetSelector.Type.valueOf(in.readString());
+      multiValued = readNullableBoolean(in);
+      indexNorms = readNullableBoolean(in);
+      reversedTerms = readNullableBoolean(in);
+      bigIntByteWidth = readNullableInteger(in);
+      bigDecimalScale = readNullableInteger(in);
+      fastRanges = readNullableBoolean(in);
+      storeTermVectors = readNullableBoolean(in);
+      storeTermVectorPositions = readNullableBoolean(in);
+      storeTermVectorOffsets = readNullableBoolean(in);
+      storeTermVectorPayloads = readNullableBoolean(in);
+      isUnique = readNullableBoolean(in);
+      storedOnly = readNullableBoolean(in);
+      isBinary = readNullableBoolean(in);
+      b = in.readByte();
+      if (b == 1) {
+        String language = readNullableString(in);
+        String country = readNullableString(in);
+        String variant = readNullableString(in);
+        sortCollatorStrength = readNullableInteger(in);
+        sortLocale = new Locale(language, country, variant);
+        sortCollator = Collator.getInstance(sortLocale);
+        sortCollator.setStrength(sortCollatorStrength);
+      } else if (b != 0) {
+        throw new CorruptIndexException("invalid byte for sortLocale: " + b, in);        
+      }
+      b = in.readByte();
+      switch (b) {
+      case 0:
+        indexOptionsSet = false;
+        indexOptions = IndexOptions.NONE;
+        break;
+      case 1:
+        indexOptionsSet = true;
+        indexOptions = IndexOptions.NONE;
+        break;
+      case 2:
+        indexOptionsSet = true;
+        indexOptions = IndexOptions.DOCS;
+        break;
+      case 3:
+        indexOptionsSet = true;
+        indexOptions = IndexOptions.DOCS_AND_FREQS;
+        break;
+      case 4:
+        indexOptionsSet = true;
+        indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        break;
+      case 5:
+        indexOptionsSet = true;
+        indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+        break;
+      default:
+        throw new CorruptIndexException("invalid byte for IndexOptions: " + b, in);
+      }
+
+      postingsFormat = readNullableString(in);
+      docValuesFormat = readNullableString(in);
+      highlighted = readNullableBoolean(in);
+    }
+
+    @Override
+    public int getPositionGap() {
+      if (analyzerPositionGap != null) {
+        return analyzerPositionGap;
+      } else if (indexAnalyzer != null) {
+        return indexAnalyzer.getPositionIncrementGap(name);
+      } else if (defaultIndexAnalyzer != null) {
+        return defaultIndexAnalyzer.getPositionIncrementGap(name);
+      } else {
+        return DEFAULT_POSITION_GAP;
+      }
+    }
+
+    @Override
+    public int getOffsetGap() {
+      if (analyzerOffsetGap != null) {
+        return analyzerOffsetGap;
+      } else if (indexAnalyzer != null) {
+        return indexAnalyzer.getOffsetGap(name);
+      } else if (defaultIndexAnalyzer != null) {
+        return defaultIndexAnalyzer.getOffsetGap(name);
+      } else {
+        return DEFAULT_OFFSET_GAP;
+      }
+    }
+  }
+
+  /** Only invoked directly by IndexWriter.
+   *
+   * @lucene.internal */
+  public FieldTypes(IndexWriter writer, boolean isNewIndex, boolean is5xIndex, Analyzer defaultIndexAnalyzer, Similarity defaultSimilarity) throws IOException {
+    this.readOnly = false;
+    indexCreatedVersion = loadFields(writer.getCommitData(), isNewIndex, is5xIndex);
+    this.defaultIndexAnalyzer = defaultIndexAnalyzer;
+    this.defaultQueryAnalyzer = null;
+    this.defaultSimilarity = defaultSimilarity;
+  }
+
+  /** Only used by index readers. */
+  private FieldTypes(SegmentInfos infos, Analyzer defaultQueryAnalyzer, Similarity defaultSimilarity) throws IOException {
+    this.readOnly = true;
+    indexCreatedVersion = loadFields(infos.getUserData(), false, infos.infosVersion < SegmentInfos.VERSION_60);
+    this.defaultIndexAnalyzer = null;
+    this.defaultQueryAnalyzer = defaultQueryAnalyzer;
+    this.defaultSimilarity = defaultSimilarity;
+  }
+
+  public FieldTypes(FieldTypes other) {
+    readOnly = true;
+    this.defaultIndexAnalyzer = null;
+    if (other != null) {
+      indexCreatedVersion = other.indexCreatedVersion;
+      this.defaultQueryAnalyzer = other.defaultQueryAnalyzer;
+      this.defaultSimilarity = other.defaultSimilarity;
+      addAll(other);
+    } else {
+      indexCreatedVersion = Version.LATEST;
+      this.defaultQueryAnalyzer = null;
+      this.defaultSimilarity = null;
+    }
+  }
+
+  public FieldTypes(FieldTypes other, Iterable<String> fieldsToKeep) {
+    readOnly = true;
+    this.defaultIndexAnalyzer = null;
+    if (other != null) {
+      this.defaultQueryAnalyzer = other.defaultQueryAnalyzer;
+      this.defaultSimilarity = other.defaultSimilarity;
+      indexCreatedVersion = other.indexCreatedVersion;
+      for(String field : fieldsToKeep) {
+        FieldType fieldType = other.fields.get(field);
+        if (fieldType != null) {
+          fields.put(field, new FieldType(fieldType));
+        } else {
+          throw new IllegalArgumentException("unknown field \"" + field + "\"");
+        }
+      }
+      FieldType fieldType = other.fields.get(FIELD_NAMES_FIELD);
+      if (fieldType != null) {
+        fields.put(FIELD_NAMES_FIELD, new FieldType(fieldType));
+      }
+      
+    } else {
+      addFieldNamesField();
+      this.defaultQueryAnalyzer = null;
+      this.defaultSimilarity = null;
+      indexCreatedVersion = Version.LATEST;
+    }
+  }
+
+  private synchronized Version loadFields(Map<String,String> commitUserData, boolean isNewIndex, boolean is5xIndex) throws IOException {
+    String currentFieldTypes = commitUserData.get(FIELD_TYPES_KEY);
+    if (currentFieldTypes != null) {
+      if (isNewIndex) {
+        throw new AssertionError("new index should not have field types");
+      }
+      return readFromString(currentFieldTypes);
+    } else if (isNewIndex == false) {
+      if (is5xIndex == false) {
+        // Index already exists, but no FieldTypes
+        throw new CorruptIndexException("FieldTypes is missing from this index", "CommitUserData");
+      } else {
+        enableExistsFilters = false;
+        return Version.LUCENE_5_0_0;
+      }
+    } else {
+      addFieldNamesField();
+      return Version.LATEST;
+    }
+  }
+
+  private void addFieldNamesField() {
+    assert fields.containsKey(FIELD_NAMES_FIELD) == false;
+
+    FieldType fieldType = new FieldType(FIELD_NAMES_FIELD);
+    fields.put(FIELD_NAMES_FIELD, fieldType);
+    fieldType.valueType = ValueType.ATOM;
+    fieldType.multiValued = Boolean.TRUE;
+    fieldType.sortable = Boolean.FALSE;
+    fieldType.stored = Boolean.FALSE;
+    fieldType.storedOnly = Boolean.FALSE;
+    fieldType.fastRanges = Boolean.FALSE;
+    fieldType.isBinary = Boolean.FALSE;
+    fieldType.setDefaults();
+  }
+
+  private synchronized FieldType newFieldType(String fieldName) {
+    if (fieldName.equals(FIELD_NAMES_FIELD)) {
+      throw new IllegalArgumentException("field name \"" + fieldName + "\" is reserved");
+    }
+
+    return new FieldType(fieldName);
+  }
+
+  /** Decodes a String previously created by {@link #bytesToString}. */
+  private static byte[] stringToBytes(String s) {
+    byte[] bytesIn = s.getBytes(StandardCharsets.UTF_8);
+    byte[] bytesOut = new byte[bytesIn.length*7/8];
+    int carry = 0;
+    int carryBits = 0;
+    int inUpto = 0;
+    int outUpto = 0;
+    while (inUpto < bytesIn.length) {
+      carry |= (bytesIn[inUpto++] & 0xff) << carryBits;
+      carryBits += 7;
+      if (carryBits >= 8) {
+        bytesOut[outUpto++] = (byte) (carry & 0xff);
+        carry = carry >>> 8;
+        carryBits -= 8;
+      }
+    }
+    assert outUpto == bytesOut.length;
+    return bytesOut;
+  }
+
+  /** Encodes a byte[] to 7-bit clean chars (ASCII).
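+   *
+   *  <p>Round-trip sketch (assumed usage; {@code original} is any byte[]): this is what lets
+   *  the serialized field types live in commit user data, which must be a String, with
+   *  {@link #stringToBytes} as the inverse:
+   *  <pre>
+   *    String ascii = bytesToString(original);
+   *    assert Arrays.equals(original, stringToBytes(ascii));
+   *  </pre> */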
+  private static String bytesToString(byte[] bytesIn) {
+    byte[] bytesOut = new byte[(6+bytesIn.length*8)/7];
+    int carry = 0;
+    int carryBits = 0;
+    int inUpto = 0;
+    int outUpto = 0;
+    while (inUpto < bytesIn.length) {
+      carry |= (bytesIn[inUpto++] & 0xff) << carryBits;
+      carryBits += 8;
+      while (carryBits >= 7) {
+        bytesOut[outUpto++] = (byte) (carry & 0x7f);
+        carry = carry >>> 7;
+        carryBits -= 7;
+      }
+    }
+    if (carryBits != 0) {
+      assert carryBits <= 7;
+      bytesOut[outUpto++] = (byte) (carry & 0x7f);
+    }
+    assert outUpto == bytesOut.length: "outUpto=" + outUpto + " bytesOut.length=" + bytesOut.length + " bytesIn.length=" + bytesIn.length + " carryBits=" + carryBits;
+
+    return new String(bytesOut, StandardCharsets.UTF_8);
+  }
+
+  /** Set the {@link PostingsFormat} for this field.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}.
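+   *
+   *  <p>Usage sketch (assumed: "Memory" must name a {@link PostingsFormat} registered on the
+   *  classpath):
+   *  <pre>
+   *    fieldTypes.setPostingsFormat("id", "Memory");
+   *  </pre> */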
+  public synchronized void setPostingsFormat(String fieldName, String postingsFormat) {
+    try {
+      // Will throw exception if this postingsFormat is unrecognized:
+      PostingsFormat.forName(postingsFormat);
+    } catch (IllegalArgumentException iae) {
+      // Insert field name into exc message
+      IllegalArgumentException iae2 = new IllegalArgumentException("field \"" + fieldName + "\": " + iae.getMessage());
+      iae2.initCause(iae);
+      throw iae2;
+    }
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.postingsFormat = postingsFormat;
+      fields.put(fieldName, current);
+      changed();
+    } else {
+      current.postingsFormat = postingsFormat;
+      changed();
+    }
+  }
+
+  public IndexableFieldType getIndexableFieldType(String fieldName) {
+    return getFieldType(fieldName);
+  }
+
+  public String getFieldTypeString(String fieldName) {
+    return getFieldType(fieldName).toString();
+  }
+
+  synchronized FieldType getFieldType(String fieldName) {
+    FieldType fieldType = fields.get(fieldName);
+    if (fieldType == null) {
+      List<String> fieldNames = new ArrayList<>(fields.keySet());
+      Collections.sort(fieldNames);
+      throw new IllegalArgumentException("unknown field \"" + fieldName + "\"; valid fields: " + fieldNames);
+    }
+    return fieldType;
+  }
+
+  public synchronized String getPostingsFormat(String fieldName) {
+    return getFieldType(fieldName).postingsFormat;
+  }
+
+  /** Set the {@link DocValuesFormat} for this field.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}. */
+  public synchronized void setDocValuesFormat(String fieldName, String docValuesFormat) {
+    try {
+      // Will throw exception if this docValuesFormat is unrecognized:
+      DocValuesFormat.forName(docValuesFormat);
+    } catch (IllegalArgumentException iae) {
+      // Insert field name into exc message
+      IllegalArgumentException iae2 = new IllegalArgumentException("field \"" + fieldName + "\": " + iae.getMessage());
+      iae2.initCause(iae);
+      throw iae2;
+    }
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.docValuesFormat = docValuesFormat;
+      fields.put(fieldName, current);
+      changed();
+    } else {
+      current.docValuesFormat = docValuesFormat;
+      changed();
+    }
+  }
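+
+  // Usage sketch for the two format setters above, given a FieldTypes instance
+  // (field and format names here are hypothetical; any name accepted by
+  // PostingsFormat.forName / DocValuesFormat.forName works):
+  //
+  //   fieldTypes.setPostingsFormat("id", "Memory");          // e.g. hold id terms in RAM
+  //   fieldTypes.setDocValuesFormat("popularity", "Direct");
+  //
+  // Both calls are no-ops if a custom Codec was passed to IndexWriter.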
+
+  private Similarity similarity = new PerFieldSimilarityWrapper() {
+      @Override
+      public Similarity get(String fieldName) {
+        FieldType field = fields.get(fieldName);
+        if (field == null) {
+          return defaultSimilarity;
+        }
+        if (field.similarity != null) {
+          return field.similarity;
+        } else {
+          return defaultSimilarity;
+        }
+      }
+    };
+
+  // TODO: can we just absorb Codec into FieldTypes somehow?  Seems silly to have PerFieldXXXFormat when FieldTypes is already handling the
+  // per-field-ness, except, if we still allow per-field-XXX to change at any time, we'd still need to write per-segment atts:
+  private final Codec codec = new Lucene50Codec() {
+      @Override
+      public PostingsFormat getPostingsFormatForField(String fieldName) {
+        FieldType field = fields.get(fieldName);
+        if (field == null) {
+          return super.getPostingsFormatForField(fieldName);
+        }
+
+        if (field.postingsFormat != null) {
+          // Field has a custom PF:
+          return PostingsFormat.forName(field.postingsFormat);
+        } else if (field.blockTreeMinItemsInBlock != null || field.fastRanges == Boolean.TRUE) {
+          // Field has the default PF, but we customize BlockTree params:
+          int minItemsInBlock, maxItemsInBlock;
+          int minItemsInAutoPrefix, maxItemsInAutoPrefix;
+          if (field.blockTreeMinItemsInBlock != null) {
+            assert field.blockTreeMaxItemsInBlock != null;
+            minItemsInBlock = field.blockTreeMinItemsInBlock.intValue();
+            maxItemsInBlock = field.blockTreeMaxItemsInBlock.intValue();
+          } else {
+            minItemsInBlock = BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE;
+            maxItemsInBlock = BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE;
+          }
+          if (field.blockTreeMinItemsInAutoPrefix != null) {
+            assert field.blockTreeMaxItemsInAutoPrefix != null;
+            minItemsInAutoPrefix = field.blockTreeMinItemsInAutoPrefix.intValue();
+            maxItemsInAutoPrefix = field.blockTreeMaxItemsInAutoPrefix.intValue();
+          } else if (field.fastRanges == Boolean.TRUE) {
+            minItemsInAutoPrefix = BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE;
+            maxItemsInAutoPrefix = BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE;
+          } else {
+            minItemsInAutoPrefix = 0;
+            maxItemsInAutoPrefix = 0;
+          }
+
+          return new Lucene50PostingsFormat(minItemsInBlock, maxItemsInBlock,
+                                            minItemsInAutoPrefix, maxItemsInAutoPrefix);
+        }
+        return super.getPostingsFormatForField(fieldName); 
+      }
+
+      @Override
+      public DocValuesFormat getDocValuesFormatForField(String fieldName) {
+        FieldType field = fields.get(fieldName);
+        if (field != null && field.docValuesFormat != null) {
+          return DocValuesFormat.forName(field.docValuesFormat);
+        }
+        return super.getDocValuesFormatForField(fieldName); 
+      }
+    };
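+
+  // Example of how the per-field codec above resolves (hypothetical field): a
+  // field with fastRanges enabled but no explicit block-tree settings gets
+  // new Lucene50PostingsFormat(25, 48, 25, 48), i.e. the BlockTreeTermsWriter
+  // defaults for both the block size and the auto-prefix size, while a field
+  // with neither customization falls through to the codec's default format.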
+
+  private static final Analyzer SINGLE_TOKEN_ANALYZER = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(final String fieldName) {
+        return new TokenStreamComponents(new SingleTokenTokenizer());
+      }
+    };
+
+  private abstract class FieldTypeAnalyzer extends DelegatingAnalyzerWrapper {
+    public FieldTypeAnalyzer() {
+      super(Analyzer.PER_FIELD_REUSE_STRATEGY);
+    }
+
+    @Override
+    public int getPositionIncrementGap(String fieldName) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getOffsetGap(String fieldName) {
+      throw new UnsupportedOperationException();
+    }
+
+    // TODO: what about wrapReader?
+  }
+
+  private final Analyzer indexAnalyzer = new FieldTypeAnalyzer() {
+      @Override
+      protected Analyzer getWrappedAnalyzer(String fieldName) {
+        FieldType field = fields.get(fieldName);
+        if (field == null) {
+          // Must be lenient in case app is using low-schema API during indexing:
+          return defaultIndexAnalyzer;
+        }
+        if (field.wrappedIndexAnalyzer != null) {
+          return field.wrappedIndexAnalyzer;
+        } else if (field.valueType == ValueType.ATOM) {
+          // BUG
+          illegalState(fieldName, "ATOM fields should not be analyzed during indexing");
+        }
+        return defaultIndexAnalyzer;
+      }
+    };
+
+  private final Analyzer queryAnalyzer = new FieldTypeAnalyzer() {
+      @Override
+      protected Analyzer getWrappedAnalyzer(String fieldName) {
+        FieldType field = fields.get(fieldName);
+        if (field == null) {
+          // Must be lenient in case app used low-schema API during indexing:
+          return defaultQueryAnalyzer;
+        }
+        if (field.wrappedQueryAnalyzer != null) {
+          return field.wrappedQueryAnalyzer;
+        } else if (field.valueType == ValueType.ATOM) {
+          return SINGLE_TOKEN_ANALYZER;
+        }
+        return defaultQueryAnalyzer;
+      }
+    };
+
+  /** Returns {@link Similarity} that returns the per-field Similarity. */
+  public Similarity getSimilarity() {
+    return similarity;
+  }
+
+  /** Returns {@link Codec} that returns the per-field formats. */
+  public Codec getCodec() {
+    if (readOnly) {
+      return null;
+    } else {
+      return codec;
+    }
+  }
+
+  /** Returns {@link Analyzer} that returns the per-field analyzer for use during indexing. */
+  public Analyzer getIndexAnalyzer() {
+    if (readOnly) {
+      return null;
+    } else {
+      return indexAnalyzer;
+    }
+  }
+
+  /** Returns {@link Analyzer} that returns the per-field analyzer for use during searching. */
+  public Analyzer getQueryAnalyzer() {
+    return queryAnalyzer;
+  }
+
+  // TODO: we could note that the field had a specific analyzer set, and then throw exc if it didn't get set again after load?
+
+  /** NOTE: analyzer does not persist, so each time you create {@code FieldTypes} from
+   *  {@link IndexWriter} or {@link IndexReader} you must set all per-field analyzers again. */
+  public synchronized void setAnalyzer(String fieldName, Analyzer analyzer) {
+    setIndexAnalyzer(fieldName, analyzer);
+    setQueryAnalyzer(fieldName, analyzer);
+  }
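+
+  // Sketch of the intended pattern (hypothetical analyzer and field name):
+  // because analyzers are not persisted, re-set them each time a FieldTypes is
+  // obtained, before indexing or querying:
+  //
+  //   fieldTypes.setAnalyzer("body", new StandardAnalyzer());
+  //
+  // Use setIndexAnalyzer/setQueryAnalyzer instead to configure the two sides
+  // separately.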
+
+  /** NOTE: analyzer does not persist, so each time you create {@code FieldTypes} from
+   *  {@link IndexWriter} or {@link IndexReader} you must set all per-field analyzers again. */
+  public synchronized void setIndexAnalyzer(String fieldName, Analyzer analyzer) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.indexAnalyzer = analyzer;
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.indexAnalyzer == null) {
+      boolean success = false;
+      try {
+        current.indexAnalyzer = analyzer;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.indexAnalyzer = null;
+        }
+      }
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else {
+      illegalState(fieldName, "indexAnalyzer was already set");
+    }
+  }
+
+  public synchronized void setDefaultSimilarity(Similarity sim) {
+    this.defaultSimilarity = sim;
+  }
+
+  public synchronized Similarity getDefaultSimilarity() {
+    return defaultSimilarity;
+  }
+
+  public synchronized void setDefaultQueryAnalyzer(Analyzer a) {
+    this.defaultQueryAnalyzer = a;
+  }
+
+  public synchronized Analyzer getDefaultQueryAnalyzer() {
+    return defaultQueryAnalyzer;
+  }
+
+  public synchronized Analyzer getIndexAnalyzer(String fieldName) {
+    return getFieldType(fieldName).indexAnalyzer;
+  }
+
+  /** NOTE: analyzer does not persist, so each time you create {@code FieldTypes} from
+   *  {@link IndexWriter} or {@link IndexReader} you must set all per-field analyzers again. */
+  public synchronized void setQueryAnalyzer(String fieldName, Analyzer analyzer) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.queryAnalyzer = analyzer;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.queryAnalyzer == null) {
+      boolean success = false;
+      try {
+        current.queryAnalyzer = analyzer;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.queryAnalyzer = null;
+        }
+      }
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else {
+      illegalState(fieldName, "queryAnalyzer was already set");
+    }
+  }
+
+  public synchronized Analyzer getQueryAnalyzer(String fieldName) {
+    return getFieldType(fieldName).queryAnalyzer;
+  }
+
+  /** NOTE: similarity does not persist, so each time you create {@code FieldTypes} from
+   *  {@link IndexWriter} or {@link IndexReader} you must set all per-field similarities again.  This can be changed at any time. */
+  public synchronized void setSimilarity(String fieldName, Similarity similarity) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.similarity = similarity;
+      fields.put(fieldName, current);
+      changed();
+    } else {
+      current.similarity = similarity;
+      changed();
+    }
+  }
+
+  public synchronized Similarity getSimilarity(String fieldName) {
+    return getFieldType(fieldName).similarity;
+  }
+
+  /** Notes that this field may have more than one value per document. */
+  public synchronized void setMultiValued(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.multiValued = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.multiValued == null) {
+      boolean success = false;
+      try {
+        current.multiValued = Boolean.TRUE;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.multiValued = null;
+        }
+      }
+      changed();
+    } else if (current.multiValued == Boolean.FALSE) {
+      illegalState(fieldName, "multiValued was already set to False");
+    }
+  }
+
+  /** Returns true if this field may have more than one value per document. */
+  public synchronized boolean getMultiValued(String fieldName) {
+    return getFieldType(fieldName).multiValued == Boolean.TRUE;
+  }
+  
+  /** Require that all tokens indexed for this field fall between the min and max
+   *  length, inclusive.  Any too-short or too-long tokens are silently discarded. */
+  public synchronized void setMinMaxTokenLength(String fieldName, int minTokenLength, int maxTokenLength) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.minTokenLength = minTokenLength;
+      current.maxTokenLength = maxTokenLength;
+      fields.put(fieldName, current);
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else if (current.minTokenLength == null ||
+               current.minTokenLength.intValue() != minTokenLength ||
+               current.maxTokenLength.intValue() != maxTokenLength) {
+      FieldType sav = new FieldType(current);
+      boolean success = false;
+      try {
+        current.minTokenLength = minTokenLength;
+        current.maxTokenLength = maxTokenLength;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    }
+  }
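+
+  // Example (hypothetical limits): keep only tokens of 2..15 chars for "body";
+  // shorter or longer tokens are dropped by the wrapped analyzer, not an error:
+  //
+  //   fieldTypes.setMinMaxTokenLength("body", 2, 15);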
+
+  public synchronized Integer getMinTokenLength(String fieldName) {
+    return getFieldType(fieldName).minTokenLength;
+  }
+
+  public synchronized Integer getMaxTokenLength(String fieldName) {
+    return getFieldType(fieldName).maxTokenLength;
+  }
+
+  public synchronized void setMaxTokenCount(String fieldName, int maxTokenCount) {
+    setMaxTokenCount(fieldName, maxTokenCount, false);
+  }
+
+  /** Only index up to maxTokenCount tokens for this field. */
+  public synchronized void setMaxTokenCount(String fieldName, int maxTokenCount, boolean consumeAllTokens) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.maxTokenCount = maxTokenCount;
+      current.consumeAllTokens = consumeAllTokens;
+      fields.put(fieldName, current);
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else if (current.maxTokenCount == null ||
+               current.maxTokenCount.intValue() != maxTokenCount ||
+               current.consumeAllTokens.booleanValue() != consumeAllTokens) {
+      Integer oldMax = current.maxTokenCount;
+      Boolean oldConsume = current.consumeAllTokens;
+      boolean success = false;
+      try {
+        current.maxTokenCount = maxTokenCount;
+        current.consumeAllTokens = consumeAllTokens;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.maxTokenCount = oldMax;
+          current.consumeAllTokens = oldConsume;
+        }
+      }
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    }
+  }
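+
+  // Example (hypothetical count): index at most the first 10000 tokens per
+  // "body" value; with consumeAllTokens=true the wrapped limit filter still
+  // exhausts the remaining tokens instead of stopping the stream early:
+  //
+  //   fieldTypes.setMaxTokenCount("body", 10000, true);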
+
+  public synchronized Integer getMaxTokenCount(String fieldName) {
+    return getFieldType(fieldName).maxTokenCount;
+  }
+
+  public synchronized Boolean getMaxTokenCountConsumeAllTokens(String fieldName) {
+    return getFieldType(fieldName).consumeAllTokens;
+  }
+
+  /** The gap that should be added to token positions between each multi-valued field. */
+  public synchronized void setAnalyzerPositionGap(String fieldName, int gap) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.analyzerPositionGap = gap;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.analyzerPositionGap == null) {
+      Integer oldValue = current.analyzerPositionGap;
+      boolean success = false;
+      try {
+        current.analyzerPositionGap = gap;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.analyzerPositionGap = oldValue;
+        }
+      }
+      changed();
+    } else if (current.analyzerPositionGap.intValue() != gap) {
+      illegalState(fieldName, "analyzerPositionGap was already set to " + current.analyzerPositionGap + "; cannot change again to " + gap);
+    }
+  }
+
+  /** The gap that should be added to character offsets between each multi-valued field. */
+  public synchronized void setAnalyzerOffsetGap(String fieldName, int gap) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.analyzerOffsetGap = gap;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.analyzerOffsetGap == null) {
+      Integer oldValue = current.analyzerOffsetGap;
+      boolean success = false;
+      try {
+        current.analyzerOffsetGap = gap;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.analyzerOffsetGap = oldValue;
+        }
+      }
+      changed();
+    } else if (current.analyzerOffsetGap.intValue() != gap) {
+      illegalState(fieldName, "analyzerOffsetGap was already set to " + current.analyzerOffsetGap + "; cannot change again to " + gap);
+    }
+  }
+
+  /** Sets the minimum number of terms in each term block in the terms dictionary.  These can be changed at any time, but changes only take
+   *  effect for newly written (flushed or merged) segments.  The default is 25; higher values make fewer, larger blocks, which require less
+   *  heap in the {@link IndexReader} but slow down term lookups.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}.
+   **/
+  public synchronized void setTermsDictBlockSize(String fieldName, int minItemsInBlock) {
+    setTermsDictBlockSize(fieldName, minItemsInBlock, 2*(minItemsInBlock-1));
+  }
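+
+  // Example (hypothetical min): this one-arg overload derives the max as
+  // 2*(min-1), so setTermsDictBlockSize("body", 25) is equivalent to
+  // setTermsDictBlockSize("body", 25, 48); setTermsDictAutoPrefixSize below
+  // applies the identical derivation.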
+
+  /** Sets the minimum and maximum number of terms in each term block in the terms dictionary.  These can be changed at any time, but changes only take
+   *  effect for newly written (flushed or merged) segments.  The defaults are 25 and 48; higher values make fewer, larger blocks, which require less
+   *  heap in the {@link IndexReader} but slow down term lookups.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}.*/
+  public synchronized void setTermsDictBlockSize(String fieldName, int minItemsInBlock, int maxItemsInBlock) {
+    ensureWritable();
+
+    try {
+      BlockTreeTermsWriter.validateSettings(minItemsInBlock, maxItemsInBlock);
+    } catch (IllegalArgumentException iae) {
+      illegalState(fieldName, iae.getMessage());
+    }
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.blockTreeMinItemsInBlock = minItemsInBlock;
+      current.blockTreeMaxItemsInBlock = maxItemsInBlock;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.blockTreeMinItemsInBlock == null) {
+      boolean success = false;
+      try {
+        current.blockTreeMinItemsInBlock = minItemsInBlock;
+        current.blockTreeMaxItemsInBlock = maxItemsInBlock;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.blockTreeMinItemsInBlock = null;
+          current.blockTreeMaxItemsInBlock = null;
+        }
+      }
+      changed();
+    } else {
+      current.blockTreeMinItemsInBlock = minItemsInBlock;
+      current.blockTreeMaxItemsInBlock = maxItemsInBlock;
+      changed();
+      assert current.validate();
+    }
+  }
+
+  /** Sets the minimum number of terms in each auto-prefix term block in the terms dictionary.  These can be changed at any time, but changes only take
+   *  effect for newly written (flushed or merged) segments.  The default is 25.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}. */
+  public synchronized void setTermsDictAutoPrefixSize(String fieldName, int minItemsInAutoPrefix) {
+    setTermsDictAutoPrefixSize(fieldName, minItemsInAutoPrefix, 2*(minItemsInAutoPrefix-1));
+  }
+
+  /** Sets the minimum and maximum number of terms in each auto-prefix term block in the terms dictionary.  These can be changed at any time, but changes only take
+   *  effect for newly written (flushed or merged) segments.  The defaults are 25 and 48.  This method has no effect if you pass your own {@link Codec} when
+   *  creating {@link IndexWriter}. */
+  public synchronized void setTermsDictAutoPrefixSize(String fieldName, int minItemsInAutoPrefix, int maxItemsInAutoPrefix) {
+    ensureWritable();
+
+    try {
+      BlockTreeTermsWriter.validateAutoPrefixSettings(minItemsInAutoPrefix, maxItemsInAutoPrefix);
+    } catch (IllegalArgumentException iae) {
+      illegalState(fieldName, iae.getMessage());
+    }
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.blockTreeMinItemsInAutoPrefix = minItemsInAutoPrefix;
+      current.blockTreeMaxItemsInAutoPrefix = maxItemsInAutoPrefix;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.blockTreeMinItemsInAutoPrefix == null) {
+      boolean success = false;
+      try {
+        current.blockTreeMinItemsInAutoPrefix = minItemsInAutoPrefix;
+        current.blockTreeMaxItemsInAutoPrefix = maxItemsInAutoPrefix;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.blockTreeMinItemsInAutoPrefix = null;
+          current.blockTreeMaxItemsInAutoPrefix = null;
+        }
+      }
+      changed();
+    } else {
+      current.blockTreeMinItemsInAutoPrefix = minItemsInAutoPrefix;
+      current.blockTreeMaxItemsInAutoPrefix = maxItemsInAutoPrefix;
+      changed();
+      assert current.validate();
+    }
+  }
+
+  /** Enables sorting for this field, using doc values of the appropriate type. */
+  public synchronized void enableSorting(String fieldName) {
+    enableSorting(fieldName, false);
+  }
+
+  public synchronized void enableSorting(String fieldName, boolean reversed) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortable = Boolean.TRUE;
+      current.sortReversed = reversed;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.sortable == null) {
+      assert current.sortReversed == null;
+      boolean success = false;
+      try {
+        current.sortable = Boolean.TRUE;
+        current.sortReversed = reversed;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.sortable = null;
+          current.sortReversed = null;
+        }
+      }
+      changed();
+    } else if (current.sortable == Boolean.FALSE) {
+      illegalState(fieldName, "sorting was already disabled");
+    } else if (current.sortReversed == null || current.sortReversed.booleanValue() != reversed) {
+      current.sortReversed = reversed;
+      changed();
+    }
+  }
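+
+  // Example (hypothetical field): enable descending sort on "price".  Calling
+  // enableSorting again with a different reversed flag just flips the default
+  // direction, but enabling after disableSorting is an illegal state:
+  //
+  //   fieldTypes.enableSorting("price", true);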
+
+  /** Disables sorting for this field. */
+  public synchronized void disableSorting(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortable = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.sortable != Boolean.FALSE) {
+      // nocommit don't we need to ... turn off DocValues if they were only on because of sorting?
+      current.sortable = Boolean.FALSE;
+      current.sortReversed = null;
+      changed();
+    }
+  }
+
+  public synchronized boolean getSorted(String fieldName) {
+    return getFieldType(fieldName).sortable == Boolean.TRUE;
+  }
+
+  private boolean isNumericType(ValueType type) {
+    return type == ValueType.INT ||
+      type == ValueType.HALF_FLOAT ||
+      type == ValueType.FLOAT ||
+      type == ValueType.LONG ||
+      type == ValueType.DOUBLE ||
+      type == ValueType.DATE;
+  }
+
+  /** For multi-valued numeric fields, sets which value should be selected for sorting.  This can be changed at any time. */
+  public synchronized void setMultiValuedNumericSortSelector(String fieldName, SortedNumericSelector.Type selector) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    if (current.multiValued != Boolean.TRUE) {
+      illegalState(fieldName, "this field is not multi-valued");
+    }
+    if (isNumericType(current.valueType) == false) {
+      illegalState(fieldName, "value type must be INT, HALF_FLOAT, FLOAT, LONG, DOUBLE or DATE; got value type=" + current.valueType);
+    }
+    if (current.sortable != Boolean.TRUE) {
+      illegalState(fieldName, "field is not enabled for sorting");
+    }
+    if (current.numericSelector != selector) {
+      current.numericSelector = selector;
+      changed(false);
+    }
+  }
+
+  public synchronized SortedNumericSelector.Type getMultiValuedNumericSortSelector(String fieldName) {
+    FieldType current = getFieldType(fieldName);
+    if (current.multiValued != Boolean.TRUE) {
+      illegalState(fieldName, "this field is not multi-valued");
+    }
+    if (isNumericType(current.valueType) == false) {
+      illegalState(fieldName, "value type must be INT, HALF_FLOAT, FLOAT, LONG, DOUBLE or DATE; got value type=" + current.valueType);
+    }
+    if (current.sortable != Boolean.TRUE) {
+      illegalState(fieldName, "field is not enabled for sorting");
+    }
+    return current.numericSelector;
+  }
+
+  /** For multi-valued ATOM, BIG_INT and BIG_DECIMAL fields, sets which value should be selected for sorting.  This can be changed at any time. */
+  public synchronized void setMultiValuedStringSortSelector(String fieldName, SortedSetSelector.Type selector) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    if (current.multiValued != Boolean.TRUE) {
+      illegalState(fieldName, "this field is not multi-valued");
+    }
+    if (current.valueType != ValueType.BIG_INT &&
+        current.valueType != ValueType.BIG_DECIMAL &&
+        current.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "value type must be BIG_INT, BIG_DECIMAL or ATOM; got value type=" + current.valueType);
+    }
+    if (current.sortable != Boolean.TRUE) {
+      illegalState(fieldName, "field is not enabled for sorting");
+    }
+    if (current.sortedSetSelector != selector) {
+      current.sortedSetSelector = selector;
+      changed(false);
+    }
+  }
+
+  public synchronized SortedSetSelector.Type getMultiValuedStringSortSelector(String fieldName) {
+    FieldType current = getFieldType(fieldName);
+    if (current.multiValued != Boolean.TRUE) {
+      illegalState(fieldName, "this field is not multi-valued");
+    }
+    if (current.valueType != ValueType.BIG_INT &&
+        current.valueType != ValueType.BIG_DECIMAL &&
+        current.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "value type must be BIG_INT, BIG_DECIMAL or ATOM; got value type=" + current.valueType);
+    }
+    if (current.sortable != Boolean.TRUE) {
+      illegalState(fieldName, "field is not enabled for sorting");
+    }
+    return current.sortedSetSelector;
+  }
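+
+  // Example (hypothetical field): for a multi-valued numeric field that is
+  // already sortable, choose which of a document's values drives the sort:
+  //
+  //   fieldTypes.setMultiValuedNumericSortSelector("prices", SortedNumericSelector.Type.MIN);
+  //
+  // setMultiValuedStringSortSelector plays the same role for ATOM, BIG_INT and
+  // BIG_DECIMAL fields via SortedSetSelector.Type.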
+
+  public synchronized void setSortMissingFirst(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortMissingLast = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else {
+      Boolean currentValue = current.sortMissingLast;
+      if (currentValue != Boolean.FALSE) {
+        current.sortMissingLast = Boolean.FALSE;
+        boolean success = false;
+        try {
+          current.validate();
+          success = true;
+        } finally {
+          if (success == false) {
+            current.sortMissingLast = currentValue;
+          }
+        }
+        changed(false);
+      }
+    }
+  }
+
+  public synchronized void setSortMissingLast(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortMissingLast = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else {
+      Boolean currentValue = current.sortMissingLast;
+      if (currentValue != Boolean.TRUE) {
+        current.sortMissingLast = Boolean.TRUE;
+        boolean success = false;
+        try {
+          current.validate();
+          success = true;
+        } finally {
+          if (success == false) {
+            current.sortMissingLast = currentValue;
+          }
+        }
+        changed(false);
+      }
+    }
+  }
+
+  /** Enables fast range filters for this field, using auto-prefix terms. */
+  public synchronized void enableFastRanges(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.fastRanges = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.fastRanges == null) {
+      boolean success = false;
+      try {
+        current.fastRanges = Boolean.TRUE;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.fastRanges = null;
+        }
+      }
+      changed();
+    } else if (current.fastRanges == Boolean.FALSE) {
+      illegalState(fieldName, "fastRanges was already disabled");
+    }
+  }
+
+  /** Disables fast range filters for this field.  You can do this at any time, but once it's disabled you cannot re-enable it. */
+  public synchronized void disableFastRanges(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.fastRanges = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.fastRanges != Boolean.FALSE) {
+      current.fastRanges = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getFastRanges(String fieldName) {
+    return getFieldType(fieldName).fastRanges == Boolean.TRUE;
+  }
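+
+  // Example (hypothetical field): turn on auto-prefix backed range filters for
+  // "timestamp" before any documents are indexed; once disabled, fastRanges
+  // cannot be re-enabled for that field:
+  //
+  //   fieldTypes.enableFastRanges("timestamp");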
+
+  /** Enables highlighting for this field, using postings highlighter. */
+  public synchronized void enableHighlighting(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.highlighted = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.highlighted == null) {
+      boolean success = false;
+      try {
+        current.highlighted = Boolean.TRUE;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.highlighted = null;
+        }
+      }
+      changed();
+    } else if (current.highlighted == Boolean.FALSE) {
+      illegalState(fieldName, "highlighting was already disabled");
+    }
+  }
+
+  /** Disables highlighting for this field. */
+  public synchronized void disableHighlighting(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.highlighted = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.highlighted != Boolean.TRUE) {
+      Boolean currentValue = current.highlighted;
+      boolean success = false;
+      try {
+        current.highlighted = Boolean.FALSE;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.highlighted = currentValue;
+        }
+      }
+      changed();
+    }
+  }
+
+  public synchronized boolean getHighlighted(String fieldName) {
+    return getFieldType(fieldName).highlighted == Boolean.TRUE;
+  }
+
+  /** Enables norms for this field.  This is only allowed if norms were not already disabled. */
+  public synchronized void enableNorms(String fieldName) {
+    // throws exc if norms were already disabled
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.indexNorms = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.indexNorms == null) {
+      boolean success = false;
+      try {
+        current.indexNorms = Boolean.TRUE;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.indexNorms = null;
+        }
+      }
+      changed();
+    } else if (current.indexNorms == Boolean.FALSE) {
+      illegalState(fieldName, "norms were already disable");
+    }
+  }
+
+  /** Disable norms for this field. */
+  public synchronized void disableNorms(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.indexNorms = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.indexNorms != Boolean.FALSE) {
+      current.indexNorms = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getNorms(String fieldName) {
+    return getFieldType(fieldName).indexNorms == Boolean.TRUE;
+  }
+
+  /** Sets the byte width (maximum precision) for this BIG_INT field. */
+  public synchronized void setBigIntByteWidth(String fieldName, int bytes) {
+    if (bytes <= 0) {
+      illegalState(fieldName, "bytes must be > 0; got: " + bytes);
+    }
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.BIG_INT;
+      current.bigIntByteWidth = bytes;
+      fields.put(fieldName, current);
+      current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.BIG_INT;
+      current.bigIntByteWidth = bytes;
+      current.setDefaults();
+      changed();
+    } else if (current.valueType != ValueType.BIG_INT) {
+      illegalState(fieldName, "can only setBigIntByteWidth on BIG_INT fields; got value type=" + current.valueType);
+    } else if (current.bigIntByteWidth == null) {
+      current.bigIntByteWidth = bytes;
+      changed();
+    } else if (current.bigIntByteWidth.intValue() != bytes) {
+      illegalState(fieldName, "cannot change bigIntByteWidth from " + current.bigIntByteWidth + " to " + bytes);
+    }
+  }
+
+  public int getBigIntByteWidth(String fieldName) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    if (current.valueType != ValueType.BIG_INT) {
+      illegalState(fieldName, "field is not BIG_INT; got value type=" + current.valueType);
+    }
+    if (current.bigIntByteWidth == null) {
+      illegalState(fieldName, "no byte width was set for this BIG_INT field");
+    }
+    return current.bigIntByteWidth;
+  }
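+
+  // Example (hypothetical width): fix "counter" at 16 bytes, enough for any
+  // 128-bit two's-complement value; the width cannot change once set:
+  //
+  //   fieldTypes.setBigIntByteWidth("counter", 16);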
+
+
+  /** Sets the byte width and scale for this big decimal field. */
+  public synchronized void setBigDecimalByteWidthAndScale(String fieldName, int maxBytes, int scale) {
+
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      //current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.BIG_DECIMAL;
+      current.bigIntByteWidth = maxBytes;
+      current.bigDecimalScale = scale;
+      fields.put(fieldName, current);
+      //current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      //current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.BIG_DECIMAL;
+      current.bigIntByteWidth = maxBytes;
+      current.bigDecimalScale = scale;
+      //current.setDefaults();
+      changed();
+    } else if (current.valueType != ValueType.BIG_DECIMAL) {
+      illegalState(fieldName, "can only setBigDecimalByteWidthAndScale on BIG_DECIMAL fields; got value type=" + current.valueType);
+    } else if (current.bigDecimalScale == null) {
+      current.bigIntByteWidth = maxBytes;
+      current.bigDecimalScale = scale;
+      changed();
+    } else if (current.bigDecimalScale.intValue() != scale) {
+      illegalState(fieldName, "cannot change bigDecimalScale from " + current.bigDecimalScale + " to " + scale);
+    } else if (current.bigIntByteWidth.intValue() != maxBytes) {
+      illegalState(fieldName, "cannot change bigIntByteWidth from " + current.bigIntByteWidth + " to " + maxBytes);
+    }
+  }
+
+  public int getBigDecimalScale(String fieldName) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    if (current.valueType != ValueType.BIG_DECIMAL) {
+      illegalState(fieldName, "field is not value type BIG_DECIMAL; got value type=" + current.valueType);
+    }
+    if (current.bigDecimalScale == null) {
+      illegalState(fieldName, "no scale was set for this BIG_DECIMAL field");
+    }
+    return current.bigDecimalScale;
+  }
+
+  public int getBigDecimalByteWidth(String fieldName) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    if (current.valueType != ValueType.BIG_DECIMAL) {
+      illegalState(fieldName, "field is not value type BIG_DECIMAL; got value type=" + current.valueType);
+    }
+    if (current.bigIntByteWidth == null) {
+      illegalState(fieldName, "no byte width was set for this BIG_DECIMAL field");
+    }
+    return current.bigIntByteWidth;
+  }
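+
+  // Example (hypothetical width/scale): a price field whose values carry two
+  // decimal digits (scale=2, so 12.34 has unscaled value 1234) and whose
+  // unscaled values fit in 8 bytes:
+  //
+  //   fieldTypes.setBigDecimalByteWidthAndScale("price", 8, 2);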
+
+  /** All indexed terms are reversed before indexing, using {@code ReverseStringFilter}.  This requires that Lucene's analysis module is on
+   *  the classpath. */
+  public synchronized void setReversedTerms(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.reversedTerms = true;
+      fields.put(fieldName, current);
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else if (current.reversedTerms == null) {
+      current.reversedTerms = true;
+      boolean success = false;
+      try {
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.reversedTerms = null;
+        }
+      }
+      current.reWrapAnalyzers(defaultIndexAnalyzer, defaultQueryAnalyzer);
+      changed();
+    } else if (current.reversedTerms != Boolean.TRUE) {
+      illegalState(fieldName, "can only setReversedTerms before the field is indexed");
+    }
+  }
+
+  public Boolean getReversedTerms(String fieldName) {
+    // field must exist
+    FieldType current = getFieldType(fieldName);
+    return current.reversedTerms; 
+  }
+
+  /** Store values for this field.  This can be changed at any time. */
+  public synchronized void enableStored(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.stored = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.stored != Boolean.TRUE) {
+      current.stored = Boolean.TRUE;
+      changed();
+    }
+  }
+
+  /** Do not store values for this field.  This can be changed at any time. */
+  public synchronized void disableStored(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.stored = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.stored == null || current.stored == Boolean.TRUE) {
+      current.stored = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  /** Whether this field's value is stored. */
+  public synchronized boolean getStored(String fieldName) {
+    return getFieldType(fieldName).stored == Boolean.TRUE;
+  }
+
+  /** Enable term vectors for this field.  This can be changed at any time. */
+  public synchronized void enableTermVectors(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storeTermVectors = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.storeTermVectors != Boolean.TRUE) {
+      current.storeTermVectors = Boolean.TRUE;
+      changed();
+    }
+  }
+
+  /** Disable term vectors for this field.  This can be changed at any time. */
+  public synchronized void disableTermVectors(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storeTermVectors = Boolean.FALSE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.storeTermVectors != Boolean.FALSE) {
+      current.storeTermVectors = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getTermVectors(String fieldName) {
+    return getFieldType(fieldName).storeTermVectors == Boolean.TRUE;
+  }
+
+  /** Enable term vector offsets for this field.  This can be changed at any time. */
+  public synchronized void enableTermVectorOffsets(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null || current.storeTermVectors != Boolean.TRUE) {
+      illegalState(fieldName, "cannot enable termVectorOffsets when termVectors haven't been enabled");
+    }
+    if (current.storeTermVectorOffsets != Boolean.TRUE) {
+      current.storeTermVectorOffsets = Boolean.TRUE;
+      changed();
+    }
+  }
+
+  /** Disable term vector offsets for this field.  This can be changed at any time. */
+  public synchronized void disableTermVectorOffsets(String fieldName) {
+    FieldType current = getFieldType(fieldName);
+    if (current.storeTermVectorOffsets == Boolean.TRUE) {
+      current.storeTermVectorOffsets = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getTermVectorOffsets(String fieldName) {
+    return getFieldType(fieldName).storeTermVectorOffsets == Boolean.TRUE;
+  }
+
+  /** Enable term vector positions for this field.  This can be changed at any time. */
+  public synchronized void enableTermVectorPositions(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null || current.storeTermVectors != Boolean.TRUE) {
+      illegalState(fieldName, "cannot enable termVectorPositions when termVectors haven't been enabled");
+    }
+    if (current.storeTermVectorPositions != Boolean.TRUE) {
+      current.storeTermVectorPositions = Boolean.TRUE;
+      changed();
+    }
+  }
+
+  /** Disable term vector positions for this field.  This can be changed at any time. */
+  public synchronized void disableTermVectorPositions(String fieldName) {
+    FieldType current = getFieldType(fieldName);
+    if (current.storeTermVectorPositions == Boolean.TRUE) {
+      current.storeTermVectorPositions = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getTermVectorPositions(String fieldName) {
+    return getFieldType(fieldName).storeTermVectorPositions == Boolean.TRUE;
+  }
+
+  /** Enable term vector payloads for this field.  This can be changed at any time. */
+  public synchronized void enableTermVectorPayloads(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null || current.storeTermVectors != Boolean.TRUE) {
+      illegalState(fieldName, "cannot enable termVectorPayloads when termVectors haven't been enabled");
+    }
+    if (current.storeTermVectorPositions != Boolean.TRUE) {
+      illegalState(fieldName, "cannot enable termVectorPayloads when termVectorPositions haven't been enabled");
+    }
+    if (current.storeTermVectorPayloads != Boolean.TRUE) {
+      current.storeTermVectorPayloads = Boolean.TRUE;
+      changed();
+    }
+  }
+
+  /** Disable term vector payloads for this field.  This can be changed at any time. */
+  public synchronized void disableTermVectorPayloads(String fieldName) {
+    FieldType current = getFieldType(fieldName);
+    if (current.storeTermVectorPayloads == Boolean.TRUE) {
+      current.storeTermVectorPayloads = Boolean.FALSE;
+      changed();
+    }
+  }
+
+  public synchronized boolean getTermVectorPayloads(String fieldName) {
+    return getFieldType(fieldName).storeTermVectorPayloads == Boolean.TRUE;
+  }
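+
+  // Example (hypothetical field): the term vector flags must be enabled in
+  // dependency order: vectors first, then positions, then payloads (offsets
+  // only require vectors):
+  //
+  //   fieldTypes.enableTermVectors("body");
+  //   fieldTypes.enableTermVectorPositions("body");
+  //   fieldTypes.enableTermVectorPayloads("body");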
+
+  /** Changes index options for this field.  This can be set to any
+   *  value if it's not already set for the provided field; otherwise
+   *  it can only be downgraded as low as DOCS but never unset
+   *  entirely (once indexed, always indexed). */
+  public synchronized void setIndexOptions(String fieldName, IndexOptions indexOptions) {
+    ensureWritable();
+    if (indexOptions == null) {
+      throw new NullPointerException("IndexOptions must not be null (field: \"" + fieldName + "\")");
+    }
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.indexOptions = indexOptions;
+      current.indexOptionsSet = true;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.indexOptionsSet == false) {
+      assert current.indexOptions == IndexOptions.NONE;
+      boolean success = false;
+      try {
+        current.indexOptions = indexOptions;
+        current.indexOptionsSet = true;
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.indexOptions = IndexOptions.NONE;
+          current.indexOptionsSet = false;
+        }
+      }
+      changed();
+    } else if (current.indexOptions != IndexOptions.NONE && current.indexOptions != indexOptions) {
+      assert current.indexOptionsSet;
+      // Only allow downgrading IndexOptions:
+      if (current.indexOptions.compareTo(indexOptions) < 0) {
+        illegalState(fieldName, "cannot upgrade indexOptions from " + current.indexOptions + " to " + indexOptions);
+      }
+      current.indexOptions = indexOptions;
+      changed();
+    }
+  }
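+
+  // Example of the downgrade-only rule (hypothetical field): a field indexed
+  // with positions may later drop to DOCS, but can never climb back up:
+  //
+  //   fieldTypes.setIndexOptions("body", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+  //   fieldTypes.setIndexOptions("body", IndexOptions.DOCS);  // allowed: downgrade
+  //   // a later setIndexOptions("body", IndexOptions.DOCS_AND_FREQS) would throw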
+
+  public synchronized IndexOptions getIndexOptions(String fieldName) {
+    // Field must exist:
+    return getFieldType(fieldName).indexOptions;
+  }
+
+  public synchronized void disableDocValues(String fieldName) {
+    setDocValuesType(fieldName, DocValuesType.NONE);
+  }
+
+  public synchronized void setDocValuesType(String fieldName, DocValuesType dvType) {
+    ensureWritable();
+    if (dvType == null) {
+      throw new NullPointerException("docValuesType cannot be null (field: \"" + fieldName + "\")");
+    }
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.docValuesType = dvType;
+      current.docValuesTypeSet = true;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.docValuesTypeSet == false) {
+      boolean success = false;
+      assert current.docValuesType == DocValuesType.NONE;
+      current.docValuesTypeSet = true;
+      current.docValuesType = dvType;
+      try {
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.docValuesType = DocValuesType.NONE;
+          current.docValuesTypeSet = false;
+        }
+      }
+      changed();
+    } else if (current.docValuesType != dvType) {
+      illegalState(fieldName, "cannot change docValuesType from " + current.docValuesType + " to " + dvType);
+    }
+  }
+
+  public synchronized DocValuesType getDocValuesType(String fieldName) {
+    // Field must exist:
+    return getFieldType(fieldName).docValuesType;
+  }
+
+  synchronized void recordValueType(String fieldName, ValueType valueType) {
+    recordValueType(fieldName, valueType, false);
+  }
+
+  synchronized void recordValueType(String fieldName, ValueType valueType, boolean isUnique) {
+    ensureWritable();
+    indexedDocs = true;
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      if (valueType == ValueType.BIG_INT) {
+        illegalState(fieldName, "you must first set the byte width for BIG_INT fields");
+      }
+      if (valueType == ValueType.BIG_DECIMAL) {
+        illegalState(fieldName, "you must first set the scale and byte width for BIG_DECIMAL fields");
+      }
+      current = newFieldType(fieldName);
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = valueType;
+      current.isUnique = isUnique;
+      fields.put(fieldName, current);
+      current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      if (valueType == ValueType.BIG_INT) {
+        illegalState(fieldName, "you must first set the byte width for BIG_INT fields");
+      }
+      if (valueType == ValueType.BIG_DECIMAL) {
+        illegalState(fieldName, "you must first set the scale and byte width for BIG_DECIMAL fields");
+      }
+
+      if (current.isUnique != null && current.isUnique.booleanValue() != isUnique) {
+        illegalState(fieldName, "cannot change to isUnique to " + isUnique + ": field was already with isUnique=" + current.isUnique);
+      }
+
+      FieldType sav = new FieldType(current);
+      // This can happen if e.g. the app first calls FieldTypes.enableStored(...)
+      boolean success = false;
+      try {
+        current.isUnique = isUnique;
+        assert current.storedOnly == null;
+        current.storedOnly = Boolean.FALSE;
+        current.valueType = valueType;
+        current.setDefaults();
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      changed();
+    } else if (current.valueType != valueType) {
+      illegalState(fieldName, "cannot change from value type " + current.valueType + " to " + valueType);
+    } else if (current.storedOnly == null) {    
+      current.storedOnly = false;
+      if (current.valueType == ValueType.BIG_DECIMAL) {
+        current.setDefaults();
+      }
+    } else if (current.storedOnly == Boolean.TRUE) {
+      illegalState(fieldName, "this field is only stored; use addStoredXXX instead");
+    }
+
+    if (current.isUnique == null) {
+      current.isUnique = isUnique;
+      changed();
+    } else if (current.isUnique != isUnique) {
+      illegalState(fieldName, "cannot change isUnique from " + current.isUnique + " to " + isUnique);
+    }
+  }
+
+  synchronized void recordStringAtomValueType(String fieldName, boolean isUnique) {
+    ensureWritable();
+    indexedDocs = true;
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.ATOM;
+      current.isBinary = Boolean.FALSE;
+      current.isUnique = isUnique;
+      fields.put(fieldName, current);
+      current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      if (current.isUnique != null && current.isUnique.booleanValue() != isUnique) {
+        illegalState(fieldName, "cannot change to isUnique to " + isUnique + ": field was already with isUnique=" + current.isUnique);
+      }
+
+      FieldType sav = new FieldType(current);
+      // This can happen if e.g. the app first calls FieldTypes.enableStored(...)
+      boolean success = false;
+      try {
+        current.isBinary = Boolean.FALSE;
+        current.isUnique = isUnique;
+        assert current.storedOnly == null;
+        current.storedOnly = Boolean.FALSE;
+        current.valueType = ValueType.ATOM;
+        current.setDefaults();
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      changed();
+    } else if (current.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "cannot change from value type " + current.valueType + " to ATOM");
+    } else if (current.isBinary != Boolean.FALSE) {
+      illegalState(fieldName, "cannot change from binary to non-binary ATOM");
+    } else if (current.isUnique != isUnique) {
+      illegalState(fieldName, "cannot change isUnique from " + current.isUnique + " to " + isUnique);
+    }
+  }
+
+  synchronized void recordBinaryAtomValueType(String fieldName, boolean isUnique) {
+    ensureWritable();
+    indexedDocs = true;
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.ATOM;
+      current.isBinary = Boolean.TRUE;
+      current.isUnique = isUnique;
+      fields.put(fieldName, current);
+      current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      if (current.isUnique != null && current.isUnique.booleanValue() != isUnique) {
+        illegalState(fieldName, "cannot change to isUnique to " + isUnique + ": field was already with isUnique=" + current.isUnique);
+      }
+
+      FieldType sav = new FieldType(current);
+      // This can happen if e.g. the app first calls FieldTypes.enableStored(...)
+      boolean success = false;
+      try {
+        current.isBinary = Boolean.TRUE;
+        current.isUnique = isUnique;
+        assert current.storedOnly == null;
+        current.storedOnly = Boolean.FALSE;
+        current.valueType = ValueType.ATOM;
+        current.setDefaults();
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      changed();
+    } else if (current.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "cannot change from value type " + current.valueType + " to ATOM");
+    } else if (current.isBinary != Boolean.TRUE) {
+      illegalState(fieldName, "cannot change from string to binary ATOM");
+    } else if (current.isUnique != isUnique) {
+      illegalState(fieldName, "cannot change isUnique from " + current.isUnique + " to " + isUnique);
+    }
+  }
+
+  synchronized void recordStoredValueType(String fieldName, ValueType valueType) {
+    ensureWritable();
+    indexedDocs = true;
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      if (valueType == ValueType.BIG_DECIMAL) {
+        illegalState(fieldName, "cannot addStored: you must first record the byte width and scale for this BIG_DECIMAL field");
+      }
+      current = newFieldType(fieldName);
+      current.valueType = valueType;
+      current.storedOnly = Boolean.TRUE;
+      current.isUnique = Boolean.FALSE;
+      current.indexOptionsSet = true;
+      current.indexOptions = IndexOptions.NONE;
+      current.docValuesTypeSet = true;
+      current.docValuesType = DocValuesType.NONE;
+      fields.put(fieldName, current);
+      current.setDefaults();
+      changed();
+
+    } else if (current.storedOnly == Boolean.FALSE) {
+      illegalState(fieldName, "cannot addStored: field was already added non-stored");
+    } else if (current.storedOnly == null) {
+
+      if (valueType == ValueType.BIG_DECIMAL && current.bigDecimalScale == null) {
+        illegalState(fieldName, "cannot addStored: you must first record the byte width and scale for this BIG_DECIMAL field");
+      }
+
+      if (current.indexOptionsSet && current.indexOptions != IndexOptions.NONE) {
+        illegalState(fieldName, "cannot addStored: field is already indexed with indexOptions=" + current.indexOptions);
+      }
+
+      // nocommit why not?
+      /*
+      if (current.docValuesTypeSet && current.docValuesType != DocValuesType.NONE) {
+        illegalState(fieldName, "cannot addStored: field already has docValuesType=" + current.docValuesType);
+      }
+      */
+
+      // All methods that set valueType also set storedOnly to false, except for BIG_DECIMAL:
+      assert current.valueType == ValueType.NONE || (current.valueType == ValueType.BIG_DECIMAL && valueType == ValueType.BIG_DECIMAL);
+
+      FieldType sav = new FieldType(current);
+      boolean success = false;
+      try {
+        current.storedOnly = Boolean.TRUE;
+        current.valueType = valueType;
+        current.indexOptions = IndexOptions.NONE;
+        current.indexOptionsSet = true;
+        if (current.docValuesTypeSet == false) {
+          current.docValuesType = DocValuesType.NONE;
+          current.docValuesTypeSet = true;
+        }
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      current.setDefaults();
+      changed();
+    }
+  }
+
+  synchronized void recordLargeTextType(String fieldName, boolean allowStored, boolean allowIndexed) {
+    ensureWritable();
+    indexedDocs = true;
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.storedOnly = Boolean.FALSE;
+      current.valueType = ValueType.TEXT;
+      fields.put(fieldName, current);
+      if (allowStored == false) {
+        current.stored = Boolean.FALSE;
+      }
+      if (allowIndexed == false) {
+        assert current.indexOptions == IndexOptions.NONE: "got " + current.indexOptions;
+        current.indexOptionsSet = true;
+      }
+      current.setDefaults();
+      changed();
+    } else if (current.valueType == ValueType.NONE) {
+      // This can happen if e.g. the app first calls FieldTypes.enableStored(...)
+      FieldType sav = new FieldType(current);
+      boolean success = false;
+      try {
+        assert current.storedOnly == null;
+        current.storedOnly = Boolean.FALSE;
+        current.valueType = ValueType.TEXT;
+        if (allowStored == false) {
+          if (current.stored == Boolean.TRUE) {
+            illegalState(fieldName, "can only store String large text fields");
+          } else if (current.stored == null) {
+            current.stored = Boolean.FALSE;
+          }
+        }
+        if (allowIndexed == false) {
+          if (current.indexOptionsSet == false) {
+            assert current.indexOptions == IndexOptions.NONE;
+            current.indexOptionsSet = true;
+          } else if (current.indexOptions != IndexOptions.NONE) {
+            illegalState(fieldName, "this field is already indexed with indexOptions=" + current.indexOptions);
+          }
+        }
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+      current.setDefaults();
+      changed();
+    } else if (current.valueType != ValueType.TEXT) {
+      illegalState(fieldName, "cannot change from value type " + current.valueType + " to " + ValueType.TEXT);
+    } else if (allowIndexed == false && current.indexOptionsSet && current.indexOptions != IndexOptions.NONE) {
+      illegalState(fieldName, "this field is already indexed with indexOptions=" + current.indexOptions);
+    } else if (allowIndexed && current.indexOptionsSet && current.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "this field is already disabled for indexing");
+    } else if (allowStored == false && current.stored == Boolean.TRUE) {
+      illegalState(fieldName, "this field was already enabled for storing");
+    }
+  }
+
+  public void setSortLocale(String fieldName, Locale locale, int strength) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortLocale = locale;
+      current.sortCollatorStrength = strength;
+      current.sortCollator = Collator.getInstance(locale);
+      current.sortCollator.setStrength(current.sortCollatorStrength);
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.sortLocale == null || current.valueType == null) {
+      current.sortLocale = locale;
+      boolean success = false;
+      try {
+        current.validate();
+        success = true;
+      } finally {
+        if (success == false) {
+          current.sortLocale = null;
+        }
+      }
+      // TODO: can we make it easy to swap in icu?
+      current.sortCollatorStrength = strength;
+      current.sortCollator = Collator.getInstance(locale);
+      current.sortCollator.setStrength(current.sortCollatorStrength);
+      changed();
+    } else if (locale.equals(current.sortLocale) == false) {
+      illegalState(fieldName, "sortLocale can only be set before indexing");
+    }
+  }
+
+  /** Computes a per-document sort key from a field's value; see {@link FieldTypes#setSortKey}. */
+  public interface SortKey {
+    Comparable getKey(Object o);
+  }
+
+  /** NOTE: does not persist; you must set this each time you open a new reader. */
+  public void setSortKey(String fieldName, SortKey sortKey) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.sortKey = sortKey;
+      fields.put(fieldName, current);
+    } else if (current.valueType == ValueType.ATOM) {
+      current.sortKey = sortKey;
+    } else {
+      illegalState(fieldName, "sortKey can only be set for ATOM fields; got value type=" + current.valueType);
+    }
+  }
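+
+  // A hypothetical sketch (the field name and value format are illustrative,
+  // not part of this change): sort an ATOM field whose values look like
+  // "item-42" by their numeric suffix. Since sort keys do not persist, this
+  // must be re-applied each time a new reader is opened:
+  //
+  //   fieldTypes.setSortKey("id", new SortKey() {
+  //       @Override
+  //       public Comparable getKey(Object o) {
+  //         return Integer.valueOf(o.toString().substring("item-".length()));
+  //       }
+  //     });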
+
+  public Locale getSortLocale(String fieldName) {
+    // Field must exist:
+    return getFieldType(fieldName).sortLocale;
+  }
+
+  public int getSortCollatorStrength(String fieldName) {
+    // Field must exist:
+    return getFieldType(fieldName).sortCollatorStrength;
+  }
+
+  /** Each value in this field will be unique (never occur in more than one document).  IndexWriter validates this.  */
+  public void setIsUnique(String fieldName) {
+    FieldType current = fields.get(fieldName);
+    if (current == null) {
+      current = newFieldType(fieldName);
+      current.isUnique = Boolean.TRUE;
+      fields.put(fieldName, current);
+      changed();
+    } else if (current.isUnique == Boolean.FALSE) {
+      illegalState(fieldName, "cannot change isUnique from FALSE to TRUE");
+    }
+  }
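+
+  // Usage sketch ("id" is illustrative): mark a primary-key style field unique
+  // before indexing any documents, e.g. fieldTypes.setIsUnique("id");
+  // IndexWriter then rejects a second document whose "id" value is already
+  // present in the index.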
+
+  /** Returns true if values in this field must be unique across all documents in the index. */
+  public synchronized boolean getIsUnique(String fieldName) {
+    FieldType fieldType = fields.get(fieldName);
+    return fieldType != null && fieldType.isUnique == Boolean.TRUE;
+  }
+
+  public Term newBigIntTerm(String fieldName, BigInteger token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    if (fieldType.valueType != ValueType.BIG_INT) {
+      illegalState(fieldName, "cannot create big int term when value type=" + fieldType.valueType);
+    }
+
+    return new Term(fieldName, NumericUtils.bigIntToBytes(token, fieldType.bigIntByteWidth));
+  }
+
+  /** Returns a {@link Query} matching this single {@code BigInteger} value, exactly. */
+  public Query newBigIntTermQuery(String fieldName, BigInteger token) {
+    return new TermQuery(newBigIntTerm(fieldName, token));
+  }
+
+  public Term newBigDecimalTerm(String fieldName, BigDecimal token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    if (fieldType.valueType != ValueType.BIG_DECIMAL) {
+      illegalState(fieldName, "cannot create big decimal term when value type=" + fieldType.valueType);
+    }
+
+    if (token.scale() != fieldType.bigDecimalScale.intValue()) {
+      illegalState(fieldName, "big decimal scale for this field is " + fieldType.bigDecimalScale + ", but token has scale " + token.scale());
+    }
+
+    return new Term(fieldName, NumericUtils.bigIntToBytes(token.unscaledValue(), fieldType.bigIntByteWidth));
+  }
+
+  /** Returns a {@link Query} matching this single {@code BigDecimal} value, exactly. */
+  public Query newBigDecimalTermQuery(String fieldName, BigDecimal token) {
+    return new TermQuery(newBigDecimalTerm(fieldName, token));
+  }
+
+  public Term newIntTerm(String fieldName, int token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    BytesRef bytes;
+
+    switch (fieldType.valueType) {
+    case INT:
+      bytes = NumericUtils.intToBytes(token);
+      break;
+    default:
+      illegalState(fieldName, "cannot create int term when value type=" + fieldType.valueType);
+      // Dead code but javac disagrees:
+      bytes = null;
+    }
+
+    return new Term(fieldName, bytes);
+  }
+
+  /** Returns a {@link Query} matching this single {@code int} value, exactly. */
+  public Query newExactIntQuery(String fieldName, int token) {
+    return new TermQuery(newIntTerm(fieldName, token));
+  }
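+
+  // Example (names are illustrative; assumes "count" was indexed as an INT
+  // field):
+  //
+  //   Query q = fieldTypes.newExactIntQuery("count", 42);
+  //   TopDocs hits = searcher.search(q, 10);
+  //
+  // The value is encoded with NumericUtils.intToBytes, so only documents whose
+  // "count" is exactly 42 match.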
+
+  public Term newFloatTerm(String fieldName, float token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    BytesRef bytes;
+
+    switch (fieldType.valueType) {
+    case FLOAT:
+      bytes = NumericUtils.floatToBytes(token);
+      break;
+    default:
+      illegalState(fieldName, "cannot create float term when value type=" + fieldType.valueType);
+      // Dead code but javac disagrees:
+      bytes = null;
+    }
+
+    return new Term(fieldName, bytes);
+  }
+
+  /** Returns a {@link Query} matching this single {@code float} value, exactly. */
+  public Query newExactFloatQuery(String fieldName, float token) {
+    return new TermQuery(newFloatTerm(fieldName, token));
+  }
+
+  public Term newLongTerm(String fieldName, long token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    BytesRef bytes;
+
+    switch (fieldType.valueType) {
+    case LONG:
+      bytes = NumericUtils.longToBytes(token);
+      break;
+    default:
+      illegalState(fieldName, "cannot create long term when value type=" + fieldType.valueType);
+      // Dead code but javac disagrees:
+      bytes = null;
+    }
+
+    return new Term(fieldName, bytes);
+  }
+
+  /** Returns a {@link Query} matching this single {@code long} value, exactly. */
+  public Query newExactLongQuery(String fieldName, long token) {
+    return new TermQuery(newLongTerm(fieldName, token));
+  }
+
+  public Term newDoubleTerm(String fieldName, double token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term: this field was not indexed");
+    }
+
+    BytesRef bytes;
+
+    switch (fieldType.valueType) {
+    case DOUBLE:
+      bytes = NumericUtils.doubleToBytes(token);
+      break;
+    default:
+      illegalState(fieldName, "cannot create double term when value type=" + fieldType.valueType);
+      // Dead code but javac disagrees:
+      bytes = null;
+    }
+
+    return new Term(fieldName, bytes);
+  }
+
+  /** Returns a {@link Query} matching this single {@code double} value, exactly. */
+  public Query newExactDoubleQuery(String fieldName, double token) {
+    return new TermQuery(newDoubleTerm(fieldName, token));
+  }
+
+  /** Returns a {@link Query} matching this single {@code byte[]} value, exactly. */
+  public Query newExactBinaryQuery(String fieldName, byte[] token) {
+    return newExactBinaryQuery(fieldName, new BytesRef(token));
+  }
+
+  /** Returns a {@link Query} matching this single {@link BytesRef} value, exactly. */
+  public Query newExactBinaryQuery(String fieldName, BytesRef token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term query: this field was not indexed");
+    }
+
+    // Field must be binary:
+    if (fieldType.valueType != ValueType.BINARY && fieldType.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "binary term query must have value type BINARY or ATOM; got " + fieldType.valueType);
+    }
+
+    return new TermQuery(new Term(fieldName, token));
+  }
+
+  /** Returns a {@link Query} matching this single {@code String} value, exactly. */
+  public Query newExactStringQuery(String fieldName, String token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term query: this field was not indexed");
+    }
+
+    // Field must be text:
+    if (fieldType.valueType != ValueType.TEXT && fieldType.valueType != ValueType.SHORT_TEXT && fieldType.valueType != ValueType.ATOM) {
+      illegalState(fieldName, "string term query must have value type TEXT, SHORT_TEXT or ATOM; got " + fieldType.valueType);
+    }
+
+    return new TermQuery(new Term(fieldName, token));
+  }
+
+  /** Returns a {@link Query} matching this single boolean value, exactly. */
+  public Query newExactBooleanQuery(String fieldName, boolean token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term query: this field was not indexed");
+    }
+
+    // Field must be boolean:
+    if (fieldType.valueType != ValueType.BOOLEAN) {
+      illegalState(fieldName, "boolean term query must have value type BOOLEAN; got " + fieldType.valueType);
+    }
+
+    byte[] value = new byte[1];
+    if (token) {
+      value[0] = 1;
+    }
+
+    return new TermQuery(new Term(fieldName, new BytesRef(value)));
+  }
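+
+  // Note on newExactBooleanQuery above: boolean values are indexed as a single
+  // byte, 1 for true and 0 for false, so a BOOLEAN field has at most two
+  // distinct terms.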
+
+  /** Returns a {@link Query} matching this single {@code InetAddress}, exactly. */
+  public Query newExactInetAddressQuery(String fieldName, InetAddress token) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create term query: this field was not indexed");
+    }
+
+    // Field must be InetAddress:
+    if (fieldType.valueType != ValueType.INET_ADDRESS) {
+      illegalState(fieldName, "inet address term query must have value type INET_ADDRESS; got " + fieldType.valueType);
+    }
+
+    return new TermQuery(new Term(fieldName, new BytesRef(token.getAddress())));
+  }
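+
+  // In newExactInetAddressQuery above, the term is the address's raw bytes as
+  // returned by InetAddress.getAddress (4 bytes for IPv4, 16 for IPv6).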
+
+  private String getRangeFilterDesc(FieldType fieldType, Object min, boolean minInclusive, Object max, boolean maxInclusive) {
+    StringBuilder sb = new StringBuilder();
+    if (minInclusive) {
+      sb.append('[');
+    } else {
+      sb.append('{');
+    }
+    if (min == null) {
+      sb.append('*');
+    } else {
+      sb.append(min);
+    }
+    sb.append(" TO ");
+    if (max == null) {
+      sb.append('*');
+    } else {
+      sb.append(max);
+    }
+    if (maxInclusive) {
+      sb.append(']');
+    } else {
+      sb.append('}');
+    }
+    return sb.toString();
+  }
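+
+  // For getRangeFilterDesc above: e.g. min=3 inclusive with no upper bound
+  // renders as "[3 TO *}", following the familiar query-parser bracket
+  // convention.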
+
+  public Filter newDocValuesRangeFilter(String fieldName, Date min, boolean minInclusive, Date max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be DATE type:
+    if (fieldType.valueType != ValueType.DATE) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed as value type DATE; got: " + fieldType.valueType);
+    }
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    return DocValuesRangeFilter.newLongRange(fieldName,
+                                             min == null ? null : min.getTime(),
+                                             max == null ? null : max.getTime(),
+                                             minInclusive, maxInclusive,
+                                             getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newIntDocValuesRangeFilter(String fieldName, Integer min, boolean minInclusive, Integer max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.INT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type INT; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newIntRange(fieldName,
+                                            min == null ? null : min.intValue(),
+                                            max == null ? null : max.intValue(),
+                                            minInclusive,
+                                            maxInclusive,
+                                            getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newLongDocValuesRangeFilter(String fieldName, Long min, boolean minInclusive, Long max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.LONG) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type LONG; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newLongRange(fieldName,
+                                             min == null ? null : min.longValue(),
+                                             max == null ? null : max.longValue(),
+                                             minInclusive,
+                                             maxInclusive,
+                                             getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newHalfFloatDocValuesRangeFilter(String fieldName, Float min, boolean minInclusive, Float max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.HALF_FLOAT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type HALF_FLOAT; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newHalfFloatRange(fieldName,
+                                                  min == null ? null : min.floatValue(),
+                                                  max == null ? null : max.floatValue(),
+                                                  minInclusive,
+                                                  maxInclusive,
+                                                  getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newFloatDocValuesRangeFilter(String fieldName, Float min, boolean minInclusive, Float max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.FLOAT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type FLOAT; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newFloatRange(fieldName,
+                                              min == null ? null : min.floatValue(),
+                                              max == null ? null : max.floatValue(),
+                                              minInclusive,
+                                              maxInclusive,
+                                              getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newDoubleDocValuesRangeFilter(String fieldName, Double min, boolean minInclusive, Double max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have doc values:
+    if (fieldType.docValuesType != DocValuesType.NUMERIC) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with NUMERIC doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.DOUBLE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type DOUBLE; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newDoubleRange(fieldName,
+                                               min == null ? null : min.doubleValue(),
+                                               max == null ? null : max.doubleValue(),
+                                               minInclusive,
+                                               maxInclusive,
+                                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newStringDocValuesRangeFilter(String fieldName, String minTerm, boolean minInclusive, String maxTerm, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    BytesRef min;
+    BytesRef max;
+    if (fieldType.sortLocale != null) {
+      synchronized (fieldType.sortCollator) {
+        min = minTerm == null ? null : new BytesRef(fieldType.sortCollator.getCollationKey(minTerm).toByteArray());
+        max = maxTerm == null ? null : new BytesRef(fieldType.sortCollator.getCollationKey(maxTerm).toByteArray());
+      }
+    } else {
+      min = minTerm == null ? null : new BytesRef(minTerm);
+      max = maxTerm == null ? null : new BytesRef(maxTerm);
+    }
+    
+    return newBinaryDocValuesRangeFilter(fieldName, min, minInclusive, max, maxInclusive);
+  }
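+
+  // In newStringDocValuesRangeFilter above: when a sortLocale is set, the
+  // endpoints are first converted to collation keys (holding the collator's
+  // lock, since java.text.Collator is not thread-safe), so the range is
+  // evaluated in locale order rather than raw binary order.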
+
+  public Filter newBinaryDocValuesRangeFilter(String fieldName, byte[] minTerm, boolean minInclusive, byte[] maxTerm, boolean maxInclusive) {
+    return newBinaryDocValuesRangeFilter(fieldName, minTerm == null ? null : new BytesRef(minTerm), minInclusive, maxTerm == null ? null : new BytesRef(maxTerm), maxInclusive);
+  }
+
+  public Filter newBinaryDocValuesRangeFilter(String fieldName, BytesRef minTerm, boolean minInclusive, BytesRef maxTerm, boolean maxInclusive) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must have sorted doc values:
+    if (fieldType.docValuesType != DocValuesType.SORTED && fieldType.docValuesType != DocValuesType.SORTED_SET) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with SORTED or SORTED_SET doc values; got: " + fieldType.docValuesType);
+    }
+
+    if (fieldType.valueType != ValueType.ATOM && fieldType.valueType != ValueType.BINARY) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed as value type ATOM or BINARY; got: " + fieldType.valueType);
+    }
+
+    return DocValuesRangeFilter.newBytesRefRange(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                                                 getRangeFilterDesc(fieldType, minTerm, minInclusive, maxTerm, maxInclusive));
+  }
+
+  public Filter newDocValuesRangeFilter(String fieldName, InetAddress min, boolean minInclusive, InetAddress max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be InetAddress type:
+    if (fieldType.valueType != ValueType.INET_ADDRESS) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed as value type INET_ADDRESS; got: " + fieldType.valueType);
+    }
+
+    // Field must have sorted doc values:
+    if (fieldType.docValuesType != DocValuesType.SORTED && fieldType.docValuesType != DocValuesType.SORTED_SET) {
+      illegalState(fieldName, "cannot create doc values range query: this field was not indexed with SORTED or SORTED_SET doc values; got: " + fieldType.docValuesType);
+    }
+
+    BytesRef minTerm = min == null ? null : new BytesRef(min.getAddress());
+    BytesRef maxTerm = max == null ? null : new BytesRef(max.getAddress());
+
+    return DocValuesRangeFilter.newBytesRefRange(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                                                 getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newIntRangeFilter(String fieldName, Integer min, boolean minInclusive, Integer max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.INT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type INT; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.intToBytes(min.intValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.intToBytes(max.intValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newLongRangeFilter(String fieldName, Long min, boolean minInclusive, Long max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.LONG) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type LONG; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.longToBytes(min.longValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.longToBytes(max.longValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newBigIntRangeFilter(String fieldName, BigInteger min, boolean minInclusive, BigInteger max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.BIG_INT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type BIG_INT; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.bigIntToBytes(min, fieldType.bigIntByteWidth.intValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.bigIntToBytes(max, fieldType.bigIntByteWidth.intValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newBigDecimalRangeFilter(String fieldName, BigDecimal min, boolean minInclusive, BigDecimal max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.BIG_DECIMAL) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type BIG_DECIMAL; got: " + fieldType.valueType);
+    }
+
+    // TODO: we could allow different scale, and just rescale ourselves?
+    if (min != null && min.scale() != fieldType.bigDecimalScale.intValue()) {
+      illegalState(fieldName, "big decimal scale for this field is " + fieldType.bigDecimalScale + ", but min has scale " + min.scale());
+    }
+
+    if (max != null && max.scale() != fieldType.bigDecimalScale.intValue()) {
+      illegalState(fieldName, "big decimal scale for this field is " + fieldType.bigDecimalScale + ", but max has scale " + max.scale());
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.bigIntToBytes(min.unscaledValue(), fieldType.bigIntByteWidth.intValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.bigIntToBytes(max.unscaledValue(), fieldType.bigIntByteWidth.intValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
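+
+  // Example (names are illustrative; assumes "price" was registered as a
+  // BIG_DECIMAL field with scale 2 and fast ranges enabled):
+  //
+  //   // Matches prices in [9.99, 20.00):
+  //   Filter f = fieldTypes.newBigDecimalRangeFilter("price",
+  //                                                  new BigDecimal("9.99"), true,
+  //                                                  new BigDecimal("20.00"), false);
+  //
+  // Both endpoints must already carry the field's configured scale; they are
+  // not rescaled here (see the TODO above).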
+
+  public Filter newHalfFloatRangeFilter(String fieldName, Float min, boolean minInclusive, Float max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.HALF_FLOAT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type HALF_FLOAT; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.halfFloatToBytes(min.floatValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.halfFloatToBytes(max.floatValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newFloatRangeFilter(String fieldName, Float min, boolean minInclusive, Float max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.FLOAT) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type FLOAT; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.floatToBytes(min.floatValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.floatToBytes(max.floatValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newDoubleRangeFilter(String fieldName, Double min, boolean minInclusive, Double max, boolean maxInclusive) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.DOUBLE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type DOUBLE; got: " + fieldType.valueType);
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.doubleToBytes(min.doubleValue());
+    BytesRef maxTerm = max == null ? null : NumericUtils.doubleToBytes(max.doubleValue());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  public Filter newStringRangeFilter(String fieldName, String minTerm, boolean minInclusive, String maxTerm, boolean maxInclusive) {
+    return newBinaryRangeFilter(fieldName, minTerm == null ? null : new BytesRef(minTerm), minInclusive, maxTerm == null ? null : new BytesRef(maxTerm), maxInclusive);
+  }
+
+  public Filter newBinaryRangeFilter(String fieldName, byte[] minTerm, boolean minInclusive, byte[] maxTerm, boolean maxInclusive) {
+    return newBinaryRangeFilter(fieldName, minTerm == null ? null : new BytesRef(minTerm), minInclusive, maxTerm == null ? null : new BytesRef(maxTerm), maxInclusive);
+  }
+
+  public Filter newBinaryRangeFilter(String fieldName, BytesRef minTerm, boolean minInclusive, BytesRef maxTerm, boolean maxInclusive) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    if (fieldType.valueType != ValueType.ATOM && fieldType.valueType != ValueType.BINARY) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed as value type ATOM or BINARY; got: " + fieldType.valueType);
+    }
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, minTerm, minInclusive, maxTerm, maxInclusive));
+  }
+
+  // TODO: Date sugar for a range filter matching a specific hour/day/month/year/etc.?  need locale/timezone... should we use DateTools?
+
+  public Filter newRangeFilter(String fieldName, Date min, boolean minInclusive, Date max, boolean maxInclusive) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.valueType != ValueType.DATE) {
+      illegalState(fieldName, "cannot create range filter: expected value type=DATE but got: " + fieldType.valueType);
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    BytesRef minTerm = min == null ? null : NumericUtils.longToBytes(min.getTime());
+    BytesRef maxTerm = max == null ? null : NumericUtils.longToBytes(max.getTime());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  // TODO: also add "range filter using net mask" sugar version
+  public Filter newRangeFilter(String fieldName, InetAddress min, boolean minInclusive, InetAddress max, boolean maxInclusive) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    // Field must be indexed:
+    if (fieldType.indexOptions == IndexOptions.NONE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed");
+    }
+
+    if (fieldType.valueType != ValueType.INET_ADDRESS) {
+      illegalState(fieldName, "cannot create range filter: expected value type=INET_ADDRESS but got: " + fieldType.valueType);
+    }
+
+    if (fieldType.fastRanges != Boolean.TRUE) {
+      illegalState(fieldName, "cannot create range filter: this field was not indexed for fast ranges");
+    }
+
+    BytesRef minTerm = min == null ? null : new BytesRef(min.getAddress());
+    BytesRef maxTerm = max == null ? null : new BytesRef(max.getAddress());
+
+    return new TermRangeFilter(fieldName, minTerm, maxTerm, minInclusive, maxInclusive,
+                               getRangeFilterDesc(fieldType, min, minInclusive, max, maxInclusive));
+  }
+
+  /** Builds a sort from arbitrary list of fieldName, reversed pairs. */
+  public Sort newSort(Object... fields) {
+    if (fields.length == 0) {
+      throw new IllegalArgumentException("must sort by at least one field; got nothing");
+    }
+
+    int upto = 0;
+    List<SortField> sortFields = new ArrayList<>();
+
+    while (upto < fields.length) {
+      if ((fields[upto] instanceof String) == false) {
+        throw new IllegalArgumentException("arguments must (String [Boolean])+; expected String but got: " + fields[upto].getClass());
+      }
+      String fieldName = (String) fields[upto++];
+      Boolean reversed;
+      if (upto == fields.length || (fields[upto] instanceof Boolean) == false) {
+        reversed = null;
+      } else {
+        reversed = (Boolean) fields[upto++];
+      }
+      sortFields.add(newSortField(fieldName, reversed));
+    }
+
+    return new Sort(sortFields.toArray(new SortField[sortFields.size()]));
+  }
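+
+  // Example: each field name may optionally be followed by a Boolean reverse
+  // flag; when the flag is omitted, the field's default order is used:
+  //
+  //   Sort sort = fieldTypes.newSort("price", "date", true);
+  //
+  // sorts by "price" in its default order, then by "date" reversed.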
+
+  /** Returns the SortField for this field, in ascending (non-reversed) order. */
+  public SortField newSortField(String fieldName) {
+    return newSortField(fieldName, false);
+  }
+
+  /** Returns the SortField for this field, optionally reversed.  If reverse is null, we use the default for the field. */
+  public SortField newSortField(String fieldName, Boolean reverse) {
+
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+    if (fieldType.sortable != Boolean.TRUE) {
+      illegalState(fieldName, "this field was not indexed for sorting");
+    }
+    if (reverse == null) {
+      reverse = fieldType.sortReversed;
+    }
+    if (reverse == null) {
+      reverse = Boolean.FALSE;
+    }
+    switch (fieldType.valueType) {
+    case INT:
+      {
+        SortField sortField;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          sortField = new SortedNumericSortField(fieldName, SortField.Type.INT, reverse, fieldType.numericSelector);
+        } else {
+          sortField = new SortField(fieldName, SortField.Type.INT, reverse);
+        }
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Integer.MIN_VALUE);
+          } else {
+            sortField.setMissingValue(Integer.MAX_VALUE);
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Integer.MAX_VALUE);
+          } else {
+            sortField.setMissingValue(Integer.MIN_VALUE);
+          }
+        }
+        return sortField;
+      }
+
+    case HALF_FLOAT:
+      {
+        final float missingValue;
+
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            missingValue = Float.NEGATIVE_INFINITY;
+          } else {
+            missingValue = Float.POSITIVE_INFINITY;
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            missingValue = Float.POSITIVE_INFINITY;
+          } else {
+            missingValue = Float.NEGATIVE_INFINITY;
+          }
+        }
+
+        SortField sortField;
+        FieldComparatorSource compSource;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<Float> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new HalfFloatComparator(numHits, fieldName, missingValue) {
+                  @Override
+                  protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
+                    assert field.equals(fieldName);
+                    SortedNumericDocValues dvs = context.reader().getSortedNumericDocValues(fieldName);
+                    assert dvs != null;
+                    return SortedNumericSelector.wrap(dvs, fieldType.numericSelector);
+                  }
+                };
+              }
+            };
+        } else {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<Float> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new HalfFloatComparator(numHits, fieldName, missingValue);
+              }
+            };
+        }
+
+        sortField = new SortField(fieldName, compSource, reverse) {
+            @Override
+            public String toString() {
+              return "<halffloat" + ": \"" + fieldName + "\" missingLast=" + fieldType.sortMissingLast + ">";
+            }
+          };
+
+        return sortField;
+      }
+
+    case FLOAT:
+      {
+        SortField sortField;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          sortField = new SortedNumericSortField(fieldName, SortField.Type.FLOAT, reverse, fieldType.numericSelector);
+        } else {
+          sortField = new SortField(fieldName, SortField.Type.FLOAT, reverse);
+        }
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Float.NEGATIVE_INFINITY);
+          } else {
+            sortField.setMissingValue(Float.POSITIVE_INFINITY);
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Float.POSITIVE_INFINITY);
+          } else {
+            sortField.setMissingValue(Float.NEGATIVE_INFINITY);
+          }
+        }
+        return sortField;
+      }
+
+    case LONG:
+    case DATE:
+      {
+        SortField sortField;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          sortField = new SortedNumericSortField(fieldName, SortField.Type.LONG, reverse, fieldType.numericSelector);
+        } else {
+          sortField = new SortField(fieldName, SortField.Type.LONG, reverse);
+        }
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Long.MIN_VALUE);
+          } else {
+            sortField.setMissingValue(Long.MAX_VALUE);
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Long.MAX_VALUE);
+          } else {
+            sortField.setMissingValue(Long.MIN_VALUE);
+          }
+        }
+        return sortField;
+      }
+
+    case DOUBLE:
+      {
+        SortField sortField;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          sortField = new SortedNumericSortField(fieldName, SortField.Type.DOUBLE, reverse, fieldType.numericSelector);
+        } else {
+          sortField = new SortField(fieldName, SortField.Type.DOUBLE, reverse);
+        }
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Double.NEGATIVE_INFINITY);
+          } else {
+            sortField.setMissingValue(Double.POSITIVE_INFINITY);
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(Double.POSITIVE_INFINITY);
+          } else {
+            sortField.setMissingValue(Double.NEGATIVE_INFINITY);
+          }
+        }
+        return sortField;
+      }
+
+    case BIG_INT:
+      {
+        FieldComparatorSource compSource;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<BigInteger> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new BigIntComparator(numHits, fieldName, fieldType.bigIntByteWidth, fieldType.sortMissingLast != Boolean.FALSE) {
+                  @Override
+                  protected SortedDocValues getDocValues(LeafReaderContext context) throws IOException {
+                    SortedSetDocValues dvs = context.reader().getSortedSetDocValues(fieldName);
+                    assert dvs != null;
+                    return SortedSetSelector.wrap(dvs, fieldType.sortedSetSelector);
+                  }
+                };
+              }
+            };
+        } else {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<BigInteger> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new BigIntComparator(numHits, fieldName, fieldType.bigIntByteWidth, fieldType.sortMissingLast != Boolean.FALSE);
+              }
+            };
+        }
+
+        return new SortField(fieldName, compSource, reverse) {
+            @Override
+            public String toString() {
+              return "<bigint" + ": \"" + fieldName + "\" missingLast=" + fieldType.sortMissingLast + ">";
+            }
+          };
+      }
+
+    case BIG_DECIMAL:
+      {
+        FieldComparatorSource compSource;
+        if (fieldType.multiValued == Boolean.TRUE) {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<BigDecimal> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new BigDecimalComparator(numHits, fieldName, fieldType.bigIntByteWidth, fieldType.sortMissingLast != Boolean.FALSE, fieldType.bigDecimalScale) {
+                  @Override
+                  protected SortedDocValues getDocValues(LeafReaderContext context) throws IOException {
+                    SortedSetDocValues dvs = context.reader().getSortedSetDocValues(fieldName);
+                    assert dvs != null;
+                    return SortedSetSelector.wrap(dvs, fieldType.sortedSetSelector);
+                  }
+                };
+              }
+            };
+        } else {
+          compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<BigDecimal> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new BigDecimalComparator(numHits, fieldName, fieldType.bigIntByteWidth, fieldType.sortMissingLast != Boolean.FALSE, fieldType.bigDecimalScale);
+              }
+            };
+        }
+
+        return new SortField(fieldName, compSource, reverse) {
+            @Override
+            public String toString() {
+              return "<bigdecimal" + ": \"" + fieldName + "\" missingLast=" + fieldType.sortMissingLast + ">";
+            }
+          };
+      }
+
+    case SHORT_TEXT:
+    case ATOM:
+    case BINARY:
+    case BOOLEAN:
+    case INET_ADDRESS:
+      SortField sortField;
+      {
+        if (fieldType.multiValued == Boolean.TRUE) {
+          sortField = new SortedSetSortField(fieldName, reverse, fieldType.sortedSetSelector);
+        } else if (fieldType.sortKey != null) {
+          FieldComparatorSource compSource = new FieldComparatorSource() {
+              @Override
+              public FieldComparator<BytesRef> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+                return new SortKeyComparator(numHits, fieldName, fieldType.sortMissingLast == Boolean.TRUE, fieldType.sortKey);
+              }
+            };
+
+          sortField = new SortField(fieldName, compSource, reverse) {
+              @Override
+              public String toString() {
+                return "<custom-sort-key" + ": \"" + fieldName + "\" missingLast=" + fieldType.sortMissingLast + ">";
+              }
+            };
+
+        } else if (fieldType.docValuesType == DocValuesType.BINARY) {
+          sortField = new SortField(fieldName, SortField.Type.STRING_VAL, reverse);
+        } else {
+          sortField = new SortField(fieldName, SortField.Type.STRING, reverse);
+        }
+
+        if (fieldType.sortMissingLast == Boolean.TRUE) {
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(SortField.STRING_FIRST);
+          } else {
+            sortField.setMissingValue(SortField.STRING_LAST);
+          }
+        } else {
+          assert fieldType.sortMissingLast == Boolean.FALSE;
+          if (reverse.booleanValue()) {
+            sortField.setMissingValue(SortField.STRING_LAST);
+          } else {
+            sortField.setMissingValue(SortField.STRING_FIRST);
+          }
+        }
+        return sortField;
+      }
+
+    default:
+      // BUG
+      illegalState(fieldName, "unhandled sort case, value type=" + fieldType.valueType);
+
+      // Dead code but javac disagrees:
+      return null;
+    }
+  }
+
+  /** Returns a {@link Filter} accepting documents that have this field. */
+  public Filter newFieldExistsFilter(String fieldName) {
+    if (enableExistsFilters == false) {
+      throw new IllegalStateException("field exists filter was disabled");
+    }
+
+    return new FieldExistsFilter(fieldName);
+  }
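+
+  // Sketch ("title" is illustrative):
+  //
+  //   Filter f = fieldTypes.newFieldExistsFilter("title");
+  //
+  // matches every document that has some value for "title"; exists filters
+  // must not have been disabled (see disableExistsFilters below).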
+
+  private synchronized void changed() {
+    changed(true);
+  }
+
+  private synchronized void changed(boolean requireWritable) {
+    if (requireWritable) {
+      ensureWritable();
+    }
+    changeCount++;
+  }
+
+  public synchronized long getChangeCount() {
+    return changeCount;
+  }
+
+  private synchronized void ensureWritable() {
+    if (readOnly) {
+      throw new IllegalStateException("cannot make changes to a read-only FieldTypes (it was opened from an IndexReader, not an IndexWriter)");
+    }
+    if (closed) {
+      throw new AlreadyClosedException("this FieldTypes has been closed");
+    }
+  }
+
+  /** This should only be invoked by {@link IndexWriter}.
+   *
+   * @lucene.internal */
+  public void close() {
+    closed = true;
+  }
+  
+  static void illegalState(String fieldName, String message) {
+    throw new IllegalStateException("field \"" + fieldName + "\": " + message);
+  }
+
+  /** @lucene.internal */
+  public synchronized String writeToString() throws IOException {
+    RAMFile file = new RAMFile();
+    RAMOutputStream out = new RAMOutputStream(file, true);
+    CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
+    
+    out.writeVInt(indexCreatedVersion.major);
+    out.writeVInt(indexCreatedVersion.minor);
+    out.writeVInt(indexCreatedVersion.bugfix);
+
+    writeBoolean(out, enableExistsFilters);
+    writeBoolean(out, indexedDocs);
+
+    int count = fields.size();
+    out.writeVInt(count);
+    int count2 = 0;
+    for(FieldType fieldType : fields.values()) {
+      fieldType.write(out);
+      count2++;
+    }
+    assert count == count2;
+
+    CodecUtil.writeFooter(out);
+
+    out.close();
+    byte[] bytes = new byte[(int) out.getFilePointer()];
+    RAMInputStream in = new RAMInputStream("FieldTypes", file);
+    in.readBytes(bytes, 0, bytes.length);
+    return bytesToString(bytes);
+  }
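+
+  // writeToString above returns an opaque, codec-framed payload (header, index
+  // created version, per-field entries, checksum footer); readFromString below
+  // reverses the encoding.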
+
+  /** Reads FieldTypes from a string previously saved by {@link #writeToString}. */
+  private synchronized Version readFromString(String stringIn) throws IOException {
+
+    byte[] bytesIn = stringToBytes(stringIn);
+    RAMFile file = new RAMFile();
+    RAMOutputStream out = new RAMOutputStream(file, false);
+    out.writeBytes(bytesIn, 0, bytesIn.length);
+    out.close();
+    RAMInputStream ris = new RAMInputStream("FieldTypes", file);
+    ChecksumIndexInput in = new BufferedChecksumIndexInput(ris);
+
+    CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START);
+
+    Version indexCreatedVersion = Version.fromBits(in.readVInt(), in.readVInt(), in.readVInt());
+
+    enableExistsFilters = readBoolean(in);
+    indexedDocs = readBoolean(in);
+
+    int count = in.readVInt();
+    for(int i=0;i<count;i++) {
+      FieldType fieldType = new FieldType(in);
+      fields.put(fieldType.name, fieldType);
+    }
+
+    CodecUtil.checkFooter(in);
+
+    return indexCreatedVersion;
+  }
+
+  public Version getIndexCreatedVersion() {
+    return indexCreatedVersion;
+  }
+
+  public static FieldTypes getFieldTypes(Directory dir, Analyzer defaultQueryAnalyzer) throws IOException {
+    return getFieldTypes(dir, defaultQueryAnalyzer, false);
+  }
+
+  public static FieldTypes getFieldTypes(Directory dir, Analyzer defaultQueryAnalyzer, boolean is5xIndex) throws IOException {
+    SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
+    return getFieldTypes(infos, defaultQueryAnalyzer, IndexSearcher.getDefaultSimilarity());
+  }
+
+  public static FieldTypes getFieldTypes(SegmentInfos infos, Analyzer defaultQueryAnalyzer, Similarity defaultSimilarity) throws IOException {
+    return new FieldTypes(infos, defaultQueryAnalyzer, defaultSimilarity);
+  }
+
+  public Iterable<String> getFieldNames() {
+    return Collections.unmodifiableSet(fields.keySet());
+  }
+
+  public synchronized void mergeOneField(FieldTypes in, String fieldName) {
+    FieldType current = fields.get(fieldName);
+    FieldType sav;
+
+    // Field must exist:
+    FieldType toMerge = in.getFieldType(fieldName);
+
+    if (current != null) {
+      sav = new FieldType(current);
+      boolean success = false;
+      try {
+        current.merge(toMerge);
+        success = true;
+      } finally {
+        if (success == false) {
+          fields.put(fieldName, sav);
+        }
+      }
+    } else {
+      sav = null;
+      fields.put(fieldName, toMerge);
+    }
+  }
+
+  public synchronized void addAll(FieldTypes in) {
+    Map<String,FieldType> sav = new HashMap<>();
+    for(Map.Entry<String,FieldType> ent : fields.entrySet()) {
+      sav.put(ent.getKey(), new FieldType(ent.getValue()));
+    }
+
+    boolean success = false;
+    try {
+      for (FieldType fieldType : in.fields.values()) {
+        FieldType curFieldType = fields.get(fieldType.name);
+        if (curFieldType == null) {
+          fields.put(fieldType.name, new FieldType(fieldType));
+        } else {
+          curFieldType.merge(fieldType);
+        }
+      }
+      success = true;
+    } finally {
+      if (success == false) {
+        // Restore original fields:
+        fields.clear();
+        for(Map.Entry<String,FieldType> ent : sav.entrySet()) {
+          fields.put(ent.getKey(), ent.getValue());
+        }
+      }
+    }
+  }
+
+  public synchronized void enableExistsFilters() {
+    if (enableExistsFilters == false && indexedDocs) {
+      throw new IllegalStateException("cannot enable exists filters after documents were already indexed");
+    }
+    enableExistsFilters = true;
+  }
+
+  public synchronized void disableExistsFilters() {
+    if (enableExistsFilters && indexedDocs) {
+      throw new IllegalStateException("cannot disable exists filters after documents were already indexed");
+    }
+    enableExistsFilters = false;
+  }
+
+  private static void writeBoolean(DataOutput out, boolean value) throws IOException {
+    if (value) {
+      out.writeByte((byte) 1);
+    } else {
+      out.writeByte((byte) 0);
+    }
+  }
+
+  private static boolean readBoolean(DataInput in) throws IOException {
+    byte b = in.readByte();
+    if (b == 1) {
+      return true;
+    } else if (b == 0) {
+      return false;
+    } else {
+      throw new CorruptIndexException("invalid byte for boolean: " + b, in);
+    }
+  }
+
+  public synchronized void clear() {
+    fields.clear();
+    enableExistsFilters = true;
+    indexedDocs = false;
+    addFieldNamesField();
+  }
+
+  static void writeNullableInteger(DataOutput out, Integer value) throws IOException {
+    if (value == null) {
+      out.writeByte((byte) 0);
+    } else {
+      out.writeByte((byte) 1);
+      out.writeVInt(value.intValue());
+    }
+  }
+
+  static Integer readNullableInteger(DataInput in) throws IOException {
+    if (in.readByte() == 0) {
+      return null;
+    } else {
+      return in.readVInt();
+    }
+  }
+
+  static void writeNullableBoolean(DataOutput out, Boolean value) throws IOException {
+    if (value == null) {
+      out.writeByte((byte) 0);
+    } else if (value == Boolean.TRUE) {
+      out.writeByte((byte) 1);
+    } else {
+      out.writeByte((byte) 2);
+    }
+  }
+
+  static Boolean readNullableBoolean(DataInput in) throws IOException {
+    byte b = in.readByte();
+    if (b == 0) {
+      return null;
+    } else if (b == 1) {
+      return Boolean.TRUE;
+    } else if (b == 2) {
+      return Boolean.FALSE;
+    } else {
+      throw new CorruptIndexException("invalid byte for nullable boolean: " + b, in);
+    }
+  }
+
+  static void writeNullableString(DataOutput out, String value) throws IOException {
+    if (value == null) {
+      out.writeByte((byte) 0);
+    } else {
+      out.writeByte((byte) 1);
+      out.writeString(value);
+    }
+  }
+
+  static String readNullableString(DataInput in) throws IOException {
+    byte b = in.readByte();
+    if (b == 0) {
+      return null;
+    } else if (b == 1) {
+      return in.readString();
+    } else {
+      throw new CorruptIndexException("invalid byte for nullable string: " + b, in);
+    }
+  }
+
+  public ValueType getValueType(String fieldName) {
+    // Field must exist:
+    FieldType fieldType = getFieldType(fieldName);
+
+    return fieldType.valueType;
+  }
+
+  @Override
+  public synchronized String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("FieldTypes:");
+    for(FieldType fieldType : fields.values()) {
+      sb.append('\n');
+      sb.append(fieldType);
+    }
+
+    return sb.toString();
+  }
+}
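
For context: the serialization above round-trips FieldTypes through an in-memory RAMFile, and the writeNullable* helpers encode optional values as a one-byte tag followed by the payload. Below is a minimal standalone sketch of that tag scheme, using plain java.io streams in place of Lucene's DataOutput/DataInput, and a fixed-width int where the patch writes a vInt:

    import java.io.*;

    public class NullableCodecSketch {
      static void writeNullableInteger(DataOutput out, Integer value) throws IOException {
        if (value == null) {
          out.writeByte(0);    // tag 0 = null
        } else {
          out.writeByte(1);    // tag 1 = value follows
          out.writeInt(value); // the patch writes a vInt here instead
        }
      }

      static Integer readNullableInteger(DataInput in) throws IOException {
        return in.readByte() == 0 ? null : in.readInt();
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        writeNullableInteger(out, null);
        writeNullableInteger(out, 42);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
        System.out.println(readNullableInteger(in)); // null
        System.out.println(readNullableInteger(in)); // 42
      }
    }

The tag byte is what lets a reader distinguish null from a legitimate zero; the nullable booleans above use tags 0/1/2 for null/true/false in the same way.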
diff --git a/lucene/core/src/java/org/apache/lucene/document/FilteringTokenFilter.java b/lucene/core/src/java/org/apache/lucene/document/FilteringTokenFilter.java
new file mode 100644
index 0000000..7dcfdaf
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/FilteringTokenFilter.java
@@ -0,0 +1,78 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+
+// TODO: forked from analysis module; can we share?
+
+/**
+ * Abstract base class for TokenFilters that may remove tokens.
+ * You have to implement {@link #accept} and return <code>true</code> if the current
+ * token should be preserved. {@link #incrementToken} uses this method
+ * to decide if a token should be passed to the caller.
+ */
+abstract class FilteringTokenFilter extends TokenFilter {
+
+  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  private int skippedPositions;
+
+  /**
+   * Create a new {@link FilteringTokenFilter}.
+   * @param in      the {@link TokenStream} to consume
+   */
+  public FilteringTokenFilter(TokenStream in) {
+    super(in);
+  }
+
+  /** Override this method and return <code>true</code> if the current input token should be returned by {@link #incrementToken}. */
+  protected abstract boolean accept() throws IOException;
+
+  @Override
+  public final boolean incrementToken() throws IOException {
+    skippedPositions = 0;
+    while (input.incrementToken()) {
+      if (accept()) {
+        if (skippedPositions != 0) {
+          posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions);
+        }
+        return true;
+      }
+      skippedPositions += posIncrAtt.getPositionIncrement();
+    }
+
+    // reached EOS -- return false
+    return false;
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    skippedPositions = 0;
+  }
+
+  @Override
+  public void end() throws IOException {
+    super.end();
+    posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions);
+  }
+}
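
A concrete subclass only implements accept(); the base class folds the position increments of skipped tokens into the next kept token. A hedged sketch of such a subclass, written against the public analysis-module FilteringTokenFilter this file was forked from (the single-argument constructor is assumed to match this fork's):

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.util.FilteringTokenFilter;

    // Keeps only tokens whose first character is upper case; dropped tokens
    // still contribute to the next kept token's position increment.
    final class UpperCaseStartFilter extends FilteringTokenFilter {
      private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

      UpperCaseStartFilter(TokenStream in) {
        super(in);
      }

      @Override
      protected boolean accept() {
        return termAtt.length() > 0 && Character.isUpperCase(termAtt.charAt(0));
      }
    }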
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java
deleted file mode 100644
index db0524f..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/FloatDocValuesField.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Syntactic sugar for encoding floats as NumericDocValues
- * via {@link Float#floatToRawIntBits(float)}.
- * <p>
- * Per-document floating point values can be retrieved via
- * {@link org.apache.lucene.index.LeafReader#getNumericDocValues(String)}.
- * <p>
- * <b>NOTE</b>: In most all cases this will be rather inefficient,
- * requiring four bytes per document. Consider encoding floating
- * point values yourself with only as much precision as you require.
- */
-public class FloatDocValuesField extends NumericDocValuesField {
-
-  /** 
-   * Creates a new DocValues field with the specified 32-bit float value 
-   * @param name field name
-   * @param value 32-bit float value
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public FloatDocValuesField(String name, float value) {
-    super(name, Float.floatToRawIntBits(value));
-  }
-
-  @Override
-  public void setFloatValue(float value) {
-    super.setLongValue(Float.floatToRawIntBits(value));
-  }
-  
-  @Override
-  public void setLongValue(long value) {
-    throw new IllegalArgumentException("cannot change value type from Float to Long");
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatField.java b/lucene/core/src/java/org/apache/lucene/document/FloatField.java
deleted file mode 100644
index a959a73..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/FloatField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.NumericTokenStream; // javadocs
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.NumericRangeFilter; // javadocs
-import org.apache.lucene.search.NumericRangeQuery; // javadocs
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>float</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new FloatField(name, 6.0F, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>FloatField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  FloatField field = new FloatField(name, 0.0F, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setFloatValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link IntField}, {@link LongField}, {@link
- * DoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>FloatField</code>, use {@link NumericRangeQuery} or {@link
- * NumericRangeFilter}.  To sort according to a
- * <code>FloatField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>FloatField</code> 
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>FloatField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>FloatField</code>.</p>
- *
- * <p>A <code>FloatField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 4, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * NumericRangeQuery} or {@link NumericRangeFilter}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link NumericRangeQuery}. The format of
- * indexed values is described in {@link NumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * NumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @since 2.9
- */
-
-public final class FloatField extends Field {
-  
-  /** 
-   * Type for a FloatField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.NumericType.FLOAT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored FloatField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.NumericType.FLOAT);
-    TYPE_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored FloatField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  NumericUtils#PRECISION_STEP_DEFAULT_32} (8). 
-   *  @param name field name
-   *  @param value 32-bit double value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public FloatField(String name, float value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Float.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 32-bit float value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link FieldType.NumericType#FLOAT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a FLOAT numericType()
-   */
-  public FloatField(String name, float value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.NumericType.FLOAT) {
-      throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
-    }
-    fieldsData = Float.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/IntField.java b/lucene/core/src/java/org/apache/lucene/document/IntField.java
deleted file mode 100644
index 4910805..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/IntField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.NumericTokenStream; // javadocs
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.NumericRangeFilter; // javadocs
-import org.apache.lucene.search.NumericRangeQuery; // javadocs
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>int</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new IntField(name, 6, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>IntField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  IntField field = new IntField(name, 6, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setIntValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LongField}, {@link FloatField}, {@link
- * DoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>IntField</code>, use {@link NumericRangeQuery} or {@link
- * NumericRangeFilter}.  To sort according to a
- * <code>IntField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#INT}. <code>IntField</code> 
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>IntField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>IntField</code>.</p>
- *
- * <p>An <code>IntField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 4, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * NumericRangeQuery} or {@link NumericRangeFilter}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link NumericRangeQuery}. The format of
- * indexed values is described in {@link NumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * NumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @since 2.9
- */
-
-public final class IntField extends Field {
-  
-  /** 
-   * Type for an IntField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.NumericType.INT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored IntField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.NumericType.INT);
-    TYPE_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored IntField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  NumericUtils#PRECISION_STEP_DEFAULT_32} (8). 
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public IntField(String name, int value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Integer.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link FieldType.NumericType#INT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a INT numericType()
-   */
-  public IntField(String name, int value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.NumericType.INT) {
-      throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
-    }
-    fieldsData = Integer.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/LengthFilter.java b/lucene/core/src/java/org/apache/lucene/document/LengthFilter.java
new file mode 100644
index 0000000..c4ae7d4
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/LengthFilter.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: forked from analysis module; can we share?
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+/**
+ * Removes words that are too long or too short from the stream.
+ * <p>
+ * Note: Length is calculated as the number of UTF-16 code units.
+ * </p>
+ */
+final class LengthFilter extends FilteringTokenFilter {
+
+  private final int min;
+  private final int max;
+
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+
+  /**
+   * Create a new {@link LengthFilter}. This will filter out tokens whose
+   * {@link CharTermAttribute} is either too short ({@link CharTermAttribute#length()}
+   * &lt; min) or too long ({@link CharTermAttribute#length()} &gt; max).
+   * @param in      the {@link TokenStream} to consume
+   * @param min     the minimum length
+   * @param max     the maximum length
+   */
+  public LengthFilter(TokenStream in, int min, int max) {
+    super(in);
+    if (min < 0) {
+      throw new IllegalArgumentException("minimum length must be greater than or equal to zero");
+    }
+    if (min > max) {
+      throw new IllegalArgumentException("maximum length must not be greater than minimum length");
+    }
+    this.min = min;
+    this.max = max;
+  }
+
+  @Override
+  public boolean accept() {
+    final int len = termAtt.length();
+    return (len >= min && len <= max);
+  }
+}
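
A hedged usage sketch for the filter above, via its public analysis-module twin (org.apache.lucene.analysis.miscellaneous.LengthFilter) since this fork is package-private. The no-arg tokenizer constructor and setReader() call are assumptions matching the 5.x analysis module this branch tracks (earlier versions pass a Reader to the constructor); the reset/incrementToken/end/close sequence is the standard TokenStream consumer contract:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.LengthFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class LengthFilterDemo {
      public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tok = new WhitespaceTokenizer();
        tok.setReader(new StringReader("a toolong fine ok"));
        TokenStream ts = new LengthFilter(tok, 2, 4); // keep terms of length 2..4
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString()); // prints "fine", then "ok"
        }
        ts.end();
        ts.close();
      }
    }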
diff --git a/lucene/core/src/java/org/apache/lucene/document/LimitTokenCountFilter.java b/lucene/core/src/java/org/apache/lucene/document/LimitTokenCountFilter.java
new file mode 100644
index 0000000..4c33e01
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/LimitTokenCountFilter.java
@@ -0,0 +1,97 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+
+// TODO: forked from analysis module; can we share?
+
+/**
+ * This TokenFilter limits the number of tokens while indexing. It is
+ * a replacement for the maximum field length setting inside {@link org.apache.lucene.index.IndexWriter}.
+ * <p>
+ * By default, this filter ignores any tokens in the wrapped {@code TokenStream}
+ * once the limit has been reached, which can result in {@code reset()} being 
+ * called prior to {@code incrementToken()} returning {@code false}.  For most 
+ * {@code TokenStream} implementations this should be acceptable, and faster 
+ * than consuming the full stream. If you are wrapping a {@code TokenStream} 
+ * which requires that the full stream of tokens be exhausted in order to 
+ * function properly, use the 
+ * {@link #LimitTokenCountFilter(TokenStream,int,boolean) consumeAllTokens} 
+ * option.
+ */
+final class LimitTokenCountFilter extends TokenFilter {
+
+  private final int maxTokenCount;
+  private final boolean consumeAllTokens;
+  private int tokenCount = 0;
+  private boolean exhausted = false;
+
+  /**
+   * Build a filter that only accepts tokens up to a maximum number.
+   * This filter will not consume any tokens beyond the maxTokenCount limit
+   *
+   * @see #LimitTokenCountFilter(TokenStream,int,boolean)
+   */
+  public LimitTokenCountFilter(TokenStream in, int maxTokenCount) {
+    this(in, maxTokenCount, false);
+  }
+
+  /**
+   * Build a filter that limits the maximum number of tokens per field.
+   * @param in the stream to wrap
+   * @param maxTokenCount max number of tokens to produce
+   * @param consumeAllTokens whether all tokens from the input must be consumed even if maxTokenCount is reached.
+   */
+  public LimitTokenCountFilter(TokenStream in, int maxTokenCount, boolean consumeAllTokens) {
+    super(in);
+    if (maxTokenCount < 1) {
+      throw new IllegalArgumentException("maxTokenCount must be greater than zero");
+    }
+    this.maxTokenCount = maxTokenCount;
+    this.consumeAllTokens = consumeAllTokens;
+  }
+  
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (exhausted) {
+      return false;
+    } else if (tokenCount < maxTokenCount) {
+      if (input.incrementToken()) {
+        tokenCount++;
+        return true;
+      } else {
+        exhausted = true;
+        return false;
+      }
+    } else {
+      while (consumeAllTokens && input.incrementToken()) { /* NOOP */ }
+      return false;
+    }
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    tokenCount = 0;
+    exhausted = false;
+  }
+}
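
A hedged sketch of the cap in action, again via the public analysis-module twin (org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter). With the default consumeAllTokens=false the wrapped stream is abandoned once the cap is reached; pass true when a downstream consumer needs the input fully drained:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class LimitTokenCountDemo {
      public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tok = new WhitespaceTokenizer();
        tok.setReader(new StringReader("one two three four"));
        TokenStream ts = new LimitTokenCountFilter(tok, 2); // consumeAllTokens=false
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString()); // prints "one", then "two", then stops
        }
        ts.end();
        ts.close();
      }
    }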
diff --git a/lucene/core/src/java/org/apache/lucene/document/LongField.java b/lucene/core/src/java/org/apache/lucene/document/LongField.java
deleted file mode 100644
index 9dad385..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/LongField.java
+++ /dev/null
@@ -1,182 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.NumericTokenStream; // javadocs
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.NumericRangeFilter; // javadocs
-import org.apache.lucene.search.NumericRangeQuery; // javadocs
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>long</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LongField(name, 6L, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LongField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LongField field = new LongField(name, 0L, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setLongValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link IntField}, {@link FloatField}, {@link
- * DoubleField}.
- *
- * Any type that can be converted to long can also be
- * indexed.  For example, date/time values represented by a
- * {@link java.util.Date} can be translated into a long
- * value using the {@link java.util.Date#getTime} method.  If you
- * don't need millisecond precision, you can quantize the
- * value, either by dividing the result of
- * {@link java.util.Date#getTime} or using the separate getters
- * (for year, month, etc.) to construct an <code>int</code> or
- * <code>long</code> value.</p>
- *
- * <p>To perform range querying or filtering against a
- * <code>LongField</code>, use {@link NumericRangeQuery} or {@link
- * NumericRangeFilter}.  To sort according to a
- * <code>LongField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LongField</code> 
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>LongField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LongField</code>.</p>
- *
- * <p>A <code>LongField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 4, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * NumericRangeQuery} or {@link NumericRangeFilter}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link NumericRangeQuery}. The format of
- * indexed values is described in {@link NumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * NumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @since 2.9
- */
-
-public final class LongField extends Field {
-  
-  /** 
-   * Type for a LongField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.NumericType.LONG);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LongField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.NumericType.LONG);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LongField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  NumericUtils#PRECISION_STEP_DEFAULT} (16). 
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LongField(String name, long value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Long.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link FieldType.NumericType#LONG}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a LONG numericType()
-   */
-  public LongField(String name, long value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.NumericType.LONG) {
-      throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
-    }
-    fieldsData = Long.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
deleted file mode 100644
index 5d044b7..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
+++ /dev/null
@@ -1,57 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocValuesType;
-
-/**
- * <p>
- * Field that stores a per-document <code>long</code> value for scoring, 
- * sorting or value retrieval. Here's an example usage:
- * 
- * <pre class="prettyprint">
- *   document.add(new NumericDocValuesField(name, 22L));
- * </pre>
- * 
- * <p>
- * If you also need to store the value, you should add a
- * separate {@link StoredField} instance.
- * */
-
-public class NumericDocValuesField extends Field {
-
-  /**
-   * Type for numeric DocValues.
-   */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setDocValuesType(DocValuesType.NUMERIC);
-    TYPE.freeze();
-  }
-
-  /** 
-   * Creates a new DocValues field with the specified 64-bit long value 
-   * @param name field name
-   * @param value 64-bit long value
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public NumericDocValuesField(String name, long value) {
-    super(name, TYPE);
-    fieldsData = Long.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/SingleTokenTokenizer.java b/lucene/core/src/java/org/apache/lucene/document/SingleTokenTokenizer.java
new file mode 100644
index 0000000..50fbd98
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/SingleTokenTokenizer.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+
+// TODO: forked from analysis module; can we share?
+
+/** Tokenizer that emits the entire input as a single token. */
+final class SingleTokenTokenizer extends Tokenizer {
+  /** Default read buffer size */ 
+  public static final int DEFAULT_BUFFER_SIZE = 256;
+
+  private boolean done = false;
+  private int finalOffset;
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  
+  public SingleTokenTokenizer() {
+    this(DEFAULT_BUFFER_SIZE);
+  }
+
+  SingleTokenTokenizer(int bufferSize) {
+    if (bufferSize <= 0) {
+      throw new IllegalArgumentException("bufferSize must be > 0");
+    }
+    termAtt.resizeBuffer(bufferSize);
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (!done) {
+      clearAttributes();
+      done = true;
+      int upto = 0;
+      char[] buffer = termAtt.buffer();
+      while (true) {
+        final int length = input.read(buffer, upto, buffer.length-upto);
+        if (length == -1) break;
+        upto += length;
+        if (upto == buffer.length)
+          buffer = termAtt.resizeBuffer(1+buffer.length);
+      }
+      termAtt.setLength(upto);
+      finalOffset = correctOffset(upto);
+      offsetAtt.setOffset(correctOffset(0), finalOffset);
+      return true;
+    }
+    return false;
+  }
+  
+  @Override
+  public void end() throws IOException {
+    super.end();
+    // set final offset 
+    offsetAtt.setOffset(finalOffset, finalOffset);
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    this.done = false;
+  }
+}
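
The tokenizer above returns the entire reader contents as one token, mirroring KeywordTokenizer. A hedged usage sketch via KeywordTokenizer, since this fork is package-private (setReader() is assumed per the 5.x-era API):

    import java.io.StringReader;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.KeywordTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class SingleTokenDemo {
      public static void main(String[] args) throws Exception {
        Tokenizer tok = new KeywordTokenizer();
        tok.setReader(new StringReader("New York"));
        CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
        tok.reset();
        while (tok.incrementToken()) {
          System.out.println(term.toString()); // prints "New York" as one token
        }
        tok.end();
        tok.close();
      }
    }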
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortKeyComparator.java b/lucene/core/src/java/org/apache/lucene/document/SortKeyComparator.java
new file mode 100644
index 0000000..8e24783
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/SortKeyComparator.java
@@ -0,0 +1,142 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.SimpleFieldComparator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+
+/** Sorts a field by a sort key derived from each document's value (see {@link FieldTypes.SortKey}). */
+class SortKeyComparator extends SimpleFieldComparator<BytesRef> {
+
+  // TODO: we could cache the sort keys...
+  private final BytesRef[] values;
+  private final BytesRefBuilder[] tempBRs;
+  private BinaryDocValues docTerms;
+  private Bits docsWithField;
+  private final String field;
+  private BytesRef bottom;
+  private BytesRef topValue;
+  private final int missingSortCmp;
+  private final FieldTypes.SortKey sortKey;
+
+  /** Sole constructor. */
+  public SortKeyComparator(int numHits, String field, boolean sortMissingLast, FieldTypes.SortKey sortKey) {
+    values = new BytesRef[numHits];
+    tempBRs = new BytesRefBuilder[numHits];
+    this.sortKey = sortKey;
+    this.field = field;
+    missingSortCmp = sortMissingLast ? 1 : -1;
+  }
+
+  @Override
+  public int compare(int slot1, int slot2) {
+    return compareValues(values[slot1], values[slot2]);
+  }
+
+  @Override
+  public int compareBottom(int doc) {
+    final BytesRef comparableBytes = getComparableBytes(doc, docTerms.get(doc));
+    return compareValues(bottom, comparableBytes);
+  }
+
+  @Override
+  public void copy(int slot, int doc) {
+    final BytesRef comparableBytes = getComparableBytes(doc, docTerms.get(doc));
+    if (comparableBytes == null) {
+      values[slot] = null;
+    } else {
+      if (tempBRs[slot] == null) {
+        tempBRs[slot] = new BytesRefBuilder();
+      }
+      tempBRs[slot].copyBytes(comparableBytes);
+      values[slot] = tempBRs[slot].get();
+    }
+  }
+
+  /** Check whether the given value represents <tt>null</tt>. This can be
+   *  useful if the {@link BinaryDocValues} for the field use a special value
+   *  as a sentinel. The default implementation checks the docs-with-field
+   *  bits loaded in {@link #doSetNextReader}.
+   *  <p>NOTE: The null value can only be an EMPTY {@link BytesRef}. */
+  protected boolean isNull(int doc, BytesRef term) {
+    return docsWithField != null && docsWithField.get(doc) == false;
+  }
+
+  @Override
+  public void doSetNextReader(LeafReaderContext context) throws IOException {
+    docTerms = DocValues.getBinary(context.reader(), field);
+    docsWithField = DocValues.getDocsWithField(context.reader(), field);
+    if (docsWithField instanceof Bits.MatchAllBits) {
+      docsWithField = null;
+    }
+  }
+    
+  @Override
+  public void setBottom(final int bottom) {
+    this.bottom = values[bottom];
+  }
+
+  @Override
+  public void setTopValue(BytesRef value) {
+    // null is fine: it means the last doc of the prior
+    // search was missing this value
+    topValue = value;
+  }
+
+  @Override
+  public BytesRef value(int slot) {
+    return values[slot];
+  }
+
+  @Override
+  public int compareValues(BytesRef val1, BytesRef val2) {
+    if (val1 == null) {
+      if (val2 == null) {
+        return 0;
+      }
+      return missingSortCmp;
+    } else if (val2 == null) {
+      return -missingSortCmp;
+    }
+    return sortKey.getKey(val1).compareTo(sortKey.getKey(val2));
+  }
+
+  @Override
+  public int compareTop(int doc) {
+    final BytesRef comparableBytes = getComparableBytes(doc, docTerms.get(doc));
+    return compareValues(topValue, comparableBytes);
+  }
+
+  /**
+   * Given a document and a term, return the term itself if it exists or
+   * <tt>null</tt> otherwise.
+   */
+  private BytesRef getComparableBytes(int doc, BytesRef term) {
+    if (term.length == 0 && docsWithField != null && docsWithField.get(doc) == false) {
+      return null;
+    }
+    return term;
+  }
+}
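
FieldTypes.SortKey is not shown in this hunk; from the call sortKey.getKey(val1).compareTo(sortKey.getKey(val2)) above, it is assumed to map a BytesRef to a Comparable. A hedged sketch of what such a key might look like (the interface shape here is an inference, not the branch's actual definition):

    import org.apache.lucene.util.BytesRef;

    interface SortKey {                  // stand-in for FieldTypes.SortKey
      Comparable getKey(BytesRef value); // raw Comparable, matching the usage above
    }

    class CaseInsensitiveSortKey implements SortKey {
      @Override
      public Comparable getKey(BytesRef value) {
        // Derive the key from the stored bytes; the comparator above calls
        // this on both sides of every comparison (and could cache results,
        // per the TODO in SortKeyComparator).
        return value.utf8ToString().toLowerCase(java.util.Locale.ROOT);
      }
    }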
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
deleted file mode 100644
index 46966fd..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
+++ /dev/null
@@ -1,60 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * <p>
- * Field that stores
- * a per-document {@link BytesRef} value, indexed for
- * sorting.  Here's an example usage:
- * 
- * <pre class="prettyprint">
- *   document.add(new SortedDocValuesField(name, new BytesRef("hello")));
- * </pre>
- * 
- * <p>
- * If you also need to store the value, you should add a
- * separate {@link StoredField} instance.
- * 
- * */
-
-public class SortedDocValuesField extends Field {
-
-  /**
-   * Type for sorted bytes DocValues
-   */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setDocValuesType(DocValuesType.SORTED);
-    TYPE.freeze();
-  }
-
-  /**
-   * Create a new sorted DocValues field.
-   * @param name field name
-   * @param bytes binary content
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public SortedDocValuesField(String name, BytesRef bytes) {
-    super(name, TYPE);
-    fieldsData = bytes;
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
deleted file mode 100644
index 772e830..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
+++ /dev/null
@@ -1,67 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <p>
- * Field that stores a per-document <code>long</code> values for scoring, 
- * sorting or value retrieval. Here's an example usage:
- * 
- * <pre class="prettyprint">
- *   document.add(new SortedNumericDocValuesField(name, 5L));
- *   document.add(new SortedNumericDocValuesField(name, 14L));
- * </pre>
- * 
- * <p>
- * Note that if you want to encode doubles or floats with proper sort order,
- * you will need to encode them with {@link NumericUtils}:
- * 
- * <pre class="prettyprint">
- *   document.add(new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(-5.3f)));
- * </pre>
- * 
- * <p>
- * If you also need to store the value, you should add a
- * separate {@link StoredField} instance.
- * */
-
-public class SortedNumericDocValuesField extends Field {
-
-  /**
-   * Type for sorted numeric DocValues.
-   */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC);
-    TYPE.freeze();
-  }
-
-  /** 
-   * Creates a new DocValues field with the specified 64-bit long value 
-   * @param name field name
-   * @param value 64-bit long value
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public SortedNumericDocValuesField(String name, long value) {
-    super(name, TYPE);
-    fieldsData = Long.valueOf(value);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
deleted file mode 100644
index 23e635d..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
+++ /dev/null
@@ -1,61 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * <p>
- * Field that stores
- * a set of per-document {@link BytesRef} values, indexed for
- * faceting,grouping,joining.  Here's an example usage:
- * 
- * <pre class="prettyprint">
- *   document.add(new SortedSetDocValuesField(name, new BytesRef("hello")));
- *   document.add(new SortedSetDocValuesField(name, new BytesRef("world")));
- * </pre>
- * 
- * <p>
- * If you also need to store the value, you should add a
- * separate {@link StoredField} instance.
- * 
- * */
-
-public class SortedSetDocValuesField extends Field {
-
-  /**
-   * Type for sorted bytes DocValues
-   */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setDocValuesType(DocValuesType.SORTED_SET);
-    TYPE.freeze();
-  }
-
-  /**
-   * Create a new sorted DocValues field.
-   * @param name field name
-   * @param bytes binary content
-   * @throws IllegalArgumentException if the field name is null
-   */
-  public SortedSetDocValuesField(String name, BytesRef bytes) {
-    super(name, TYPE);
-    fieldsData = bytes;
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/StoredField.java b/lucene/core/src/java/org/apache/lucene/document/StoredField.java
deleted file mode 100644
index d0fc08d..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/StoredField.java
+++ /dev/null
@@ -1,169 +0,0 @@
-package org.apache.lucene.document;
-
-import org.apache.lucene.index.IndexReader; // javadocs
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.search.IndexSearcher; // javadocs
-import org.apache.lucene.util.BytesRef;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** A field whose value is stored so that {@link
- *  IndexSearcher#doc} and {@link IndexReader#document} will
- *  return the field and its value. */
-public class StoredField extends Field {
-
-  /**
-   * Type for a stored-only field.
-   */
-  public final static FieldType TYPE;
-  static {
-    TYPE = new FieldType();
-    TYPE.setStored(true);
-    TYPE.freeze();
-  }
-
-  /**
-   * Expert: allows you to customize the {@link
-   * FieldType}.
-   * @param name field name
-   * @param type custom {@link FieldType} for this field
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  protected StoredField(String name, FieldType type) {
-    super(name, type);
-  }
-  
-  /**
-   * Expert: allows you to customize the {@link
-   * FieldType}.
-   * <p>NOTE: the provided byte[] is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param bytes byte array pointing to binary content (not copied)
-   * @param type custom {@link FieldType} for this field
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, BytesRef bytes, FieldType type) {
-    super(name, bytes, type);
-  }
-  
-  /**
-   * Create a stored-only field with the given binary value.
-   * <p>NOTE: the provided byte[] is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param value byte array pointing to binary content (not copied)
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, byte[] value) {
-    super(name, value, TYPE);
-  }
-  
-  /**
-   * Create a stored-only field with the given binary value.
-   * <p>NOTE: the provided byte[] is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param value byte array pointing to binary content (not copied)
-   * @param offset starting position of the byte array
-   * @param length valid length of the byte array
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, byte[] value, int offset, int length) {
-    super(name, value, offset, length, TYPE);
-  }
-
-  /**
-   * Create a stored-only field with the given binary value.
-   * <p>NOTE: the provided BytesRef is not copied so be sure
-   * not to change it until you're done with this field.
-   * @param name field name
-   * @param value BytesRef pointing to binary content (not copied)
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, BytesRef value) {
-    super(name, value, TYPE);
-  }
-
-  /**
-   * Create a stored-only field with the given string value.
-   * @param name field name
-   * @param value string value
-   * @throws IllegalArgumentException if the field name or value is null.
-   */
-  public StoredField(String name, String value) {
-    super(name, value, TYPE);
-  }
-  
-  /**
-   * Expert: allows you to customize the {@link
-   * FieldType}.
-   * @param name field name
-   * @param value string value
-   * @param type custom {@link FieldType} for this field
-   * @throws IllegalArgumentException if the field name or value is null.
-   */
-  public StoredField(String name, String value, FieldType type) {
-    super(name, value, type);
-  }
-
-  // TODO: not great but maybe not a big problem?
-  /**
-   * Create a stored-only field with the given integer value.
-   * @param name field name
-   * @param value integer value
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, int value) {
-    super(name, TYPE);
-    fieldsData = value;
-  }
-
-  /**
-   * Create a stored-only field with the given float value.
-   * @param name field name
-   * @param value float value
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, float value) {
-    super(name, TYPE);
-    fieldsData = value;
-  }
-
-  /**
-   * Create a stored-only field with the given long value.
-   * @param name field name
-   * @param value long value
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, long value) {
-    super(name, TYPE);
-    fieldsData = value;
-  }
-
-  /**
-   * Create a stored-only field with the given double value.
-   * @param name field name
-   * @param value double value
-   * @throws IllegalArgumentException if the field name is null.
-   */
-  public StoredField(String name, double value) {
-    super(name, TYPE);
-    fieldsData = value;
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/StringField.java b/lucene/core/src/java/org/apache/lucene/document/StringField.java
deleted file mode 100644
index e960307..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/StringField.java
+++ /dev/null
@@ -1,60 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.IndexOptions;
-
-/** A field that is indexed but not tokenized: the entire
- *  String value is indexed as a single token.  For example
- *  this might be used for a 'country' field or an 'id'
- *  field, or any field that you intend to use for sorting
- *  or access through the field cache. */
-
-public final class StringField extends Field {
-
-  /** Indexed, not tokenized, omits norms, indexes
-   *  DOCS_ONLY, not stored. */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-
-  /** Indexed, not tokenized, omits norms, indexes
-   *  DOCS_ONLY, stored */
-  public static final FieldType TYPE_STORED = new FieldType();
-
-  static {
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setTokenized(false);
-    TYPE_NOT_STORED.freeze();
-
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.setTokenized(false);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a new StringField. 
-   *  @param name field name
-   *  @param value String value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name or value is null.
-   */
-  public StringField(String name, String value, Store stored) {
-    super(name, value, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java b/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java
new file mode 100644
index 0000000..e1b17e9
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java
@@ -0,0 +1,73 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+
+final class StringTokenStream extends TokenStream {
+  private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+  private boolean used = false;
+  private String value = null;
+    
+  /** Creates a new TokenStream that returns a String as a single token.
+   * <p>Warning: Does not initialize the value; you must call
+   * {@link #setValue(String)} afterwards!
+   */
+  StringTokenStream() {
+  }
+    
+  /** Sets the string value. */
+  void setValue(String value) {
+    this.value = value;
+  }
+
+  @Override
+  public boolean incrementToken() {
+    if (used) {
+      return false;
+    }
+    clearAttributes();
+    termAttribute.append(value);
+    offsetAttribute.setOffset(0, value.length());
+    used = true;
+    return true;
+  }
+
+  @Override
+  public void end() throws IOException {
+    super.end();
+    final int finalOffset = value.length();
+    offsetAttribute.setOffset(finalOffset, finalOffset);
+  }
+    
+  @Override
+  public void reset() {
+    used = false;
+  }
+
+  @Override
+  public void close() {
+    value = null;
+  }
+}
+
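A minimal consumption sketch for this new single-token stream, following the standard TokenStream contract (reset, incrementToken, end, close). The demo class name is hypothetical, and it must live in org.apache.lucene.document because StringTokenStream is package-private:

package org.apache.lucene.document; // StringTokenStream is package-private

import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

class StringTokenStreamDemo {
  public static void main(String[] args) throws Exception {
    StringTokenStream stream = new StringTokenStream();
    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    stream.setValue("hello");          // must be set before the stream is used
    stream.reset();                    // standard contract: reset before consuming
    while (stream.incrementToken()) {  // emits exactly one token per reset
      System.out.println(termAtt);     // prints "hello"
    }
    stream.end();                      // records the final offset
    stream.close();                    // drops the reference to the value
  }
}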
diff --git a/lucene/core/src/java/org/apache/lucene/document/TextField.java b/lucene/core/src/java/org/apache/lucene/document/TextField.java
deleted file mode 100644
index b6a3053..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/TextField.java
+++ /dev/null
@@ -1,79 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.Reader;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.index.IndexOptions;
-
-/** A field that is indexed and tokenized, without term
- *  vectors.  For example this would be used on a 'body'
- *  field, that contains the bulk of a document's text. */
-
-public final class TextField extends Field {
-
-  /** Indexed, tokenized, not stored. */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-
-  /** Indexed, tokenized, stored. */
-  public static final FieldType TYPE_STORED = new FieldType();
-
-  static {
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.freeze();
-
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  // TODO: add sugar for term vectors...?
-
-  /** Creates a new un-stored TextField with Reader value. 
-   * @param name field name
-   * @param reader reader value
-   * @throws IllegalArgumentException if the field name is null
-   * @throws NullPointerException if the reader is null
-   */
-  public TextField(String name, Reader reader) {
-    super(name, reader, TYPE_NOT_STORED);
-  }
-
-  /** Creates a new TextField with String value. 
-   * @param name field name
-   * @param value string value
-   * @param store Store.YES if the content should also be stored
-   * @throws IllegalArgumentException if the field name or value is null.
-   */
-  public TextField(String name, String value, Store store) {
-    super(name, value, store == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-  }
-  
-  /** Creates a new un-stored TextField with TokenStream value. 
-   * @param name field name
-   * @param stream TokenStream value
-   * @throws IllegalArgumentException if the field name is null.
-   * @throws NullPointerException if the tokenStream is null
-   */
-  public TextField(String name, TokenStream stream) {
-    super(name, stream, TYPE_NOT_STORED);
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/AlwaysForceMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/AlwaysForceMergePolicy.java
new file mode 100644
index 0000000..dd4b55e
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/AlwaysForceMergePolicy.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+
+/** Wraps another {@link MergePolicy}; when a forceMerge is requested and the wrapped merge
+ *  policy would do nothing, this one merges all segments into a single segment anyway. */
+public class AlwaysForceMergePolicy extends MergePolicy {
+
+  /** Wrapped {@link MergePolicy}. */
+  protected final MergePolicy in;
+
+  /** Wrap the given {@link MergePolicy}, intercepting forceMerge requests so that
+   * a merge is always performed, even when the wrapped policy finds nothing to do. */
+  public AlwaysForceMergePolicy(MergePolicy in) {
+    this.in = in;
+  }
+
+  private boolean didForceMerge;
+  
+  @Override
+  public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
+    return in.findMerges(null, segmentInfos, writer);
+  }
+
+  /** Call this to "force" a force merge again. */
+  public void reset() {
+    didForceMerge = false;
+  }
+  
+  @Override
+  public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
+    MergeSpecification spec = in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer);
+    if (spec == null && didForceMerge == false) {
+      didForceMerge = true;
+      List<SegmentCommitInfo> infos = new ArrayList<>();
+      for(SegmentCommitInfo info : segmentInfos) {
+        infos.add(info);
+      }
+      if (infos.isEmpty()) {
+        spec = null;
+      } else {
+        spec = new MergeSpecification();
+        spec.add(new OneMerge(infos));
+      }
+    }
+    return spec;
+  }
+  
+  @Override
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
+    return in.findForcedDeletesMerges(segmentInfos, writer);
+  }
+  
+  @Override
+  public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
+    return in.useCompoundFile(segments, newSegment, writer);
+  }
+  
+  @Override
+  public String toString() {
+    return "[" + getClass().getSimpleName() + "->" + in + "]";
+  }
+}
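A hypothetical usage sketch: wrapping a stock TieredMergePolicy so that forceMerge always rewrites the index, and using reset() to allow a second forced merge. The null-analyzer IndexWriterConfig mirrors the usage in the CheckIndex change below; the demo class itself is an assumption, not part of the patch:

import org.apache.lucene.index.AlwaysForceMergePolicy;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class AlwaysForceMergeDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    AlwaysForceMergePolicy mp = new AlwaysForceMergePolicy(new TieredMergePolicy());
    try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setMergePolicy(mp))) {
      // ... add documents ...
      w.forceMerge(1);  // merges even when TieredMergePolicy would return no merges
      mp.reset();       // without this, a second no-op forceMerge stays a no-op
      w.forceMerge(1);
    }
  }
}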
diff --git a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
index 1f52b92..185f651 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
@@ -22,6 +22,8 @@
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.lucene.document.FieldTypes;
+
 /** Base class for implementing {@link CompositeReader}s based on an array
  * of sub-readers. The implementing class has to add code for
  * correctly refcounting and closing the sub-readers.
@@ -56,6 +58,8 @@
    * for effectiveness the array is used internally. */
   private final List<R> subReadersList;
 
+  private final FieldTypes fieldTypes;
+
   /**
    * Constructs a {@code BaseCompositeReader} on the given subReaders.
    * @param subReaders the wrapped sub-readers. This array is returned by
@@ -67,11 +71,14 @@
   protected BaseCompositeReader(R[] subReaders) {
     this.subReaders = subReaders;
     this.subReadersList = Collections.unmodifiableList(Arrays.asList(subReaders));
+    fieldTypes = new FieldTypes(null);
     starts = new int[subReaders.length + 1];    // build starts array
     int maxDoc = 0, numDocs = 0;
     for (int i = 0; i < subReaders.length; i++) {
       starts[i] = maxDoc;
       final IndexReader r = subReaders[i];
+      // Throws exc if the sub-reader field types are inconsistent:
+      fieldTypes.addAll(r.getFieldTypes());
       maxDoc += r.maxDoc();      // compute maxDocs
       if (maxDoc < 0 /* overflow */ || maxDoc > IndexWriter.getActualMaxDocs()) {
         throw new IllegalArgumentException("Too many documents, composite IndexReaders cannot exceed " + IndexWriter.getActualMaxDocs());
@@ -85,6 +92,11 @@
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
+  @Override
   public final Fields getTermVectors(int docID) throws IOException {
     ensureOpen();
     final int i = readerIndex(docID);        // find subreader num
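For illustration, a sketch of what the new addAll check buys: composing readers whose schemas disagree should now fail fast at construction time. FieldTypes is the branch's schema API; the helper class below is hypothetical:

import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.store.Directory;

public class MergedFieldTypesDemo {
  /** Opens both indices under one composite reader; BaseCompositeReader's
   *  constructor runs fieldTypes.addAll(r.getFieldTypes()) per sub-reader,
   *  throwing if the same field was indexed with conflicting types. */
  public static FieldTypes openMerged(Directory dir1, Directory dir2) throws Exception {
    IndexReader r1 = DirectoryReader.open(dir1);
    IndexReader r2 = DirectoryReader.open(dir2);
    MultiReader both = new MultiReader(r1, r2);
    return both.getFieldTypes();
  }
}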
diff --git a/lucene/core/src/java/org/apache/lucene/index/BinaryDocValuesFieldUpdates.java b/lucene/core/src/java/org/apache/lucene/index/BinaryDocValuesFieldUpdates.java
index a5b817f..2add260 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BinaryDocValuesFieldUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BinaryDocValuesFieldUpdates.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 5b993e2..272c989 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -56,6 +56,8 @@
 
 class BufferedUpdatesStream implements Accountable {
 
+  // TODO: if IW knows the field is unique, we can be faster here
+
   // TODO: maybe linked list?
   private final List<FrozenBufferedUpdates> updates = new ArrayList<>();
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 8963535..3dc1191 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -25,13 +25,17 @@
 import java.text.NumberFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Deque;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.CheckIndex.Status.DocValuesStatus;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -51,6 +55,7 @@
 import org.apache.lucene.util.LongBitSet;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.Version;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 
 /**
  * Basic tool and API to check the health of an index and
@@ -139,6 +144,9 @@
     /** Holds the userData of the last commit in the index */
     public Map<String, String> userData;
 
+    /** How many docs violate unique field constraints and would be deleted with -exorcise. */
+    public int nonUniqueCount;
+
     /** Holds the status of each segment in the index.
      *  See {@link #segmentInfos}.
      *
@@ -472,6 +480,8 @@
       return result;
     }
 
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(sis, null, null);
+
     // find the oldest and newest segment versions
     Version oldest = null;
     Version newest = null;
@@ -531,8 +541,10 @@
     result.numSegments = numSegments;
     result.userData = sis.getUserData();
     String userDataString;
-    if (sis.getUserData().size() > 0) {
-      userDataString = " userData=" + sis.getUserData();
+    Map<String,String> userData = new HashMap<String,String>(sis.getUserData());
+    userData.remove(FieldTypes.FIELD_TYPES_KEY);
+    if (userData.isEmpty() == false) {
+      userDataString = " userData=" + userData;
     } else {
       userDataString = "";
     }
@@ -569,11 +581,18 @@
       return result;
     }
 
-
     result.newSegments = sis.clone();
     result.newSegments.clear();
+
+    // Carry over field types:
+    userData = sis.getUserData();
+    if (userData.containsKey(FieldTypes.FIELD_TYPES_KEY)) {
+      result.newSegments.getUserData().put(FieldTypes.FIELD_TYPES_KEY, userData.get(FieldTypes.FIELD_TYPES_KEY));
+    }
     result.maxSegmentName = -1;
 
+    IndexReader[] segmentReaders = new IndexReader[numSegments];
+
     for(int i=0;i<numSegments;i++) {
       final SegmentCommitInfo info = sis.info(i);
       int segmentName = Integer.parseInt(info.info.name.substring(1), Character.MAX_RADIX);
@@ -627,8 +646,9 @@
         }
         if (infoStream != null)
           infoStream.print("    test: open reader.........");
-        reader = new SegmentReader(info, IOContext.DEFAULT);
+        reader = new SegmentReader(fieldTypes, info, IOContext.DEFAULT);
         msg(infoStream, "OK");
+        segmentReaders[i] = reader;
 
         segInfoStat.openReaderPassed = true;
         
@@ -719,21 +739,82 @@
         result.totLoseDocCount += toLoseDocCount;
         result.numBadSegments++;
         continue;
-      } finally {
-        if (reader != null)
-          reader.close();
       }
 
       // Keeper
       result.newSegments.add(info.clone());
     }
 
-    if (0 == result.numBadSegments) {
-      result.clean = true;
-    } else
-      msg(infoStream, "WARNING: " + result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents) detected");
+    if (onlySegments == null && result.numBadSegments == 0) {
+      MultiReader topReader = new MultiReader(segmentReaders);
+      try {
+        int nonUniqueCount = 0;
+        String nonUniqueMessage = null;
 
-    if ( ! (result.validCounter = (result.maxSegmentName < sis.counter))) {
+        for(String fieldName : fieldTypes.getFieldNames()) {
+          if (fieldTypes.getIsUnique(fieldName)) {
+            Terms terms = MultiFields.getTerms(topReader, fieldName);
+            if (terms != null) {
+              Bits liveDocs = MultiFields.getLiveDocs(topReader);
+              TermsEnum termsEnum = terms.iterator(null);
+              DocsEnum docsEnum = null;
+              while (termsEnum.next() != null) {
+                docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
+                int docID = docsEnum.nextDoc();
+                if (docID != DocsEnum.NO_MORE_DOCS) {
+                  while (true) {
+                    int docID2 = docsEnum.nextDoc();
+                    if (docID2 != DocsEnum.NO_MORE_DOCS) {
+                      if (nonUniqueCount == 0) {
+                        nonUniqueMessage = "field=\"" + fieldName + "\" is supposed to be unique, but isn't: e.g. term=" + termsEnum.term() + " matches both docID=" + docID + " and docID=" + docID2;
+                        if (failFast) {
+                          msg(infoStream, "FAILED");
+                          msg(infoStream, nonUniqueMessage);
+                          throw new RuntimeException(nonUniqueMessage);
+                        }
+                      }
+                      nonUniqueCount++;
+                    } else {
+                      break;
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+
+        if (nonUniqueCount != 0) {
+          nonUniqueMessage += "; total " + nonUniqueCount + " non-unique documents would be deleted";
+          result.nonUniqueCount = nonUniqueCount;
+          msg(infoStream, "FAILED");
+          msg(infoStream, nonUniqueMessage);
+        }
+      } finally {
+        topReader.close();
+      }
+    }
+
+    if (0 == result.numBadSegments && 0 == result.nonUniqueCount) {
+      result.clean = true;
+    } else {
+      StringBuilder whatsWrong = new StringBuilder();
+      if (result.numBadSegments != 0) {
+        whatsWrong.append(result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents)");
+      }
+      if (result.nonUniqueCount != 0) {
+        if (whatsWrong.length() != 0) {
+          whatsWrong.append(", and up to ");
+        }
+        whatsWrong.append(result.nonUniqueCount + " non-unique documents");
+      }
+        
+      msg(infoStream, "WARNING: " + whatsWrong.toString());
+    }
+
+    result.validCounter = result.maxSegmentName < sis.counter;
+
+    if (result.validCounter == false) {
       result.clean = false;
       result.newSegments.counter = result.maxSegmentName + 1; 
       msg(infoStream, "ERROR: Next segment name counter " + sis.counter + " is not greater than max segment name " + result.maxSegmentName);
@@ -872,6 +953,181 @@
     return status;
   }
 
+  /** Visits all terms in the range minTerm (inclusive) to maxTerm (exclusive), marking all doc IDs encountered in docsSeen, and
+   *  returning the total number of terms visited. */
+  private static long getDocsFromTermRange(String field, int maxDoc, TermsEnum termsEnum, FixedBitSet docsSeen, BytesRef minTerm, BytesRef maxTerm, boolean isIntersect) throws IOException {
+    docsSeen.clear(0, docsSeen.length());
+
+    long termCount = 0;
+    DocsEnum docsEnum = null;
+    BytesRefBuilder lastTerm = null;
+    while (true) {
+      BytesRef term;
+
+      // Kinda messy: for intersect, we must first next(), but for "normal", we are already on our first term:
+      if (isIntersect || termCount != 0) {
+        term = termsEnum.next();
+      } else {
+        term = termsEnum.term();
+      }
+
+      if (term == null) {
+        if (isIntersect == false) {
+          throw new RuntimeException("didn't see max term field=" + field + " term=" + maxTerm);
+        }
+        return termCount;
+      }
+
+      assert term.isValid();
+        
+      if (lastTerm == null) {
+        lastTerm = new BytesRefBuilder();
+        lastTerm.copyBytes(term);
+      } else {
+        if (lastTerm.get().compareTo(term) >= 0) {
+          throw new RuntimeException("terms out of order: lastTerm=" + lastTerm + " term=" + term);
+        }
+        lastTerm.copyBytes(term);
+      }
+
+      //System.out.println("    term=" + term);
+
+      // Caller already ensured terms enum positioned >= minTerm:
+      if (term.compareTo(minTerm) < 0) {
+        throw new RuntimeException("saw term before min term field=" + field + " term=" + minTerm);
+      }
+
+      if (isIntersect == false) {
+        int cmp = term.compareTo(maxTerm);
+        if (cmp == 0) {
+          // Done!
+          return termCount;
+        } else if (cmp > 0) {
+          throw new RuntimeException("didn't see end term field=" + field + " term=" + maxTerm);
+        }
+      }
+
+      docsEnum = termsEnum.docs(null, docsEnum, 0);
+
+      int lastDoc = -1;
+      while (true) {
+        int doc = docsEnum.nextDoc();
+        if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+          break;
+        }
+        if (doc <= lastDoc) {
+          throw new RuntimeException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
+        }
+        if (doc >= maxDoc) {
+          throw new RuntimeException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
+        }
+
+        //System.out.println("      doc=" + doc);
+        docsSeen.set(doc);
+
+        lastDoc = doc;
+      }
+
+      termCount++;
+    }
+  }
+
+  /** Test Terms.intersect on this range, and validates that it returns the same doc ids as using non-intersect TermsEnum.  Returns true if
+   *  any fake terms were seen. */
+  private static boolean checkSingleTermRange(String field, int maxDoc, Terms terms, BytesRef minTerm, BytesRef maxTerm, FixedBitSet normalDocs, FixedBitSet intersectDocs) throws IOException {
+    DocsEnum docsEnum = null;
+    // System.out.println("  check minTerm=" + minTerm + " maxTerm=" + maxTerm);
+
+    TermsEnum termsEnum = terms.iterator(null);
+    TermsEnum.SeekStatus status = termsEnum.seekCeil(minTerm);
+    if (status != TermsEnum.SeekStatus.FOUND) {
+      throw new RuntimeException("failed to seek to existing term field=" + field + " term=" + minTerm);
+    }
+
+    // Do "dumb" iteration to visit all terms in the range:
+    long normalTermCount = getDocsFromTermRange(field, maxDoc, termsEnum, normalDocs, minTerm, maxTerm, false);
+
+    // Now do the same operation using intersect:
+    long intersectTermCount = getDocsFromTermRange(field, maxDoc, terms.intersect(new CompiledAutomaton(minTerm, true, maxTerm, false), null), intersectDocs, minTerm, maxTerm, true);
+
+    if (intersectTermCount > normalTermCount) {
+      throw new RuntimeException("intersect returned too many terms: field=" + field + " intersectTermCount=" + intersectTermCount + " normalTermCount=" + normalTermCount);
+    }
+
+    if (normalDocs.equals(intersectDocs) == false) {
+      throw new RuntimeException("intersect visited different docs than straight terms enum: " + normalDocs.cardinality() + " for straight enum, vs " + intersectDocs.cardinality() + " for intersect, minTerm=" + minTerm + " maxTerm=" + maxTerm);
+    }
+    //System.out.println("    " + intersectTermCount + " vs " + normalTermCount);
+    return intersectTermCount != normalTermCount;
+  }
+
+  /** Make an effort to visit "fake" (e.g. auto-prefix) terms.  We do this by running term range intersections across an initially wide
+   *  interval of terms, at different boundaries, and then gradually decrease the interval.  This is not guaranteed to hit all non-real
+   *  terms (doing that in general is non-trivial), but it should hit many of them, and validate their postings against the postings for the
+   *  real terms. */
+  private static void checkTermRanges(String field, int maxDoc, Terms terms, long numTerms) throws IOException {
+
+    // We'll target this many terms in our interval for the current level:
+    double currentInterval = numTerms;
+
+    FixedBitSet normalDocs = new FixedBitSet(maxDoc);
+    FixedBitSet intersectDocs = new FixedBitSet(maxDoc);
+
+    TermsEnum termsEnum = null;
+    //System.out.println("CI.checkTermRanges field=" + field + " numTerms=" + numTerms);
+
+    while (currentInterval >= 10.0) {
+      //System.out.println("  cycle interval=" + currentInterval);
+
+      // We iterate this terms enum to locate min/max term for each sliding/overlapping interval we test at the current level:
+      termsEnum = terms.iterator(termsEnum);
+
+      long termCount = 0;
+
+      Deque<BytesRef> termBounds = new LinkedList<>();
+
+      long lastTermAdded = Long.MIN_VALUE;
+
+      BytesRefBuilder lastTerm = null;
+
+      while (true) {
+        BytesRef term = termsEnum.next();
+        if (term == null) {
+          break;
+        }
+        //System.out.println("  top: term=" + term.utf8ToString());
+        if (termCount >= lastTermAdded + currentInterval/4) {
+          termBounds.add(BytesRef.deepCopyOf(term));
+          lastTermAdded = termCount;
+          if (termBounds.size() == 5) {
+            BytesRef minTerm = termBounds.removeFirst();
+            BytesRef maxTerm = termBounds.getLast();
+            checkSingleTermRange(field, maxDoc, terms, minTerm, maxTerm, normalDocs, intersectDocs);
+          }
+        }
+        termCount++;
+
+        if (lastTerm == null) {
+          lastTerm = new BytesRefBuilder();
+          lastTerm.copyBytes(term);
+        } else {
+          if (lastTerm.get().compareTo(term) >= 0) {
+            throw new RuntimeException("terms out of order: lastTerm=" + lastTerm + " term=" + term);
+          }
+          lastTerm.copyBytes(term);
+        }
+      }
+
+      if (lastTerm != null && termBounds.isEmpty() == false) {
+        BytesRef minTerm = termBounds.removeFirst();
+        BytesRef maxTerm = lastTerm.get();
+        checkSingleTermRange(field, maxDoc, terms, minTerm, maxTerm, normalDocs, intersectDocs);
+      }
+
+      currentInterval *= .75;
+    }
+  }
+
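As a standalone illustration of the intersect primitive used above (the helper class is hypothetical; the binary-interval CompiledAutomaton constructor is the same one checkSingleTermRange calls):

import java.io.IOException;

import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CompiledAutomaton;

final class TermRangeSketch {
  /** Counts all terms in [minTerm, maxTerm), including any "fake"
   *  (e.g. auto-prefix) terms the codec chooses to surface. */
  static long countRange(Terms terms, BytesRef minTerm, BytesRef maxTerm) throws IOException {
    TermsEnum te = terms.intersect(new CompiledAutomaton(minTerm, true, maxTerm, false), null);
    long count = 0;
    while (te.next() != null) {
      count++;
    }
    return count;
  }
}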
   /**
    * checks Fields api is consistent with itself.
    * searcher is optional, to verify with queries. Can be null.
@@ -888,6 +1144,7 @@
     
     String lastField = null;
     for (String field : fields) {
+
       // MultiFieldsEnum relies upon this order...
       if (lastField != null && field.compareTo(lastField) <= 0) {
         throw new RuntimeException("fields out of order: lastField=" + lastField + " field=" + field);
@@ -991,7 +1248,8 @@
         if (term == null) {
           break;
         }
-
+        // System.out.println("CI: field=" + field + " check term=" + term + " docFreq=" + termsEnum.docFreq());
+        
         assert term.isValid();
         
         // make sure terms arrive in order according to
@@ -1280,6 +1538,14 @@
         // docs got deleted and then merged away):
         
       } else {
+
+        long fieldTermCount = (status.delTermCount+status.termCount)-termCountStart;
+
+        if (hasFreqs == false) {
+          // For DOCS_ONLY fields we recursively test term ranges:
+          checkTermRanges(field, maxDoc, fieldTerms, fieldTermCount);
+        }
+        
         final Object stats = fieldTerms.getStats();
         assert stats != null;
         if (status.blockTreeStats == null) {
@@ -1301,11 +1567,9 @@
           }
         }
         
-        if (fieldTerms != null) {
-          final int v = fieldTerms.getDocCount();
-          if (v != -1 && visitedDocs.cardinality() != v) {
-            throw new RuntimeException("docCount for field " + field + "=" + v + " != recomputed docCount=" + visitedDocs.cardinality());
-          }
+        final int v = fieldTerms.getDocCount();
+        if (v != -1 && visitedDocs.cardinality() != v) {
+          throw new RuntimeException("docCount for field " + field + "=" + v + " != recomputed docCount=" + visitedDocs.cardinality());
         }
         
         // Test seek to last term:
@@ -1313,6 +1577,9 @@
           if (termsEnum.seekCeil(lastTerm.get()) != TermsEnum.SeekStatus.FOUND) { 
             throw new RuntimeException("seek to last term " + lastTerm + " failed");
           }
+          if (termsEnum.term().equals(lastTerm.get()) == false) {
+            throw new RuntimeException("seek to last term " + lastTerm.get() + " returned FOUND but seeked to the wrong term " + termsEnum.term());
+          }
           
           int expectedDocFreq = termsEnum.docFreq();
           DocsEnum d = termsEnum.docs(null, null, DocsEnum.FLAG_NONE);
@@ -1321,21 +1588,21 @@
             docFreq++;
           }
           if (docFreq != expectedDocFreq) {
-            throw new RuntimeException("docFreq for last term " + lastTerm + "=" + expectedDocFreq + " != recomputed docFreq=" + docFreq);
+            throw new RuntimeException("docFreq for last term " + lastTerm.toBytesRef() + "=" + expectedDocFreq + " != recomputed docFreq=" + docFreq);
           }
         }
         
         // check unique term count
         long termCount = -1;
         
-        if ((status.delTermCount+status.termCount)-termCountStart > 0) {
+        if (fieldTermCount > 0) {
           termCount = fields.terms(field).size();
           
-          if (termCount != -1 && termCount != status.delTermCount + status.termCount - termCountStart) {
-            throw new RuntimeException("termCount mismatch " + (status.delTermCount + termCount) + " vs " + (status.termCount - termCountStart));
+          if (termCount != -1 && termCount != fieldTermCount) {
+            throw new RuntimeException("termCount mismatch " + termCount + " vs " + fieldTermCount);
           }
         }
-        
+
         // Test seeking by ord
         if (hasOrd && status.termCount-termCountStart > 0) {
           int seekCount = (int) Math.min(10000L, termCount);
@@ -1355,6 +1622,9 @@
               if (termsEnum.seekCeil(seekTerms[i]) != TermsEnum.SeekStatus.FOUND) {
                 throw new RuntimeException("seek to existing term " + seekTerms[i] + " failed");
               }
+              if (termsEnum.term().equals(seekTerms[i]) == false) {
+                throw new RuntimeException("seek to existing term " + seekTerms[i] + " returned FOUND but seeked to the wrong term " + termsEnum.term());
+              }
               
               docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
               if (docs == null) {
@@ -1488,7 +1758,7 @@
       for (int j = 0; j < reader.maxDoc(); ++j) {
         // Intentionally pull even deleted documents to
         // make sure they too are not corrupt:
-        StoredDocument doc = reader.document(j);
+        Document doc = reader.document(j);
         if (liveDocs == null || liveDocs.get(j)) {
           status.docCount++;
           status.totFields += doc.getFields().size();
@@ -1726,58 +1996,55 @@
       case SORTED:
         status.totalSortedFields++;
         checkSortedDocValues(fi.name, reader, reader.getSortedDocValues(fi.name), docsWithField);
-        if (reader.getBinaryDocValues(fi.name) != null ||
-            reader.getNumericDocValues(fi.name) != null ||
-            reader.getSortedNumericDocValues(fi.name) != null ||
-            reader.getSortedSetDocValues(fi.name) != null) {
-          throw new RuntimeException(fi.name + " returns multiple docvalues types!");
-        }
+        checkMoreThanOneDocValuesType(fi.name, reader);
         break;
       case SORTED_NUMERIC:
         status.totalSortedNumericFields++;
         checkSortedNumericDocValues(fi.name, reader, reader.getSortedNumericDocValues(fi.name), docsWithField);
-        if (reader.getBinaryDocValues(fi.name) != null ||
-            reader.getNumericDocValues(fi.name) != null ||
-            reader.getSortedSetDocValues(fi.name) != null ||
-            reader.getSortedDocValues(fi.name) != null) {
-          throw new RuntimeException(fi.name + " returns multiple docvalues types!");
-        }
+        checkMoreThanOneDocValuesType(fi.name, reader);
         break;
       case SORTED_SET:
         status.totalSortedSetFields++;
         checkSortedSetDocValues(fi.name, reader, reader.getSortedSetDocValues(fi.name), docsWithField);
-        if (reader.getBinaryDocValues(fi.name) != null ||
-            reader.getNumericDocValues(fi.name) != null ||
-            reader.getSortedNumericDocValues(fi.name) != null ||
-            reader.getSortedDocValues(fi.name) != null) {
-          throw new RuntimeException(fi.name + " returns multiple docvalues types!");
-        }
+        checkMoreThanOneDocValuesType(fi.name, reader);
         break;
       case BINARY:
         status.totalBinaryFields++;
         checkBinaryDocValues(fi.name, reader, reader.getBinaryDocValues(fi.name), docsWithField);
-        if (reader.getNumericDocValues(fi.name) != null ||
-            reader.getSortedDocValues(fi.name) != null ||
-            reader.getSortedNumericDocValues(fi.name) != null ||
-            reader.getSortedSetDocValues(fi.name) != null) {
-          throw new RuntimeException(fi.name + " returns multiple docvalues types!");
-        }
+        checkMoreThanOneDocValuesType(fi.name, reader);
         break;
       case NUMERIC:
         status.totalNumericFields++;
         checkNumericDocValues(fi.name, reader, reader.getNumericDocValues(fi.name), docsWithField);
-        if (reader.getBinaryDocValues(fi.name) != null ||
-            reader.getSortedDocValues(fi.name) != null ||
-            reader.getSortedNumericDocValues(fi.name) != null ||
-            reader.getSortedSetDocValues(fi.name) != null) {
-          throw new RuntimeException(fi.name + " returns multiple docvalues types!");
-        }
+        checkMoreThanOneDocValuesType(fi.name, reader);
         break;
       default:
         throw new AssertionError();
     }
   }
   
+  private static void checkMoreThanOneDocValuesType(String fieldName, LeafReader reader) throws IOException {
+    List<String> docValues = new ArrayList<>();
+    if (reader.getBinaryDocValues(fieldName) != null) {
+      docValues.add("BINARY");
+    }
+    if (reader.getNumericDocValues(fieldName) != null) {
+      docValues.add("NUMERIC");
+    }
+    if (reader.getSortedDocValues(fieldName) != null) {
+      docValues.add("SORTED");
+    }
+    if (reader.getSortedSetDocValues(fieldName) != null) {
+      docValues.add("SORTED_SET");
+    }
+    if (reader.getSortedNumericDocValues(fieldName) != null) {
+      docValues.add("SORTED_NUMERIC");
+    }
+    if (docValues.size() > 1) {
+      throw new RuntimeException("field=\"" + fieldName + "\" returns multiple docvalues types: " + docValues);
+    }
+  }
+  
   private static void checkNorms(FieldInfo fi, LeafReader reader, PrintStream infoStream) throws IOException {
     if (fi.hasNorms()) {
       checkNumericDocValues(fi.name, reader, reader.getNormValues(fi.name), new Bits.MatchAllBits(reader.maxDoc()));
@@ -1842,17 +2109,16 @@
           // Again, with the one doc deleted:
           checkFields(tfv, onlyDocIsDeleted, 1, fieldInfos, false, true, infoStream, verbose);
 
-          // Only agg stats if the doc is live:
-          final boolean doStats = liveDocs == null || liveDocs.get(j);
-
-          if (doStats) {
-            status.docCount++;
+          if (liveDocs != null && liveDocs.get(j) == false) {
+            // Only check live docs
+            continue;
           }
 
+          status.docCount++;
+
           for(String field : tfv) {
-            if (doStats) {
-              status.totVectors++;
-            }
+
+            status.totVectors++;
 
             // Make sure FieldInfo thinks this field is vector'd:
             final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@@ -2038,6 +2304,59 @@
       throw new IllegalArgumentException("can only exorcise an index that was fully checked (this status checked a subset of segments)");
     result.newSegments.changed();
     result.newSegments.commit(result.dir);
+
+    if (result.nonUniqueCount != 0) {
+      // Open an IndexWriter to delete all non-unique documents:
+
+      // TODO: messy that we drop & then reacquire write lock; can we xfer to writer somehow?  Or maybe make a filter dir w/ NoLockFactory?
+      IOUtils.close(writeLock);
+      IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
+      IndexReader r = null;
+      boolean success = false;
+      try {
+        int delCount = 0;
+        FieldTypes fieldTypes = w.getFieldTypes();
+        r = w.getReader();
+        for (String fieldName : fieldTypes.getFieldNames()) {
+          if (fieldTypes.getIsUnique(fieldName)) {
+            Terms terms = MultiFields.getTerms(r, fieldName);
+            if (terms != null) {
+              Bits liveDocs = MultiFields.getLiveDocs(r);
+              TermsEnum termsEnum = terms.iterator(null);
+              DocsEnum docsEnum = null;
+              while (termsEnum.next() != null) {
+                docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
+                int docID = docsEnum.nextDoc();
+                if (docID != DocsEnum.NO_MORE_DOCS) {
+                  // Delete all but the first document:
+                  while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+                    delCount++;
+                    if (w.tryDeleteDocument(r, docID) == false) {
+                      throw new RuntimeException("failed to tryDeleteDocument " + docID);
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+        if (delCount != result.nonUniqueCount) {
+          throw new RuntimeException("exorcise attempted to delete the wrong number (" + delCount + ") of documents vs expected " + result.nonUniqueCount);
+        }
+        success = true;
+      } finally {
+        if (success == false) {
+          IOUtils.closeWhileHandlingException(r);
+          w.rollback();
+        } else {
+          IOUtils.close(w, r);
+        }
+        // re-obtain write lock
+        if (!writeLock.obtain(IndexWriterConfig.WRITE_LOCK_TIMEOUT)) {
+          throw new LockObtainFailedException("Index locked for write: " + writeLock);
+        }
+      }
+    }
   }
 
   private static boolean assertsOn;
@@ -2066,7 +2385,7 @@
     segment(s).  This can be specified multiple times,
     to check more than one segment, eg <code>-segment _2
     -segment _a</code>.  You can't use this with the -exorcise
-    option.
+    option.  Note that this skips certain top-level checks.
     </ul>
 
     <p><b>WARNING</b>: <code>-exorcise</code> should only be used on an emergency basis as it will cause
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index dabbcef..ba62cf0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -28,7 +28,6 @@
 import org.apache.lucene.codecs.NormsConsumer;
 import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.StoredFieldsWriter;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.ArrayUtil;
@@ -89,6 +88,7 @@
     // aborting on any exception from this method
 
     int numDocs = state.segmentInfo.getDocCount();
+
     writeNorms(state);
     writeDocValues(state);
     
@@ -274,7 +274,7 @@
   }
 
   @Override
-  public void processDocument() throws IOException, AbortingException {
+  public void processDocument(Term delTerm) throws IOException, AbortingException {
 
     // How many indexed field names we've seen (collapses
     // multiple field instances by the same name):
@@ -291,29 +291,24 @@
 
     termsHash.startDocument();
 
-    // Invert indexed fields:
-    try {
-      for (IndexableField field : docState.doc.indexableFields()) {
-        IndexableFieldType fieldType = field.fieldType();
-        
-        // if the field omits norms, the boost cannot be indexed.
-        if (fieldType.omitNorms() && field.boost() != 1.0f) {
-          throw new UnsupportedOperationException("You cannot set an index-time boost: norms are omitted for field '" + field.name() + "'");
-        }
-        
-        PerField fp = getOrAddField(field.name(), fieldType, true);
-        boolean first = fp.fieldGen != fieldGen;
-        fp.invert(field, first);
+    fillStoredFields(docState.docID);
+    startStoredFields();
 
-        if (first) {
-          fields[fieldCount++] = fp;
-          fp.fieldGen = fieldGen;
-        }
+    boolean aborting = false;
+    try {
+      for (IndexableField field : docState.doc) {
+        fieldCount = processField(field, fieldGen, fieldCount, delTerm);
       }
+    } catch (AbortingException ae) {
+      aborting = true;
+      throw ae;
     } finally {
-      // Finish each field name seen in the document:
-      for (int i=0;i<fieldCount;i++) {
-        fields[i].finish();
+      if (aborting == false) {
+        // Finish each indexed field name seen in the document:
+        for (int i=0;i<fieldCount;i++) {
+          fields[i].finish();
+        }
+        finishStoredFields();
       }
     }
 
@@ -324,72 +319,88 @@
       // vectors are now corrupt:
       throw AbortingException.wrap(th);
     }
-
-    // Add stored fields:
-    fillStoredFields(docState.docID);
-    startStoredFields();
-
-    // TODO: clean up this loop, it's bogus that docvalues are treated as stored fields...
-    boolean abort = false;
-    try {
-      for (StorableField field : docState.doc.storableFields()) {
-        String fieldName = field.name();
-        IndexableFieldType fieldType = field.fieldType();
-      
-        verifyFieldType(fieldName, fieldType);
-        
-        PerField fp = getOrAddField(fieldName, fieldType, false);
-        if (fieldType.stored()) {
-          try {
-            storedFieldsWriter.writeField(fp.fieldInfo, field);
-          } catch (Throwable th) {
-            abort = true;
-            throw AbortingException.wrap(th);
-          }
-        }
-
-        DocValuesType dvType = fieldType.docValuesType();
-        if (dvType == null) {
-          throw new NullPointerException("docValuesType cannot be null (field: \"" + fieldName + "\")");
-        }
-        if (dvType != DocValuesType.NONE) {
-          indexDocValue(fp, dvType, field);
-        }
-      }
-    } finally {
-      if (abort == false) {
-        finishStoredFields();
-      }
-    }
   }
 
-  private static void verifyFieldType(String name, IndexableFieldType ft) {
-    if (ft.indexOptions() == null) {
-      throw new NullPointerException("IndexOptions must not be null (field: \"" + name + "\")");
+  private int processField(IndexableField field, long fieldGen, int fieldCount, Term delTerm) throws IOException, AbortingException {
+    String fieldName = field.name();
+    IndexableFieldType fieldType = field.fieldType();
+
+    PerField fp = null;
+
+    if (fieldType.indexOptions() == null) {
+      throw new NullPointerException("IndexOptions must not be null (field: \"" + fieldName + "\")");
     }
-    if (ft.indexOptions() == IndexOptions.NONE) {
-      if (ft.storeTermVectors()) {
-        throw new IllegalArgumentException("cannot store term vectors "
-                                           + "for a field that is not indexed (field=\"" + name + "\")");
+
+    // Invert indexed fields:
+    if (fieldType.indexOptions() != IndexOptions.NONE) {
+      
+      // if the field omits norms, the boost cannot be indexed.
+      if (fieldType.omitNorms() && field.boost() != 1.0f) {
+        throw new UnsupportedOperationException("You cannot set an index-time boost: norms are omitted for field '" + field.name() + "'");
       }
-      if (ft.storeTermVectorPositions()) {
-        throw new IllegalArgumentException("cannot store term vector positions "
-                                           + "for a field that is not indexed (field=\"" + name + "\")");
+      
+      fp = getOrAddField(fieldName, fieldType, true);
+      boolean first = fp.fieldGen != fieldGen;
+      fp.invert(field, first, delTerm);
+
+      if (first) {
+        fields[fieldCount++] = fp;
+        fp.fieldGen = fieldGen;
       }
-      if (ft.storeTermVectorOffsets()) {
-        throw new IllegalArgumentException("cannot store term vector offsets "
-                                           + "for a field that is not indexed (field=\"" + name + "\")");
+    } else {
+      verifyUnIndexedFieldType(fieldName, fieldType);
+    }
+
+    // Add stored fields:
+    if (fieldType.stored()) {
+      if (fp == null) {
+        fp = getOrAddField(fieldName, fieldType, false);
       }
-      if (ft.storeTermVectorPayloads()) {
-        throw new IllegalArgumentException("cannot store term vector payloads "
-                                           + "for a field that is not indexed (field=\"" + name + "\")");
+      if (fieldType.stored()) {
+        try {
+          storedFieldsWriter.writeField(fp.fieldInfo, field);
+        } catch (Throwable th) {
+          throw AbortingException.wrap(th);
+        }
       }
     }
+
+    DocValuesType dvType = fieldType.docValuesType();
+    if (dvType == null) {
+      throw new NullPointerException("docValueType cannot be null (field: \"" + fieldName + "\")");
+    }
+    if (dvType != DocValuesType.NONE) {
+      if (fp == null) {
+        fp = getOrAddField(fieldName, fieldType, false);
+      }
+      indexDocValue(fp, dvType, field);
+    }
+    
+    return fieldCount;
+  }
+
+  private static void verifyUnIndexedFieldType(String name, IndexableFieldType ft) {
+    if (ft.storeTermVectors()) {
+      throw new IllegalArgumentException("cannot store term vectors "
+                                         + "for a field that is not indexed (field=\"" + name + "\")");
+    }
+    if (ft.storeTermVectorPositions()) {
+      throw new IllegalArgumentException("cannot store term vector positions "
+                                         + "for a field that is not indexed (field=\"" + name + "\")");
+    }
+    if (ft.storeTermVectorOffsets()) {
+      throw new IllegalArgumentException("cannot store term vector offsets "
+                                         + "for a field that is not indexed (field=\"" + name + "\")");
+    }
+    if (ft.storeTermVectorPayloads()) {
+      throw new IllegalArgumentException("cannot store term vector payloads "
+                                         + "for a field that is not indexed (field=\"" + name + "\")");
+    }
   }
 
   /** Called from processDocument to index one field's doc
    *  value */
-  private void indexDocValue(PerField fp, DocValuesType dvType, StorableField field) throws IOException {
+  private void indexDocValue(PerField fp, DocValuesType dvType, IndexableField field) throws IOException {
 
     if (fp.fieldInfo.getDocValuesType() == DocValuesType.NONE) {
       // This will throw an exc if the caller tried to
@@ -406,35 +417,35 @@
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new NumericDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((NumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericValue().longValue());
+        ((NumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericDocValue().longValue());
         break;
 
       case BINARY:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new BinaryDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((BinaryDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((BinaryDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
 
       case SORTED:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((SortedDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
         
       case SORTED_NUMERIC:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedNumericDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedNumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericValue().longValue());
+        ((SortedNumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericDocValue().longValue());
         break;
 
       case SORTED_SET:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedSetDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedSetDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((SortedSetDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
 
       default:
@@ -454,7 +465,7 @@
   }
 
   /** Returns a previously created {@link PerField},
-   *  absorbing the type information from {@link FieldType},
+   *  absorbing the type information from {@link IndexableFieldType},
    *  or creates a new {@link PerField} if this field name
    *  hasn't been seen yet. */
   private PerField getOrAddField(String name, IndexableFieldType fieldType, boolean invert) {
@@ -491,12 +502,17 @@
         fields = newFields;
       }
 
-    } else if (invert && fp.invertState == null) {
+    } else {
       // Messy: must set this here because e.g. FreqProxTermsWriterPerField looks at the initial
+      // IndexOptions to decide what arrays it must create.  Then, we also must set it in
       // PerField.invert to allow for later downgrading of the index options:
       fp.fieldInfo.setIndexOptions(fieldType.indexOptions());
-      fp.setInvertState();
+
+      // NOTE: messy, but we must do this in case field was first seen w/o being
+      // indexed, and now is seen again, this time being indexed:
+      if (invert && fp.invertState == null) {
+        fp.setInvertState();
+      }
     }
 
     return fp;
@@ -510,6 +526,7 @@
 
     FieldInvertState invertState;
     TermsHashPerField termsHashPerField;
+    final LiveUniqueValues uniqueValues;
 
     // Non-null if this field ever had doc values in this
     // segment:
@@ -536,6 +553,7 @@
       if (invert) {
         setInvertState();
       }
+      uniqueValues = docWriter.writer.getUniqueValues(fieldInfo.name);
     }
 
     void setInvertState() {
@@ -564,15 +582,17 @@
     /** Inverts one field for one document; first is true
      *  if this is the first time we are seeing this field
      *  name in this document. */
-    public void invert(IndexableField field, boolean first) throws IOException, AbortingException {
+    public void invert(IndexableField field, boolean first, Term delTerm) throws IOException, AbortingException {
+      IndexableFieldType fieldType = field.fieldType();
       if (first) {
         // First time we're seeing this field (indexed) in
         // this document:
         invertState.reset();
+      } else {
+        invertState.position += fieldType.getPositionGap();
+        invertState.offset += fieldType.getOffsetGap();
       }
 
-      IndexableFieldType fieldType = field.fieldType();
-
       IndexOptions indexOptions = fieldType.indexOptions();
       fieldInfo.setIndexOptions(indexOptions);
 
@@ -580,8 +600,6 @@
         fieldInfo.setOmitsNorms();
       }
 
-      final boolean analyzed = fieldType.tokenized() && docState.analyzer != null;
-        
       // only bother checking offsets if something will consume them.
       // TODO: after we fix analyzers, also check if termVectorOffsets will be indexed.
       final boolean checkOffsets = indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
@@ -592,7 +610,7 @@
        * but rather a finally that takes note of the problem.
        */
       boolean succeededInProcessingField = false;
-      try (TokenStream stream = tokenStream = field.tokenStream(docState.analyzer, tokenStream)) {
+      try (TokenStream stream = tokenStream = field.tokenStream(tokenStream)) {
         // reset the TokenStream to the first token
         stream.reset();
         invertState.setAttributeSource(stream);
@@ -605,8 +623,8 @@
           // chokes on a given document), then it's
           // non-aborting and (above) this one document
           // will be marked as deleted, but still
-          // consume a docID
-
+          // consume a docID since we will have already
+          // written some of its postings into our RAM buffer.
           int posIncr = invertState.posIncrAttribute.getPositionIncrement();
           invertState.position += posIncr;
           if (invertState.position < invertState.lastPosition) {
@@ -634,8 +652,8 @@
           if (invertState.length < 0) {
             throw new IllegalArgumentException("too many tokens in field '" + field.name() + "'");
           }
-          //System.out.println("  term=" + invertState.termAttribute);
-
+          //System.out.println("  term=" + fieldInfo.name + ":" + invertState.termAttribute);
+          
           // If we hit an exception in here, we abort
           // all buffered documents since the last
           // flush, on the likelihood that the
@@ -657,6 +675,24 @@
           } catch (Throwable th) {
             throw AbortingException.wrap(th);
           }
+
+          // maybe low-schema should know "isUnique"?
+
+          if (uniqueValues != null) {
+            BytesRef token = BytesRef.deepCopyOf(invertState.termAttribute.getBytesRef());
+            if (uniqueValues.add(token) == false &&
+                (delTerm == null ||
+                 delTerm.field().equals(field.name()) == false ||
+                 delTerm.bytes().equals(token) == false)) {
+              // Unique constraint violated; document will be marked deleted above:
+              throw new NotUniqueException(field.name(), token);
+            }
+            if (stream.incrementToken()) {
+              uniqueValues.delete(token);
+              throw new IllegalArgumentException("field \"" + field.name() + "\": unique fields must have a single token");
+            }
+            break;
+          }
         }
 
         // trigger streams to perform end-of-stream operations
@@ -675,11 +711,6 @@
         }
       }
 
-      if (analyzed) {
-        invertState.position += docState.analyzer.getPositionIncrementGap(fieldInfo.name);
-        invertState.offset += docState.analyzer.getOffsetGap(fieldInfo.name);
-      }
-
       invertState.boost *= field.boost();
     }
   }
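
The unique-value enforcement in invert is easiest to see from the caller's side. A minimal sketch, assuming this branch's schema-driven Document API (IndexWriter.newDocument, Document.addUniqueAtom) and the NotUniqueException this change introduces:

    // Sketch only: addUniqueAtom and NotUniqueException are branch-specific APIs.
    void demoUniqueField(IndexWriter writer) throws IOException {
      Document doc1 = writer.newDocument();
      doc1.addUniqueAtom("id", "42");
      writer.addDocument(doc1);

      Document doc2 = writer.newDocument();
      doc2.addUniqueAtom("id", "42");
      try {
        writer.addDocument(doc2);  // duplicate unique value, no matching delTerm
      } catch (NotUniqueException e) {
        // LiveUniqueValues already held "42": this document is rejected, though
        // it still consumes a docID and is marked deleted in the RAM buffer.
      }
    }
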
diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
index c636444..d557f03 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -64,7 +64,7 @@
   }
   
   /**
-   * Open a near real time IndexReader from the {@link org.apache.lucene.index.IndexWriter}.
+   * Open a near-real-time IndexReader from the {@link IndexWriter}.
    *
    * @param writer The IndexWriter to open from
    * @param applyAllDeletes If true, all buffered deletes will
@@ -86,6 +86,19 @@
     return writer.getReader(applyAllDeletes);
   }
 
+  /** Open a near-real-time IndexReader from the {@link IndexWriter}, applying all deletes.
+   * @param writer The IndexWriter to open from
+   * @return The new IndexReader
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @see #open(IndexWriter,boolean)
+   */
+  public static DirectoryReader open(final IndexWriter writer) throws IOException {
+    return open(writer, true);
+  }
+
   /** Expert: returns an IndexReader reading the index in the given
    *  {@link IndexCommit}.
    * @param commit the commit point to open
@@ -388,5 +401,4 @@
    * @lucene.experimental
    */
   public abstract IndexCommit getIndexCommit() throws IOException;
-
 }
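
The new one-argument overload is sugar for open(writer, true), so callers that always want deletes applied no longer pass the boolean:

    void demoNrtOpen(IndexWriter writer) throws IOException {
      // Equivalent to DirectoryReader.open(writer, true):
      try (DirectoryReader reader = DirectoryReader.open(writer)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // search a point-in-time view that includes uncommitted changes
      }
    }
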
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
index 5f23ce7..584f275 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
@@ -20,7 +20,7 @@
 import java.io.IOException;
 
 abstract class DocConsumer {
-  abstract void processDocument() throws IOException, AbortingException;
+  abstract void processDocument(Term delTerm) throws IOException, AbortingException;
   abstract void flush(final SegmentWriteState state) throws IOException, AbortingException;
   abstract void abort();
 }
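
Passing delTerm down to processDocument lets the indexing chain exempt a document's own update term from the unique check, which is what makes updateDocument an atomic replace for a unique id. A sketch (addUniqueAtom is the branch-specific API):

    void demoUpdateUnique(IndexWriter writer) throws IOException {
      Document doc = writer.newDocument();
      doc.addUniqueAtom("id", "42");
      // delTerm ("id", "42") matches the unique token being re-added, so
      // invert() does not throw NotUniqueException even though "42" is live:
      writer.updateDocument(new Term("id", "42"), doc);
    }
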
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java b/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
index 4a259ae..43aef14 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocValuesUpdate.java
@@ -23,7 +23,6 @@
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_HEADER;
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.RamUsageEstimator;
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index 722b282..e4fab8d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -19,8 +19,10 @@
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Locale;
+import java.util.List;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -131,7 +133,7 @@
     flushPolicy = config.getFlushPolicy();
     this.writer = writer;
     this.events = new ConcurrentLinkedQueue<>();
-    flushControl = new DocumentsWriterFlushControl(this, config, writer.bufferedUpdatesStream);
+    flushControl = new DocumentsWriterFlushControl(this, config, writer.bufferedUpdatesStream, writer.uniqueValuesRAM);
   }
   
   synchronized boolean deleteQueries(final Query... queries) throws IOException {
@@ -147,10 +149,18 @@
   // per-DWPT map (but still must go into the global map)
   synchronized boolean deleteTerms(final Term... terms) throws IOException {
     // TODO why is this synchronized?
+    for(Term term : terms) {
+      LiveUniqueValues uniqueValues = writer.getUniqueValues(term.field());
+      if (uniqueValues != null) {
+        // We must live-delete this field:
+        uniqueValues.delete(term.bytes());
+      }
+    }
+
     final DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
     deleteQueue.addDelete(terms);
     flushControl.doOnDelete();
-    return applyAllDeletes( deleteQueue);
+    return applyAllDeletes(deleteQueue);
   }
 
   synchronized boolean updateDocValues(DocValuesUpdate... updates) throws IOException {
@@ -385,12 +395,12 @@
       final FieldInfos.Builder infos = new FieldInfos.Builder(
           writer.globalFieldNumberMap);
       state.dwpt = new DocumentsWriterPerThread(writer.newSegmentName(),
-                                                directory, config, infoStream, deleteQueue, infos,
+                                                writer, directory, deleteQueue, infos,
                                                 writer.pendingNumDocs, writer.enableTestPoints);
     }
   }
 
-  boolean updateDocuments(final Iterable<? extends IndexDocument> docs, final Analyzer analyzer,
+  boolean updateDocuments(final Iterable<? extends Iterable<? extends IndexableField>> docs,
                           final Term delTerm) throws IOException, AbortingException {
     boolean hasEvents = preUpdate();
 
@@ -407,7 +417,7 @@
       final DocumentsWriterPerThread dwpt = perThread.dwpt;
       final int dwptNumDocs = dwpt.getNumDocsInRAM();
       try {
-        dwpt.updateDocuments(docs, analyzer, delTerm);
+        dwpt.updateDocuments(docs, delTerm);
       } catch (AbortingException ae) {
         flushControl.doOnAbort(perThread);
         dwpt.abort();
@@ -427,8 +437,7 @@
     return postUpdate(flushingDWPT, hasEvents);
   }
 
-  boolean updateDocument(final IndexDocument doc, final Analyzer analyzer,
-      final Term delTerm) throws IOException, AbortingException {
+  boolean updateDocument(final Iterable<? extends IndexableField> doc, final Term delTerm) throws IOException, AbortingException {
 
     boolean hasEvents = preUpdate();
 
@@ -445,7 +454,7 @@
       final DocumentsWriterPerThread dwpt = perThread.dwpt;
       final int dwptNumDocs = dwpt.getNumDocsInRAM();
       try {
-        dwpt.updateDocument(doc, analyzer, delTerm); 
+        dwpt.updateDocument(doc, delTerm); 
       } catch (AbortingException ae) {
         flushControl.doOnAbort(perThread);
         dwpt.abort();
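
Because deleteTerms now eagerly removes the value from the field's LiveUniqueValues, a deleted unique id becomes reusable immediately, before the buffered delete is applied. A sketch, again assuming the branch's addUniqueAtom:

    void demoDeleteThenReAdd(IndexWriter writer) throws IOException {
      writer.deleteDocuments(new Term("id", "42"));  // live-deletes "42" as well
      Document doc = writer.newDocument();
      doc.addUniqueAtom("id", "42");                 // no NotUniqueException here
      writer.addDocument(doc);
    }
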
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
index bdf8195..250aef7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
@@ -25,6 +25,7 @@
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
 import org.apache.lucene.util.Accountable;
@@ -72,8 +73,9 @@
   private final LiveIndexWriterConfig config;
   private final BufferedUpdatesStream bufferedUpdatesStream;
   private final InfoStream infoStream;
+  final AtomicLong uniqueValuesRAM;
 
-  DocumentsWriterFlushControl(DocumentsWriter documentsWriter, LiveIndexWriterConfig config, BufferedUpdatesStream bufferedUpdatesStream) {
+  DocumentsWriterFlushControl(DocumentsWriter documentsWriter, LiveIndexWriterConfig config, BufferedUpdatesStream bufferedUpdatesStream, AtomicLong uniqueValuesRAM) {
     this.infoStream = config.getInfoStream();
     this.stallControl = new DocumentsWriterStallControl(config);
     this.perThreadPool = documentsWriter.perThreadPool;
@@ -82,6 +84,7 @@
     this.hardMaxBytesPerDWPT = config.getRAMPerThreadHardLimitMB() * 1024 * 1024;
     this.documentsWriter = documentsWriter;
     this.bufferedUpdatesStream = bufferedUpdatesStream;
+    this.uniqueValuesRAM = uniqueValuesRAM;
   }
 
   public synchronized long activeBytes() {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index 7116275..3d1f3f0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -25,8 +25,9 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocumentsWriterDeleteQueue.DeleteSlice;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -68,11 +69,10 @@
 
   static class DocState {
     final DocumentsWriterPerThread docWriter;
-    Analyzer analyzer;
     InfoStream infoStream;
     Similarity similarity;
     int docID;
-    IndexDocument doc;
+    Iterable<? extends IndexableField> doc;
 
     DocState(DocumentsWriterPerThread docWriter, InfoStream infoStream) {
       this.docWriter = docWriter;
@@ -84,10 +84,7 @@
     }
 
     public void clear() {
-      // don't hold onto doc nor analyzer, in case it is
-      // largish:
       doc = null;
-      analyzer = null;
     }
   }
 
@@ -155,18 +152,23 @@
   final IntBlockPool.Allocator intBlockAllocator;
   private final AtomicLong pendingNumDocs;
   private final LiveIndexWriterConfig indexWriterConfig;
+  final FieldTypes fieldTypes;
+  final IndexWriter writer;
   private final boolean enableTestPoints;
   
-  public DocumentsWriterPerThread(String segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
+  public DocumentsWriterPerThread(String segmentName, IndexWriter writer, Directory directory,
+                                  DocumentsWriterDeleteQueue deleteQueue,
                                   FieldInfos.Builder fieldInfos, AtomicLong pendingNumDocs, boolean enableTestPoints) throws IOException {
     this.directoryOrig = directory;
+    this.writer = writer;
     this.directory = new TrackingDirectoryWrapper(directory);
     this.fieldInfos = fieldInfos;
-    this.indexWriterConfig = indexWriterConfig;
-    this.infoStream = infoStream;
-    this.codec = indexWriterConfig.getCodec();
+    this.indexWriterConfig = writer.config;
+    this.infoStream = indexWriterConfig.getInfoStream();
+    this.codec = writer.codec;
+    this.fieldTypes = writer.fieldTypes;
     this.docState = new DocState(this, infoStream);
-    this.docState.similarity = indexWriterConfig.getSimilarity();
+    this.docState.similarity = writer.fieldTypes.getSimilarity();
     this.pendingNumDocs = pendingNumDocs;
     bytesUsed = Counter.newCounter();
     byteBlockAllocator = new DirectTrackingAllocator(bytesUsed);
@@ -209,11 +211,16 @@
     }
   }
 
-  public void updateDocument(IndexDocument doc, Analyzer analyzer, Term delTerm) throws IOException, AbortingException {
+  public void updateDocument(Iterable<? extends IndexableField> doc, Term delTerm) throws IOException, AbortingException {
     testPoint("DocumentsWriterPerThread addDocument start");
+    if (doc instanceof Document) {
+      Document doc2 = (Document) doc;
+      if (doc2.getFieldTypes() != fieldTypes) {
+        throw new IllegalArgumentException("this document wasn't created by this writer (fieldTypes are different)");
+      }
+    }
     assert deleteQueue != null;
     docState.doc = doc;
-    docState.analyzer = analyzer;
     docState.docID = numDocsInRAM;
     if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
       infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segmentInfo.name);
@@ -228,7 +235,7 @@
     boolean success = false;
     try {
       try {
-        consumer.processDocument();
+        consumer.processDocument(delTerm);
       } finally {
         docState.clear();
       }
@@ -243,10 +250,9 @@
     finishDocument(delTerm);
   }
 
-  public int updateDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer, Term delTerm) throws IOException, AbortingException {
+  public int updateDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Term delTerm) throws IOException, AbortingException {
     testPoint("DocumentsWriterPerThread addDocuments start");
     assert deleteQueue != null;
-    docState.analyzer = analyzer;
     if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
       infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segmentInfo.name);
     }
@@ -254,13 +260,19 @@
     boolean allDocsIndexed = false;
     try {
       
-      for(IndexDocument doc : docs) {
+      for(Iterable<? extends IndexableField> doc : docs) {
         // Even on exception, the document is still added (but marked
         // deleted), so we don't need to un-reserve at that point.
         // Aborting exceptions will actually "lose" more than one
         // document, so the counter will be "wrong" in that case, but
         // it's very hard to fix (we can't easily distinguish aborting
         // vs non-aborting exceptions):
+        if (doc instanceof Document) {
+          Document doc2 = (Document) doc;
+          if (doc2.getFieldTypes() != fieldTypes) {
+            throw new IllegalArgumentException("this document wasn't created by this writer (fieldTypes are different)");
+          }
+        }
         reserveDoc();
         docState.doc = doc;
         docState.docID = numDocsInRAM;
@@ -268,7 +280,7 @@
 
         boolean success = false;
         try {
-          consumer.processDocument();
+          consumer.processDocument(delTerm);
           success = true;
         } finally {
           if (!success) {
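
The fieldTypes identity check in updateDocument/updateDocuments means every document must be created by the writer that indexes it; a Document carrying another writer's schema is rejected before any indexing happens:

    void demoWrongWriter(IndexWriter w1, IndexWriter w2) throws IOException {
      Document doc = w1.newDocument();  // bound to w1's FieldTypes
      try {
        w2.addDocument(doc);            // doc.getFieldTypes() != w2.fieldTypes
      } catch (IllegalArgumentException e) {
        // "this document wasn't created by this writer (fieldTypes are different)"
      }
    }
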
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
index f20ec71..083c2a1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
@@ -233,13 +233,19 @@
-     * Returns true if the {@code fieldName} exists in the map and is of the
-     * same {@code dvType}.
+     * Verifies that {@code fieldName} exists in the map and has the same
+     * {@code dvType}, throwing {@code IllegalArgumentException} otherwise.
      */
-    synchronized boolean contains(String fieldName, DocValuesType dvType) {
+    synchronized void verifyUpdateDocValuesType(String fieldName, DocValuesType dvType) {
       // used by IndexWriter.updateNumericDocValue
       if (!nameToNumber.containsKey(fieldName)) {
-        return false;
+        throw new IllegalArgumentException("can only update existing " + dvType + " field but field=\"" + fieldName + "\" does not exist");
       } else {
-        // only return true if the field has the same dvType as the requested one
-        return dvType == docValuesType.get(fieldName);
+        DocValuesType currentDVType = docValuesType.get(fieldName);
+        if (dvType != currentDVType) {
+          if (currentDVType != DocValuesType.NUMERIC && currentDVType != DocValuesType.BINARY) {
+            throw new IllegalArgumentException("can only update NUMERIC or BINARY doc values, but updated field=\"" + fieldName + "\" has docValuesType " + currentDVType);
+          } else {
+            throw new IllegalArgumentException("can only update existing " + dvType + " field but updated field=\"" + fieldName + "\" has docValuesType " + currentDVType);
+          }
+        }
       }
     }
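
Replacing the boolean contains with verifyUpdateDocValuesType moves the explanation next to the failing condition; callers such as updateNumericDocValue now surface a precise message instead of a generic one:

    void demoDvUpdateError(IndexWriter writer) throws IOException {
      try {
        // "title" was never indexed with NUMERIC doc values:
        writer.updateNumericDocValue(new Term("id", "42"), "title", 7L);
      } catch (IllegalArgumentException e) {
        // e.g. can only update existing NUMERIC field but field="title" does not exist
      }
    }
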
     
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
index b2a8649..e234245 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
@@ -24,6 +24,7 @@
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 
 /** 
@@ -99,4 +100,9 @@
   public void removeCoreClosedListener(CoreClosedListener listener) {
     in.removeCoreClosedListener(listener);
   }
+
+  @Override
+  public FieldTypes getFieldTypes() {
+    return in.getFieldTypes();
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterDirectoryReader.java
index 4a4f5c4..787ca78 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterDirectoryReader.java
@@ -20,6 +20,8 @@
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.lucene.document.FieldTypes;
+
 /**
  * A FilterDirectoryReader wraps another DirectoryReader, allowing implementations
  * to transform or extend it.
@@ -134,6 +136,11 @@
     in.doClose();
   }
 
+  @Override
+  public FieldTypes getFieldTypes() {
+    return in.getFieldTypes();
+  }
+
   /** Returns the wrapped {@link DirectoryReader}. */
   public DirectoryReader getDelegate() {
     return in;
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 47422a9..062d1b7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.Iterator;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.CachingWrapperFilter;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.Bits;
@@ -469,6 +470,12 @@
     in.checkIntegrity();
   }
 
+  @Override
+  public FieldTypes getFieldTypes() {
+    ensureOpen();
+    return in.getFieldTypes();
+  }
+
   /** Returns the wrapped {@link LeafReader}. */
   public LeafReader getDelegate() {
     return in;
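
Since getFieldTypes is abstract on IndexReader, each Filter* reader must forward it; subclasses of FilterLeafReader inherit the delegation for free, as in this minimal wrapper:

    // Minimal pass-through wrapper: getFieldTypes() is inherited from
    // FilterLeafReader, which calls ensureOpen() and returns in.getFieldTypes().
    class PassThroughReader extends FilterLeafReader {
      PassThroughReader(LeafReader in) {
        super(in);
      }
    }
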
diff --git a/lucene/core/src/java/org/apache/lucene/index/FlushByRamOrCountsPolicy.java b/lucene/core/src/java/org/apache/lucene/index/FlushByRamOrCountsPolicy.java
index 8da0b9e..617765b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FlushByRamOrCountsPolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FlushByRamOrCountsPolicy.java
@@ -69,7 +69,7 @@
       }
     }
     if ((flushOnRAM() &&
-        control.getDeleteBytesUsed() > (1024*1024*indexWriterConfig.getRAMBufferSizeMB()))) {
+         control.getDeleteBytesUsed() + control.uniqueValuesRAM.get() > (1024*1024*indexWriterConfig.getRAMBufferSizeMB()))) {
       control.setApplyAllDeletes();
      if (infoStream.isEnabled("FP")) {
        infoStream.message("FP", "force apply deletes bytesUsed=" + control.getDeleteBytesUsed() + " vs ramBufferMB=" + indexWriterConfig.getRAMBufferSizeMB());
@@ -86,7 +86,7 @@
       control.setFlushPending(state);
     } else if (flushOnRAM()) {// flush by RAM
       final long limit = (long) (indexWriterConfig.getRAMBufferSizeMB() * 1024.d * 1024.d);
-      final long totalRam = control.activeBytes() + control.getDeleteBytesUsed();
+      final long totalRam = control.activeBytes() + control.getDeleteBytesUsed() + control.uniqueValuesRAM.get();
       if (totalRam >= limit) {
         if (infoStream.isEnabled("FP")) {
           infoStream.message("FP", "trigger flush: activeBytes=" + control.activeBytes() + " deleteBytes=" + control.getDeleteBytesUsed() + " vs limit=" + limit);
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index fe5d31f..176d3bb 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -37,6 +37,8 @@
 class FreqProxFields extends Fields {
   final Map<String,FreqProxTermsWriterPerField> fields = new LinkedHashMap<>();
 
+  private Bits liveDocs;
+
   public FreqProxFields(List<FreqProxTermsWriterPerField> fieldList) {
     // NOTE: fields are already sorted by field name
     for(FreqProxTermsWriterPerField field : fieldList) {
@@ -44,6 +46,10 @@
     }
   }
 
+  public void setLiveDocs(Bits liveDocs) {
+    this.liveDocs = liveDocs;
+  }
+
   public Iterator<String> iterator() {
     return fields.keySet().iterator();
   }
@@ -51,7 +57,7 @@
   @Override
   public Terms terms(String field) throws IOException {
     FreqProxTermsWriterPerField perField = fields.get(field);
-    return perField == null ? null : new FreqProxTerms(perField);
+    return perField == null ? null : new FreqProxTerms(perField, liveDocs);
   }
 
   @Override
@@ -62,9 +68,11 @@
 
   private static class FreqProxTerms extends Terms {
     final FreqProxTermsWriterPerField terms;
+    final Bits liveDocs;
 
-    public FreqProxTerms(FreqProxTermsWriterPerField terms) {
+    public FreqProxTerms(FreqProxTermsWriterPerField terms, Bits liveDocs) {
       this.terms = terms;
+      this.liveDocs = liveDocs;
     }
 
     @Override
@@ -72,8 +80,9 @@
       FreqProxTermsEnum termsEnum;
       if (reuse instanceof FreqProxTermsEnum && ((FreqProxTermsEnum) reuse).terms == this.terms) {
         termsEnum = (FreqProxTermsEnum) reuse;
+        assert termsEnum.liveDocs == this.liveDocs;
       } else {
-        termsEnum = new FreqProxTermsEnum(terms);
+        termsEnum = new FreqProxTermsEnum(terms, liveDocs);
       }
       termsEnum.reset();
       return termsEnum;
@@ -136,11 +145,13 @@
     final FreqProxPostingsArray postingsArray;
     final BytesRef scratch = new BytesRef();
     final int numTerms;
+    final Bits liveDocs;
     int ord;
 
-    public FreqProxTermsEnum(FreqProxTermsWriterPerField terms) {
+    public FreqProxTermsEnum(FreqProxTermsWriterPerField terms, Bits liveDocs) {
       this.terms = terms;
       this.numTerms = terms.bytesHash.size();
+      this.liveDocs = liveDocs;
       sortedTermIDs = terms.sortedTermIDs;
       assert sortedTermIDs != null;
       postingsArray = (FreqProxPostingsArray) terms.postingsArray;
@@ -151,7 +162,6 @@
     }
 
     public SeekStatus seekCeil(BytesRef text) {
-
       // TODO: we could instead keep the BytesRefHash
       // intact so this is a hash lookup
 
@@ -170,17 +180,19 @@
         } else {
           // found:
           ord = mid;
+          assert term().compareTo(text) == 0;
           return SeekStatus.FOUND;
         }
       }
 
       // not found:
-      ord = lo + 1;
+      ord = lo;
       if (ord >= numTerms) {
         return SeekStatus.END;
       } else {
         int textStart = postingsArray.textStarts[sortedTermIDs[ord]];
         terms.bytePool.setBytesRef(scratch, textStart);
+        assert term().compareTo(text) > 0;
         return SeekStatus.NOT_FOUND;
       }
     }
@@ -230,8 +242,8 @@
     }
 
     @Override
-    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
-      if (liveDocs != null) {
+    public DocsEnum docs(Bits liveDocsIn, DocsEnum reuse, int flags) {
+      if (liveDocsIn != null) {
         throw new IllegalArgumentException("liveDocs must be null");
       }
 
@@ -246,18 +258,20 @@
       if (reuse instanceof FreqProxDocsEnum) {
         docsEnum = (FreqProxDocsEnum) reuse;
         if (docsEnum.postingsArray != postingsArray) {
-          docsEnum = new FreqProxDocsEnum(terms, postingsArray);
+          docsEnum = new FreqProxDocsEnum(terms, postingsArray, liveDocs);
+        } else {
+          assert docsEnum.liveDocs == liveDocs;
         }
       } else {
-        docsEnum = new FreqProxDocsEnum(terms, postingsArray);
+        docsEnum = new FreqProxDocsEnum(terms, postingsArray, liveDocs);
       }
       docsEnum.reset(sortedTermIDs[ord]);
       return docsEnum;
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-      if (liveDocs != null) {
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocsIn, DocsAndPositionsEnum reuse, int flags) {
+      if (liveDocsIn != null) {
         throw new IllegalArgumentException("liveDocs must be null");
       }
       FreqProxDocsAndPositionsEnum posEnum;
@@ -277,10 +291,12 @@
       if (reuse instanceof FreqProxDocsAndPositionsEnum) {
         posEnum = (FreqProxDocsAndPositionsEnum) reuse;
         if (posEnum.postingsArray != postingsArray) {
-          posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
+          posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray, liveDocs);
+        } else {
+          assert posEnum.liveDocs == liveDocs;
         }
       } else {
-        posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
+        posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray, liveDocs);
       }
       posEnum.reset(sortedTermIDs[ord]);
       return posEnum;
@@ -313,22 +329,24 @@
     final FreqProxPostingsArray postingsArray;
     final ByteSliceReader reader = new ByteSliceReader();
     final boolean readTermFreq;
+    final Bits liveDocs;
     int docID;
     int freq;
     boolean ended;
     int termID;
 
-    public FreqProxDocsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
+    public FreqProxDocsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray, Bits liveDocs) {
       this.terms = terms;
       this.postingsArray = postingsArray;
       this.readTermFreq = terms.hasFreq;
+      this.liveDocs = liveDocs;
     }
 
     public void reset(int termID) {
       this.termID = termID;
       terms.initReader(reader, termID, 0);
       ended = false;
-      docID = 0;
+      docID = -1;
     }
 
     @Override
@@ -349,33 +367,40 @@
 
     @Override
     public int nextDoc() throws IOException {
-      if (reader.eof()) {
-        if (ended) {
-          return NO_MORE_DOCS;
-        } else {
-          ended = true;
-          docID = postingsArray.lastDocIDs[termID];
-          if (readTermFreq) {
-            freq = postingsArray.termFreqs[termID];
-          }
-        }
-      } else {
-        int code = reader.readVInt();
-        if (!readTermFreq) {
-          docID += code;
-        } else {
-          docID += code >>> 1;
-          if ((code & 1) != 0) {
-            freq = 1;
-          } else {
-            freq = reader.readVInt();
-          }
-        }
-
-        assert docID != postingsArray.lastDocIDs[termID];
+      if (docID == -1) {
+        docID = 0;
       }
+      while (true) {
+        if (reader.eof()) {
+          if (ended) {
+            return NO_MORE_DOCS;
+          } else {
+            ended = true;
+            docID = postingsArray.lastDocIDs[termID];
+            if (readTermFreq) {
+              freq = postingsArray.termFreqs[termID];
+            }
+          }
+        } else {
+          int code = reader.readVInt();
+          if (!readTermFreq) {
+            docID += code;
+          } else {
+            docID += code >>> 1;
+            if ((code & 1) != 0) {
+              freq = 1;
+            } else {
+              freq = reader.readVInt();
+            }
+          }
 
-      return docID;
+          assert docID != postingsArray.lastDocIDs[termID];
+        }
+
+        if (liveDocs == null || liveDocs.get(docID)) {
+          return docID;
+        }
+      }
     }
 
     @Override
@@ -396,6 +421,7 @@
     final ByteSliceReader reader = new ByteSliceReader();
     final ByteSliceReader posReader = new ByteSliceReader();
     final boolean readOffsets;
+    final Bits liveDocs;
     int docID;
     int freq;
     int pos;
@@ -407,10 +433,11 @@
     boolean hasPayload;
     BytesRefBuilder payload = new BytesRefBuilder();
 
-    public FreqProxDocsAndPositionsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
+    public FreqProxDocsAndPositionsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray, Bits liveDocs) {
       this.terms = terms;
       this.postingsArray = postingsArray;
       this.readOffsets = terms.hasOffsets;
+      this.liveDocs = liveDocs;
       assert terms.hasProx;
       assert terms.hasFreq;
     }
@@ -436,34 +463,40 @@
 
     @Override
     public int nextDoc() throws IOException {
-      while (posLeft != 0) {
-        nextPosition();
-      }
-
-      if (reader.eof()) {
-        if (ended) {
-          return NO_MORE_DOCS;
-        } else {
-          ended = true;
-          docID = postingsArray.lastDocIDs[termID];
-          freq = postingsArray.termFreqs[termID];
-        }
-      } else {
-        int code = reader.readVInt();
-        docID += code >>> 1;
-        if ((code & 1) != 0) {
-          freq = 1;
-        } else {
-          freq = reader.readVInt();
+      while (true) {
+        while (posLeft != 0) {
+          nextPosition();
         }
 
-        assert docID != postingsArray.lastDocIDs[termID];
-      }
+        if (reader.eof()) {
+          if (ended) {
+            return NO_MORE_DOCS;
+          } else {
+            ended = true;
+            docID = postingsArray.lastDocIDs[termID];
+            freq = postingsArray.termFreqs[termID];
+          }
+        } else {
+          int code = reader.readVInt();
+          docID += code >>> 1;
+          if ((code & 1) != 0) {
+            freq = 1;
+          } else {
+            freq = reader.readVInt();
+          }
 
-      posLeft = freq;
-      pos = 0;
-      startOffset = 0;
-      return docID;
+          assert docID != postingsArray.lastDocIDs[termID];
+        }
+
+        posLeft = freq;
+        pos = 0;
+        startOffset = 0;
+        if (liveDocs != null && liveDocs.get(docID) == false) {
+          continue;
+        }
+
+        return docID;
+      }
     }
 
     @Override
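
Both rewritten nextDoc implementations share the same shape: decode the next candidate docID, then keep scanning until it is live. In outline (decodeNextDocID is a hypothetical stand-in for the byte-slice decoding above):

    int nextDoc() {
      while (true) {
        int candidate = decodeNextDocID();  // hypothetical: the VInt/slice decoding above
        if (candidate == NO_MORE_DOCS) {
          return NO_MORE_DOCS;
        }
        if (liveDocs == null || liveDocs.get(candidate)) {
          return candidate;  // surface only live documents
        }
        // deleted in RAM (e.g. a unique-constraint violation): keep scanning
      }
    }
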
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
index 8e98fbd..f82cef6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
@@ -97,10 +97,14 @@
     // Sort by field name
     CollectionUtil.introSort(allFields);
 
-    Fields fields = new FreqProxFields(allFields);
+    FreqProxFields fields = new FreqProxFields(allFields);
 
     applyDeletes(state, fields);
 
+    if (state.liveDocs != null) {
+      fields.setLiveDocs(state.liveDocs);
+    }
+
     FieldsConsumer consumer = state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state);
     boolean success = false;
     try {
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
index fae0d6f..039543c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
@@ -30,7 +30,6 @@
 final class FreqProxTermsWriterPerField extends TermsHashPerField {
 
   private FreqProxPostingsArray freqProxPostingsArray;
-
   final boolean hasFreq;
   final boolean hasProx;
   final boolean hasOffsets;
diff --git a/lucene/core/src/java/org/apache/lucene/index/GeneralField.java b/lucene/core/src/java/org/apache/lucene/index/GeneralField.java
deleted file mode 100644
index cb07f03..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/GeneralField.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Represents a single field in lucene document. Further generalizations
- * are {@link IndexableField} and {@link StorableField} interfaces.  
- *
- *  @lucene.experimental */
-
-public interface GeneralField {
-
-  /** Field name */
-  public String name();
-
-  /** {@link IndexableFieldType} describing the properties
-   * of this field. */
-  public IndexableFieldType fieldType();
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
index c4e939b..f68d2b6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
@@ -17,11 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.util.Bits;  // javadocs
-import org.apache.lucene.util.IOUtils;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collections;
@@ -31,6 +26,13 @@
 import java.util.WeakHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DocumentStoredFieldVisitor;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.util.Bits; // javadocs
+import org.apache.lucene.util.IOUtils;
+
 /**
  IndexReader is an abstract class, providing an interface for accessing a
  point-in-time view of an index.  Any changes made to the index
@@ -101,6 +103,8 @@
     public void onClose(IndexReader reader) throws IOException;
   }
 
+  public abstract FieldTypes getFieldTypes();
+
   private final Set<ReaderClosedListener> readerClosedListeners = 
       Collections.synchronizedSet(new LinkedHashSet<ReaderClosedListener>());
 
@@ -339,13 +343,13 @@
    *  custom processing/loading of each field.  If you
    *  simply want to load all fields, use {@link
    *  #document(int)}.  If you want to load a subset, use
    *  {@link DocumentStoredFieldVisitor}.  */
   public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException;
-  
+
   /**
    * Returns the stored fields of the <code>n</code><sup>th</sup>
    * <code>Document</code> in this index.  This is just
    * sugar for using {@link DocumentStoredFieldVisitor}.
    * <p>
    * <b>NOTE:</b> for performance reasons, this method does not check if the
    * requested document is deleted, and therefore asking for a deleted document
@@ -364,8 +368,8 @@
   // TODO: we need a separate StoredField, so that the
   // Document returned here contains that class not
   // IndexableField
-  public final StoredDocument document(int docID) throws IOException {
-    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
+  public final Document document(int docID) throws IOException {
+    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(getFieldTypes());
     document(docID, visitor);
     return visitor.getDocument();
   }
@@ -373,12 +377,11 @@
   /**
    * Like {@link #document(int)} but only loads the specified
    * fields.  Note that this is simply sugar for {@link
-   * DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}.
+   * DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(FieldTypes,Set)}.
    */
-  public final StoredDocument document(int docID, Set<String> fieldsToLoad)
+  public final Document document(int docID, Set<String> fieldsToLoad)
       throws IOException {
-    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(
-        fieldsToLoad);
+    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(getFieldTypes(), fieldsToLoad);
     document(docID, visitor);
     return visitor.getDocument();
   }
@@ -499,5 +502,4 @@
    * @see Terms#getSumTotalTermFreq()
    */
   public abstract long getSumTotalTermFreq(String field) throws IOException;
-
 }
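
document(int) now returns the schema-aware Document, built by DocumentStoredFieldVisitor with the reader's FieldTypes, instead of StoredDocument. A usage sketch, assuming the typed getters this branch's Document exposes:

    void demoLoadStored(IndexReader reader) throws IOException {
      Document doc = reader.document(0);
      // assumption: getString is one of this branch's typed accessors
      String title = doc.getString("title");
    }
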
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 7da70f0..80ee3d7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -32,8 +32,8 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
-import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -42,12 +42,15 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldInfosFormat;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate;
 import org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate;
 import org.apache.lucene.index.FieldInfos.FieldNumbers;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
@@ -223,7 +226,7 @@
   boolean enableTestPoints = false;
 
   private static final int UNBOUNDED_MAX_MERGE_SEGMENTS = -1;
-  
+
   /**
    * Name of the write lock in the index.
    */
@@ -254,13 +257,17 @@
   private final Directory mergeDirectory;  // used for merging
   private final Analyzer analyzer;    // how to analyze text
 
+  final FieldTypes fieldTypes; // schema
+
   private volatile long changeCount; // increments every time a change is completed
   private volatile long lastCommitChangeCount; // last changeCount that was committed
+  private volatile long lastCommitFieldTypesChangeCount; // last FieldTypes.getChangeCount()
 
   private List<SegmentCommitInfo> rollbackSegments;      // list of segmentInfo we will fallback to if the commit fails
 
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
+  volatile long pendingCommitFieldTypesChangeCount;
 
   private Collection<String> filesToCommit;
 
@@ -270,6 +277,9 @@
   private final DocumentsWriter docWriter;
   private final Queue<Event> eventQueue;
   final IndexFileDeleter deleter;
+  final Map<String,LiveUniqueValues> uniqueValues = new HashMap<>();
+
+  private final ReaderManager readerManager;
 
   // used by forceMerge to note those needing merging
   private Map<SegmentCommitInfo,Boolean> segmentsToMerge = new HashMap<>();
@@ -311,7 +321,7 @@
 
   // The instance that was passed to the constructor. It is saved only in order
   // to allow users to query an IndexWriter settings.
-  private final LiveIndexWriterConfig config;
+  final LiveIndexWriterConfig config;
 
   /** System.nanoTime() when commit started; used to write
    *  an infoStream message about how long commit took. */
@@ -759,15 +769,15 @@
     infoStream = config.getInfoStream();
     mergeScheduler = config.getMergeScheduler();
     mergeScheduler.setInfoStream(infoStream);
-    codec = config.getCodec();
 
     bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
     poolReaders = config.getReaderPooling();
 
     writeLock = directory.makeLock(WRITE_LOCK_NAME);
 
-    if (!writeLock.obtain(config.getWriteLockTimeout())) // obtain write lock
+    if (!writeLock.obtain(config.getWriteLockTimeout())) { // obtain write lock
       throw new LockObtainFailedException("Index locked for write: " + writeLock);
+    }
 
     boolean success = false;
     try {
@@ -836,6 +846,7 @@
         }
       }
 
+      fieldTypes = new FieldTypes(this, create, segmentInfos.infosVersion < SegmentInfos.VERSION_60, config.getAnalyzer(), config.getSimilarity());
       rollbackSegments = segmentInfos.createBackupSegmentInfos();
 
       // start with previous field numbers, but new FieldInfos
@@ -866,18 +877,37 @@
         messageState();
       }
 
+      // TODO: this is awkward.  It would be better if Codec was fully controlled by FieldTypes, somehow.  Maybe IR/IW should consult
+      // FieldTypes for the format for each field instead of requiring a PerFieldXXXFormat for all XXXs?
+      if ((config.getCodec() instanceof Lucene50Codec) == false) {
+        codec = config.getCodec();
+      } else {
+        codec = fieldTypes.getCodec();
+      }
+
+      readerManager = new ReaderManager(this, true);
+
       success = true;
 
     } finally {
       if (!success) {
         if (infoStream.isEnabled("IW")) {
-          infoStream.message("IW", "init: hit exception on init; releasing write lock");
+          infoStream.message("IW", "init: hit exception on init; releasing write lock and closing");
         }
-        IOUtils.closeWhileHandlingException(writeLock);
+        IOUtils.closeWhileHandlingException(writeLock, this);
         writeLock = null;
       }
     }
   }
+
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
+  public Document newDocument() {
+    ensureOpen();
+    return new Document(fieldTypes);
+  }
   
   // reads latest field infos for the commit
   // this is used on IW init and addIndexes(Dir) to create/update the global field map.
@@ -1032,12 +1062,6 @@
     return directory;
   }
 
-  /** Returns the analyzer used by this index. */
-  public Analyzer getAnalyzer() {
-    ensureOpen();
-    return analyzer;
-  }
-
   /** Returns total number of docs in this index, including
    *  docs not yet flushed (still in the RAM buffer),
    *  not counting deletions.
@@ -1126,7 +1150,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void addDocument(IndexDocument doc) throws IOException {
+  public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
     updateDocument(null, doc);
   }
 
@@ -1151,7 +1175,7 @@
    * perhaps to obtain better index compression), in which case
    * you may need to fully re-index your documents at that time.
    *
-   * <p>See {@link #addDocument(IndexDocument)} for details on
+   * <p>See {@link #addDocument(Iterable)} for details on
    * index and IndexWriter state after an Exception, and
    * flushing/merging temporary free space requirements.</p>
    *
@@ -1167,7 +1191,7 @@
    *
    * @lucene.experimental
    */
-  public void addDocuments(Iterable<? extends IndexDocument> docs) throws IOException {
+  public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     updateDocuments(null, docs);
   }
 
@@ -1184,12 +1208,12 @@
    *
    * @lucene.experimental
    */
-  public void updateDocuments(Term delTerm, Iterable<? extends IndexDocument> docs) throws IOException {
+  public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     ensureOpen();
     try {
       boolean success = false;
       try {
-        if (docWriter.updateDocuments(docs, analyzer, delTerm)) {
+        if (docWriter.updateDocuments(docs, delTerm)) {
           processEvents(true, false);
         }
         success = true;
@@ -1305,6 +1329,9 @@
    * Deletes the document(s) matching any of the provided queries.
    * All given deletes are applied and flushed atomically at the same time.
    *
+   * <p>Any deleted values of unique fields (e.g. added with {@link Document#addUniqueAtom})
+   * will not be seen as deleted until these buffered delete queries are applied.
+   *
    * @param queries array of queries to identify the documents
    * to be deleted
    * @throws CorruptIndexException if the index is corrupt
@@ -1334,12 +1361,12 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void updateDocument(Term term, IndexDocument doc) throws IOException {
+  public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
     ensureOpen();
     try {
       boolean success = false;
       try {
-        if (docWriter.updateDocument(doc, analyzer, term)) {
+        if (docWriter.updateDocument(doc, term)) {
           processEvents(true, false);
         }
         success = true;
@@ -1373,9 +1400,7 @@
    */
   public void updateNumericDocValue(Term term, String field, long value) throws IOException {
     ensureOpen();
-    if (!globalFieldNumberMap.contains(field, DocValuesType.NUMERIC)) {
-      throw new IllegalArgumentException("can only update existing numeric-docvalues fields!");
-    }
+    globalFieldNumberMap.verifyUpdateDocValuesType(field, DocValuesType.NUMERIC);
     try {
       if (docWriter.updateDocValues(new NumericDocValuesUpdate(term, field, value))) {
         processEvents(true, false);
@@ -1410,9 +1435,7 @@
     if (value == null) {
       throw new IllegalArgumentException("cannot update a field to a null value: " + field);
     }
-    if (!globalFieldNumberMap.contains(field, DocValuesType.BINARY)) {
-      throw new IllegalArgumentException("can only update existing binary-docvalues fields!");
-    }
+    globalFieldNumberMap.verifyUpdateDocValuesType(field, DocValuesType.BINARY);
     try {
       if (docWriter.updateDocValues(new BinaryDocValuesUpdate(term, field, value))) {
         processEvents(true, false);
@@ -1435,11 +1458,10 @@
    * @throws IOException
    *           if there is a low-level IO error
    */
-  public void updateDocValues(Term term, Field... updates) throws IOException {
+  public void updateDocValues(Term term, Iterable<? extends IndexableField> updates) throws IOException {
     ensureOpen();
-    DocValuesUpdate[] dvUpdates = new DocValuesUpdate[updates.length];
-    for (int i = 0; i < updates.length; i++) {
-      final Field f = updates[i];
+    List<DocValuesUpdate> dvUpdates = new ArrayList<>();
+    for (IndexableField f : updates) {
       final DocValuesType dvType = f.fieldType().docValuesType();
       if (dvType == null) {
         throw new NullPointerException("DocValuesType cannot be null (field: \"" + f.name() + "\")");
@@ -1447,22 +1469,21 @@
       if (dvType == DocValuesType.NONE) {
         throw new IllegalArgumentException("can only update NUMERIC or BINARY fields! field=" + f.name());
       }
-      if (!globalFieldNumberMap.contains(f.name(), dvType)) {
-        throw new IllegalArgumentException("can only update existing docvalues fields! field=" + f.name() + ", type=" + dvType);
-      }
       switch (dvType) {
         case NUMERIC:
-          dvUpdates[i] = new NumericDocValuesUpdate(term, f.name(), (Long) f.numericValue());
+          globalFieldNumberMap.verifyUpdateDocValuesType(f.name(), dvType);
+          dvUpdates.add(new NumericDocValuesUpdate(term, f.name(), (Long) f.numericValue()));
           break;
         case BINARY:
-          dvUpdates[i] = new BinaryDocValuesUpdate(term, f.name(), f.binaryValue());
+          globalFieldNumberMap.verifyUpdateDocValuesType(f.name(), dvType);
+          dvUpdates.add(new BinaryDocValuesUpdate(term, f.name(), f.binaryValue()));
           break;
         default:
           throw new IllegalArgumentException("can only update NUMERIC or BINARY fields: field=" + f.name() + ", type=" + dvType);
       }
     }
     try {
-      if (docWriter.updateDocValues(dvUpdates)) {
+      if (docWriter.updateDocValues(dvUpdates.toArray(new DocValuesUpdate[dvUpdates.size()]))) {
         processEvents(true, false);
       }
     } catch (OutOfMemoryError oom) {
@@ -1922,12 +1943,15 @@
       }
 
       // Must pre-close in case it increments changeCount so that we can then
-      // set it to false before calling closeInternal
+      // set it to false before closing
       mergeScheduler.close();
 
+      readerManager.close();
+
       bufferedUpdatesStream.clear();
       docWriter.close(); // mark it as closed first to prevent subsequent indexing actions/flushes 
       docWriter.abort(this); // don't sync on IW here
+      fieldTypes.close();
       synchronized(this) {
 
         if (pendingCommit != null) {
@@ -1999,7 +2023,7 @@
           }
           
           // close all the closeables we can (but important is readerPool and writeLock to prevent leaks)
-          IOUtils.closeWhileHandlingException(readerPool, deleter, writeLock);
+          IOUtils.closeWhileHandlingException(readerManager, readerPool, deleter, writeLock);
           writeLock = null;
         }
         closed = true;
@@ -2071,6 +2095,7 @@
              */
             // Don't bother saving any changes in our segmentInfos
             readerPool.dropAll(false);
+            fieldTypes.clear();
             // Mark that the index has changed
             ++changeCount;
             segmentInfos.changed();
@@ -2312,6 +2337,9 @@
    *
    * <p>This requires this index not be among those to be added.
    *
+   * <p>
+   * <b>NOTE</b>: this call does not enforce unique-field constraints for the added documents.
+   *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    * @throws LockObtainFailedException if we were unable to
@@ -2344,6 +2372,8 @@
           SegmentInfos sis = SegmentInfos.readLatestCommit(dir); // read infos from dir
           totalDocCount += sis.totalDocCount();
 
+          fieldTypes.addAll(FieldTypes.getFieldTypes(sis, fieldTypes.getIndexAnalyzer(), fieldTypes.getSimilarity()));
+
           for (SegmentCommitInfo info : sis) {
             assert !infos.contains(info): "dup info dir=" + info.info.dir + " name=" + info.info.name;
 
@@ -2436,6 +2466,9 @@
    * {@code maxMergeAtOnce} parameter, you should pass that many readers in one
    * call.
    * 
+   * <p>
+   * <b>NOTE</b>: this call does not enforce unique-field constraints
+   *
    * @throws CorruptIndexException
    *           if the index is corrupt
    * @throws IOException
@@ -2453,13 +2486,17 @@
 
       String mergedName = newSegmentName();
       for (CodecReader leaf : readers) {
+        FieldTypes ft = leaf.getFieldTypes();
+        if (ft != null) {
+          fieldTypes.addAll(ft);
+        }
         numDocs += leaf.numDocs();
       }
 
       // Make sure adding the new documents to this index won't
       // exceed the limit:
       reserveDocs(numDocs);
-      
+
       final IOContext context = new IOContext(new MergeInfo(numDocs, -1, false, -1));
 
       // TODO: somehow we should fix this merge so it's
@@ -2469,7 +2506,7 @@
       SegmentInfo info = new SegmentInfo(directory, Version.LATEST, mergedName, -1,
                                          false, codec, null, StringHelper.randomId(), new HashMap<>());
 
-      SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
+      SegmentMerger merger = new SegmentMerger(fieldTypes, Arrays.asList(readers), info, infoStream, trackingDir,
                                                globalFieldNumberMap, 
                                                context);
       
@@ -2681,7 +2718,10 @@
               // sneak into the commit point:
               toCommit = segmentInfos.clone();
 
+              toCommit.getUserData().put(FieldTypes.FIELD_TYPES_KEY, fieldTypes.writeToString());
+
               pendingCommitChangeCount = changeCount;
+              pendingCommitFieldTypesChangeCount = fieldTypes.getChangeCount();
 
               // This protects the segmentInfos we are now going
               // to commit.  This is important in case, eg, while
@@ -2795,7 +2835,10 @@
    *  merged finished, this method may return true right
    *  after you had just called {@link #commit}. */
   public final boolean hasUncommittedChanges() {
-    return changeCount != lastCommitChangeCount || docWriter.anyChanges() || bufferedUpdatesStream.any();
+    return fieldTypes.getChangeCount() != lastCommitFieldTypesChangeCount ||
+      changeCount != lastCommitChangeCount ||
+      docWriter.anyChanges() ||
+      bufferedUpdatesStream.any();
   }
 
   private final void commitInternal(MergePolicy mergePolicy) throws IOException {
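[Editor note] Since hasUncommittedChanges() above now also reflects pending FieldTypes (schema) changes, a caller can use it to skip needless commits. A small sketch:

    // Commit only if docs, deletes/updates, or field-type changes are pending:
    if (writer.hasUncommittedChanges()) {
      writer.commit();
    }
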
@@ -2851,6 +2894,7 @@
             deleter.checkpoint(pendingCommit, true);
 
             lastCommitChangeCount = pendingCommitChangeCount;
+            lastCommitFieldTypesChangeCount = pendingCommitFieldTypesChangeCount;
             rollbackSegments = pendingCommit.createBackupSegmentInfos();
 
             finished = true;
@@ -2988,7 +3032,7 @@
 
     return false;
   }
-  
+
   final synchronized boolean applyAllDeletesAndUpdates() throws IOException {
     flushDeletesCount.incrementAndGet();
     final BufferedUpdatesStream.ApplyDeletesResult result;
@@ -3867,7 +3911,7 @@
           synchronized (this) {
             // We must also sync on IW here, because another thread could be writing
             // new DV updates / remove old gen field infos files causing FNFE:
-            newReader = new SegmentReader(info, reader, liveDocs, info.info.getDocCount() - delCount);
+            newReader = new SegmentReader(fieldTypes, info, reader, liveDocs, info.info.getDocCount() - delCount);
           }
 
           boolean released = false;
@@ -3892,7 +3936,7 @@
       
       // we pass merge.getMergeReaders() instead of merge.readers to allow the
       // OneMerge to return a view over the actual segments to merge
-      final SegmentMerger merger = new SegmentMerger(merge.getMergeReaders(),
+      final SegmentMerger merger = new SegmentMerger(fieldTypes, merge.getMergeReaders(),
                                                      merge.info.info, infoStream, dirWrapper,
                                                      globalFieldNumberMap, 
                                                      context);
@@ -4221,12 +4265,14 @@
       }
 
       synchronized(this) {
+        long fieldTypesChangeCount = fieldTypes.getChangeCount();
 
         if (lastCommitChangeCount > changeCount) {
           throw new IllegalStateException("lastCommitChangeCount=" + lastCommitChangeCount + ",changeCount=" + changeCount);
         }
+        assert lastCommitFieldTypesChangeCount <= fieldTypesChangeCount: "lastCommitFieldTypesChangeCount=" + lastCommitFieldTypesChangeCount + " fieldTypesChangeCount=" + fieldTypesChangeCount;
 
-        if (pendingCommitChangeCount == lastCommitChangeCount) {
+        if (pendingCommitChangeCount == lastCommitChangeCount && pendingCommitFieldTypesChangeCount == lastCommitFieldTypesChangeCount) {
           if (infoStream.isEnabled("IW")) {
             infoStream.message("IW", "  skip startCommit(): no changes pending");
           }
@@ -4239,7 +4285,7 @@
         }
 
         if (infoStream.isEnabled("IW")) {
-          infoStream.message("IW", "startCommit index=" + segString(toLiveInfos(toSync)) + " changeCount=" + changeCount);
+          infoStream.message("IW", "startCommit index=" + segString(toLiveInfos(toSync)) + " changeCount=" + changeCount + " fieldTypesChangeCount=" + fieldTypesChangeCount);
         }
 
         assert filesExist(toSync);
@@ -4535,7 +4581,6 @@
         maybeMerge(config.getMergePolicy(), MergeTrigger.SEGMENT_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
       }
     }
-    
   }
   
   synchronized void incRefDeleter(SegmentInfos segmentInfos) throws IOException {
@@ -4544,7 +4589,7 @@
   }
   
   synchronized void decRefDeleter(SegmentInfos segmentInfos) throws IOException {
-    ensureOpen();
+    ensureOpen(false);
     deleter.decRef(segmentInfos);
   }
   
@@ -4612,6 +4657,32 @@
     }
   }
 
+  // TODO: cutover to this, remove DirectoryReader/ReaderManager APIs taking writer directly?
+  /** Returns a {@link ReferenceManager} to get NRT readers. */
+  ReferenceManager<DirectoryReader> getReaderManager() {
+    return readerManager;
+  }
+
+  // TODO: we could expose this to apps too?  e.g. to check if a given id exists in the index
+
+  // TODO: what other optimizations can we do when we know a field is unique?
+  final AtomicLong uniqueValuesRAM = new AtomicLong();
+
+  synchronized LiveUniqueValues getUniqueValues(String fieldName) {
+    LiveUniqueValues v;
+    if (fieldTypes.getIsUnique(fieldName)) {
+      v = uniqueValues.get(fieldName);
+      if (v == null) {
+        v = new LiveUniqueValues(fieldName, readerManager, uniqueValuesRAM);
+        uniqueValues.put(fieldName, v);
+      }
+    } else {
+      v = null;
+    }
+
+    return v;
+  }
+
   /** Wraps the incoming {@link Directory} so that we assign a per-thread
    *  {@link MergeRateLimiter} to all created {@link IndexOutput}s. */
   private Directory addMergeRateLimiters(Directory in) {
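[Editor note] The getReaderManager() hook added above (package-private for now, per the TODO) exposes the writer's internal NRT ReferenceManager. A hedged sketch of the standard acquire/release discipline it expects, as a fragment inside a method that throws IOException:

    ReferenceManager<DirectoryReader> mgr = writer.getReaderManager();
    mgr.maybeRefresh();                      // make recent index changes visible
    DirectoryReader reader = mgr.acquire();  // ref-counted: must be released
    try {
      IndexSearcher searcher = new IndexSearcher(reader);
      // ... run searches against the NRT view ...
    } finally {
      mgr.release(reader);                   // never close() an acquired reader
    }
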
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexableField.java b/lucene/core/src/java/org/apache/lucene/index/IndexableField.java
index f13ac3c..815dfcd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexableField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexableField.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.search.similarities.DefaultSimilarity; // javadocs
 import org.apache.lucene.search.similarities.Similarity; // javadocs
+import org.apache.lucene.util.BytesRef;
 
 // TODO: how to handle versioning here...?
 
@@ -31,16 +32,21 @@
  *
  *  @lucene.experimental */
 
-public interface IndexableField extends GeneralField {
+public interface IndexableField {
+
+  /** Field name */
+  public String name();
+
+  /** {@link IndexableFieldType} describing the properties of this field. */
+  public IndexableFieldType fieldType();
 
   /**
   * Creates the TokenStream used for indexing this field, or returns null
   * (the default) if the field is not to be indexed.
    *
-   * @param analyzer Analyzer that should be used to create the TokenStreams from
    * @param reuse TokenStream for a previous instance of this field <b>name</b>. This allows
    *              custom field types (like StringField and NumericField) that do not use
-   *              the analyzer to still have good performance. Note: the passed-in type
+   *              an analyzer to still have good performance. Note: the passed-in type
    *              may be inappropriate, for example if you mix up different types of Fields
    *              for the same field name. So it's the responsibility of the implementation to
    *              check.
@@ -48,7 +54,9 @@
    *         a non-null value if the field is to be indexed
    * @throws IOException Can be thrown while creating the TokenStream
    */
-  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException;
+  default public TokenStream tokenStream(TokenStream reuse) throws IOException {
+    return null;
+  }
 
   /** 
    * Returns the field's index-time boost.
@@ -70,5 +78,32 @@
    * @see Similarity#computeNorm(FieldInvertState)
    * @see DefaultSimilarity#encodeNormValue(float)
    */
-  public float boost();
+  default public float boost() {
+    return 1.0f;
+  }
+
+  /** Non-null if this field has a stored binary value */
+  default public BytesRef binaryValue() {
+    return null;
+  }
+
+  /** Non-null if this field has a binary doc value */
+  default public BytesRef binaryDocValue() {
+    return null;
+  }
+
+  /** Non-null if this field has a string value */
+  default public String stringValue() {
+    return null;
+  }
+
+  /** Non-null if this field has a numeric value */
+  default public Number numericValue() { 
+    return null;
+  }
+
+  /** Non-null if this field has a numeric doc value */
+  default public Number numericDocValue() {
+    return null;
+  }
 }
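[Editor note] With name() and fieldType() as the only abstract methods, a custom field now needs just a handful of lines. A hedged sketch of a stored-only binary field (it relies on the IndexableFieldType defaults introduced in the next file; StoredBytesField is an illustrative name):

    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.index.IndexableFieldType;
    import org.apache.lucene.util.BytesRef;

    /** Stored-only binary field; everything else falls back to the interface defaults. */
    class StoredBytesField implements IndexableField {
      private final String name;
      private final BytesRef value;

      StoredBytesField(String name, BytesRef value) {
        this.name = name;
        this.value = value;
      }

      @Override
      public String name() { return name; }

      @Override
      public IndexableFieldType fieldType() {
        // Only stored() deviates from the defaults (not indexed, no doc values):
        return new IndexableFieldType() {
          @Override
          public boolean stored() { return true; }
        };
      }

      @Override
      public BytesRef binaryValue() { return value; }
    }
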
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
index ee24158..ef480b5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
@@ -26,20 +26,11 @@
 public interface IndexableFieldType {
 
   /** True if the field's value should be stored */
-  public boolean stored();
+  default public boolean stored() {
+    return false;
+  }
   
   /** 
-   * True if this field's value should be analyzed by the
-   * {@link Analyzer}.
-   * <p>
-   * This has no effect if {@link #indexOptions()} returns
-   * IndexOptions.NONE.
-   */
-  // TODO: shouldn't we remove this?  Whether/how a field is
-  // tokenized is an impl detail under Field?
-  public boolean tokenized();
-
-  /** 
    * True if this field's indexed form should be also stored 
    * into term vectors.
    * <p>
@@ -50,7 +41,9 @@
    * This option is illegal if {@link #indexOptions()} returns
    * IndexOptions.NONE.
    */
-  public boolean storeTermVectors();
+  default public boolean storeTermVectors() {
+    return false;
+  }
 
   /** 
    * True if this field's token character offsets should also
@@ -59,7 +52,9 @@
    * This option is illegal if term vectors are not enabled for the field
    * ({@link #storeTermVectors()} is false)
    */
-  public boolean storeTermVectorOffsets();
+  default public boolean storeTermVectorOffsets() {
+    return false;
+  }
 
   /** 
    * True if this field's token positions should also be stored
@@ -68,7 +63,9 @@
    * This option is illegal if term vectors are not enabled for the field
    * ({@link #storeTermVectors()} is false). 
    */
-  public boolean storeTermVectorPositions();
+  default public boolean storeTermVectorPositions() {
+    return false;
+  }
   
   /** 
    * True if this field's token payloads should also be stored
@@ -77,7 +74,9 @@
    * This option is illegal if term vector positions are not enabled 
    * for the field ({@link #storeTermVectors()} is false).
    */
-  public boolean storeTermVectorPayloads();
+  default public boolean storeTermVectorPayloads() {
+    return false;
+  }
 
   /**
    * True if normalization values should be omitted for the field.
@@ -85,15 +84,31 @@
    * This saves memory, but at the expense of scoring quality (length normalization
    * will be disabled), and if you omit norms, you cannot use index-time boosts. 
    */
-  public boolean omitNorms();
+  default public boolean omitNorms() {
+    return false;
+  }
 
   /** {@link IndexOptions}, describing what should be
    *  recorded into the inverted index */
-  public IndexOptions indexOptions();
+  default public IndexOptions indexOptions() {
+    return IndexOptions.NONE;
+  }
 
   /** 
    * DocValues {@link DocValuesType}: how the field's value will be indexed
    * into docValues.
    */
-  public DocValuesType docValuesType();  
+  default public DocValuesType docValuesType() {
+    return DocValuesType.NONE;
+  }
+
+  /** Returns the gap to insert between multi-valued, tokenized fields */
+  default public int getPositionGap() {
+    return 1;
+  }
+
+  /** Returns the offset gap to insert between multi-valued, tokenized fields */
+  default public int getOffsetGap() {
+    return 0;
+  }
 }
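[Editor note] Because every method now has a default, a field type reduces to the set of overrides that differ from "not stored, not indexed, no doc values". A hedged sketch of a docs-only indexed type with numeric doc values (NumericKeyType is an illustrative name):

    import org.apache.lucene.index.DocValuesType;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.IndexableFieldType;

    /** Indexed docs-only, plus NUMERIC doc values; all other options keep the defaults. */
    class NumericKeyType implements IndexableFieldType {
      @Override
      public IndexOptions indexOptions() {
        return IndexOptions.DOCS;      // postings record documents only (no freqs/positions)
      }

      @Override
      public DocValuesType docValuesType() {
        return DocValuesType.NUMERIC;  // column-stride values for sorting/aggregation
      }
    }
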
diff --git a/lucene/core/src/java/org/apache/lucene/index/LiveUniqueValues.java b/lucene/core/src/java/org/apache/lucene/index/LiveUniqueValues.java
new file mode 100644
index 0000000..c868cf5
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/LiveUniqueValues.java
@@ -0,0 +1,271 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.index.IndexReader.ReaderClosedListener;
+import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CloseableThreadLocal;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/** Tracks the already-indexed values in a unique field ({@link FieldTypes#getIsUnique}). */
+final class LiveUniqueValues implements ReferenceManager.RefreshListener, Closeable {
+
+  private final static long BYTES_PER_ENTRY = 
+    // HashMap array @ 50% load
+    2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF +
+    // HashMap Entry
+    3 * RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_INT +
+    // BytesRef
+    RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + RamUsageEstimator.NUM_BYTES_OBJECT_REF +
+    // byte[] (we count actual bytes based on length of id that's added)
+    RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
+    
+  // Holds reused TermsEnum/DocsEnum state for faster lookups:
+  private final ConcurrentMap<IndexReader,CloseableThreadLocal<PerThreadLookup>> lookupStates = new ConcurrentHashMap<>();
+
+  // Evicts this reader from lookupStates once it's closed:
+  private final ReaderClosedListener removeLookupState = new ReaderClosedListener() {
+      @Override
+      public void onClose(IndexReader reader) {
+        CloseableThreadLocal<PerThreadLookup> ctl = lookupStates.remove(reader);
+        if (ctl != null) {
+          ctl.close();
+        }
+      }
+    };
+
+  // Maps the id to TRUE if it's live, else FALSE:
+  private volatile Map<BytesRef,Boolean> old = newMap();
+  private volatile Map<BytesRef,Boolean> current = newMap();
+  private final ReaderManager mgr;
+  private final String uidField;
+  private volatile long oldRAMBytesUsed;
+  private volatile long currentRAMBytesUsed;
+  private final AtomicLong ramBytesUsed;
+
+  private static Map<BytesRef,Boolean> newMap() {
+    return new HashMap<BytesRef,Boolean>();
+  }
+
+  /** Sole constructor. */
+  public LiveUniqueValues(String uidField, ReaderManager mgr, AtomicLong ramBytesUsed) {
+    this.uidField = uidField;
+    this.mgr = mgr;
+    this.ramBytesUsed = ramBytesUsed;
+    mgr.addListener(this);
+  }
+
+  @Override
+  public void close() {
+    mgr.removeListener(this);
+  }
+
+  @Override
+  public synchronized void beforeRefresh() throws IOException {
+    old = current;
+    // Start sending all updates after this point to the new
+    // map.  While reopen is running, any lookup will first
+    // try this new map, then fallback to old, then to the
+    // current searcher:
+    current = newMap();
+    oldRAMBytesUsed = currentRAMBytesUsed;
+    currentRAMBytesUsed = 0;
+  }
+
+  @Override
+  public synchronized void afterRefresh(boolean didRefresh) throws IOException {
+    // Now drop all the old values because they are now
+    // visible via the searcher that was just opened; if
+    // didRefresh is false, it's possible old has some
+    // entries in it, which is fine: it means they were
+    // actually already included in the previously opened
+    // reader.  So we can safely clear old here:
+    old = newMap();
+    ramBytesUsed.addAndGet(-oldRAMBytesUsed);
+    oldRAMBytesUsed = 0;
+  }
+
+  /** Call this to try adding a value; this returns false if the add
+   *  fails because the value is already present in this field. */
+  // TODO: improve concurrency
+  public synchronized boolean add(BytesRef id) throws IOException {
+    Boolean v = current.get(id);
+    if (v != null) {
+      if (v == Boolean.FALSE) {
+        current.put(id, Boolean.TRUE);
+        return true;
+      } else {
+        return false;
+      }
+    }
+    v = old.get(id);
+    if (v != null) {
+      if (v == Boolean.FALSE) {
+        current.put(id, Boolean.TRUE);
+        addRAM(BYTES_PER_ENTRY + id.length);
+        return true;
+      } else {
+        return false;
+      }
+    }
+    DirectoryReader reader = mgr.acquire();
+    try {
+      PerThreadLookup lookup = getLookupState(reader);
+      if (lookup.exists(id)) {
+        return false;
+      } else {
+        current.put(id, Boolean.TRUE);
+        addRAM(BYTES_PER_ENTRY + id.length);
+        return true;
+      }
+    } finally {
+      mgr.release(reader);
+    }
+  }
+
+  /** Call this after you've successfully deleted a document
+   *  from the index. */
+  public synchronized void delete(BytesRef id) {
+    Boolean prev = current.put(id, Boolean.FALSE);
+    if (prev == null) {
+      addRAM(BYTES_PER_ENTRY + id.length);
+    }
+  }
+
+  private synchronized void addRAM(long bytes) {
+    currentRAMBytesUsed += bytes;
+    ramBytesUsed.addAndGet(bytes);
+  }
+
+  /** Returns the [approximate] number of id/value pairs
+   *  buffered in RAM. */
+  public synchronized int size() {
+    return current.size() + old.size();
+  }
+
+  private PerThreadLookup getLookupState(DirectoryReader reader) throws IOException {
+    CloseableThreadLocal<PerThreadLookup> ctl = lookupStates.get(reader);
+    if (ctl == null) {
+      // First time we are seeing this reader; make a new CTL:
+      ctl = new CloseableThreadLocal<PerThreadLookup>();
+      CloseableThreadLocal<PerThreadLookup> other = lookupStates.putIfAbsent(reader, ctl);
+      if (other == null) {
+        // Our CTL won, we must remove it when the reader is closed:
+        reader.addReaderClosedListener(removeLookupState);
+      } else {
+        // Another thread beat us to it: just use their CTL:
+        ctl.close();
+        ctl = other;
+      }
+    }
+
+    PerThreadLookup lookupState = ctl.get();
+    if (lookupState == null) {
+      // First time this thread searches this reader:
+      lookupState = new PerThreadLookup(reader, uidField);
+      ctl.set(lookupState);
+    }
+
+    return lookupState;
+  }
+
+  // TODO: optimize this so that on toplevel reader reopen, we reuse TermsEnum for shared segments:
+  private final static class PerThreadLookup {
+
+    private final LeafReaderContext[] readerContexts;
+    private final TermsEnum[] termsEnums;
+    private final DocsEnum[] docsEnums;
+    private final Bits[] liveDocs;
+    private final int numSegs;
+    private final boolean hasDeletions;
+
+    public PerThreadLookup(IndexReader r, String uidFieldName) throws IOException {
+
+      List<LeafReaderContext> leaves = new ArrayList<>(r.leaves());
+
+      readerContexts = leaves.toArray(new LeafReaderContext[leaves.size()]);
+      termsEnums = new TermsEnum[leaves.size()];
+      docsEnums = new DocsEnum[leaves.size()];
+      liveDocs = new Bits[leaves.size()];
+      int numSegs = 0;
+      boolean hasDeletions = false;
+
+      // iterate backwards to optimize for the frequently updated documents
+      // which are likely to be in the last segments
+      for(int i=leaves.size()-1;i>=0;i--) {
+        LeafReaderContext readerContext = leaves.get(i);
+        Fields fields = readerContext.reader().fields();
+        if (fields != null) {
+          Terms terms = fields.terms(uidFieldName);
+          if (terms != null) {
+            readerContexts[numSegs] = readerContext;
+            termsEnums[numSegs] = terms.iterator(null);
+            assert termsEnums[numSegs] != null;
+            liveDocs[numSegs] = readerContext.reader().getLiveDocs();
+            hasDeletions |= readerContext.reader().hasDeletions();
+            numSegs++;
+          }
+        }
+      }
+      this.numSegs = numSegs;
+      this.hasDeletions = hasDeletions;
+    }
+
+    /** Return true if id is found. */
+    public boolean exists(BytesRef id) throws IOException {
+      for(int seg=0;seg<numSegs;seg++) {
+        if (termsEnums[seg].seekExact(id)) {
+          // NOTE: we don't need to pass live docs because IW now removes them on flush:
+          DocsEnum docs = docsEnums[seg] = termsEnums[seg].docs(null, docsEnums[seg], 0);
+          int docID = docs.nextDoc();
+          if (docID != DocsEnum.NO_MORE_DOCS) {
+            assert docs.nextDoc() == DocsEnum.NO_MORE_DOCS;
+            return true;
+          } else {
+            assert hasDeletions;
+          }
+        }
+      }
+      return false;
+    }
+  }
+
+}
+
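[Editor note] LiveUniqueValues double-buffers an id-to-live map across NRT refreshes: lookups try current, then old, then the last-opened reader. A hedged sketch of the intended protocol (the class is package-private, so this assumes same-package code; writer, doc and readerManager are illustrative names):

    // add() returns false when the value is already live in the field:
    LiveUniqueValues ids = new LiveUniqueValues("id", readerManager, new AtomicLong());

    BytesRef id = new BytesRef("doc-42");
    if (ids.add(id)) {
      writer.addDocument(doc);                          // first occurrence: index it
    } else {
      writer.deleteDocuments(new Term("id", "doc-42")); // replace the old document
      ids.delete(id);                                   // record the delete...
      ids.add(id);                                      // ...then re-claim the id
      writer.addDocument(doc);
    }
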
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
index 2aa9e5f..5e145da 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
@@ -35,7 +35,7 @@
   MergeState.DocMap currentMap;
   DocsEnum current;
   int currentBase;
-  int doc = -1;
+  int doc;
   private final MergeState mergeState;
   MultiDocsEnum multiDocsEnum;
 
@@ -50,6 +50,7 @@
     this.multiDocsEnum = docsEnum;
     upto = -1;
     current = null;
+    doc = -1;
     return this;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeState.java b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
index 108b462..706fde7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergeState.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
@@ -25,6 +25,8 @@
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.packed.PackedInts;
@@ -74,9 +76,12 @@
   /** InfoStream for debugging messages. */
   public final InfoStream infoStream;
 
-  /** Sole constructor. */
-  MergeState(List<CodecReader> readers, SegmentInfo segmentInfo, InfoStream infoStream) throws IOException {
+  public final FieldTypes fieldTypes;
 
+  /** Sole constructor. */
+  MergeState(FieldTypes fieldTypes, List<CodecReader> readers, SegmentInfo segmentInfo, InfoStream infoStream) throws IOException {
+
+    this.fieldTypes = fieldTypes;
     int numReaders = readers.size();
     docMaps = new DocMap[numReaders];
     docBase = new int[numReaders];
@@ -88,9 +93,12 @@
     docValuesProducers = new DocValuesProducer[numReaders];
     fieldInfos = new FieldInfos[numReaders];
     liveDocs = new Bits[numReaders];
-
     for(int i=0;i<numReaders;i++) {
       final CodecReader reader = readers.get(i);
+      FieldTypes readerFieldTypes = reader.getFieldTypes();
+      if (readerFieldTypes != null && fieldTypes != readerFieldTypes) {
+        fieldTypes.addAll(readerFieldTypes);
+      }
 
       maxDocs[i] = reader.maxDoc();
       liveDocs[i] = reader.getLiveDocs();
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index 8a6dd0c..65560c1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -78,7 +79,7 @@
           return fields.get(0);
         } else {
           return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
-                                         slices.toArray(ReaderSlice.EMPTY_ARRAY));
+                                 slices.toArray(ReaderSlice.EMPTY_ARRAY));
         }
     }
   }
@@ -200,7 +201,6 @@
     if (result != null)
       return result;
 
-
     // Lazy init: first time this field is requested, we
     // create & add to terms:
     final List<Terms> subs2 = new ArrayList<>();
@@ -220,7 +220,7 @@
       // is unbounded.
     } else {
       result = new MultiTerms(subs2.toArray(Terms.EMPTY_ARRAY),
-          slices2.toArray(ReaderSlice.EMPTY_ARRAY));
+                              slices2.toArray(ReaderSlice.EMPTY_ARRAY));
       terms.put(field, result);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
index 2ad32de..ff15365 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
@@ -39,6 +39,7 @@
   private final boolean hasOffsets;
   private final boolean hasPositions;
   private final boolean hasPayloads;
+  private final int[] termLengths;
 
   /** Sole constructor.
    *
@@ -55,11 +56,14 @@
     boolean _hasOffsets = true;
     boolean _hasPositions = true;
     boolean _hasPayloads = false;
+    termLengths = new int[subs.length];
     for(int i=0;i<subs.length;i++) {
       _hasFreqs &= subs[i].hasFreqs();
       _hasOffsets &= subs[i].hasOffsets();
       _hasPositions &= subs[i].hasPositions();
       _hasPayloads |= subs[i].hasPayloads();
+      BytesRef minTerm = subs[i].getMin();
+      termLengths[i] = minTerm == null ? -1 : minTerm.length;
     }
 
     hasFreqs = _hasFreqs;
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
index 6ae2c7c..5a94688 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
@@ -17,13 +17,14 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.Bits;
-
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Comparator;
+
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.PriorityQueue;
 
 /**
  * Exposes {@link TermsEnum} API, merged from {@link TermsEnum} API of sub-segments.
@@ -148,10 +149,10 @@
       if (seekOpt) {
         final BytesRef curTerm = currentSubs[i].current;
         if (curTerm != null) {
-          final int cmp = term.compareTo(curTerm);
-          if (cmp == 0) {
+          final int x = term.compareTo(curTerm);
+          if (x == 0) {
             status = true;
-          } else if (cmp < 0) {
+          } else if (x < 0) {
             status = false;
           } else {
             status = currentSubs[i].terms.seekExact(term);
@@ -200,10 +201,10 @@
       if (seekOpt) {
         final BytesRef curTerm = currentSubs[i].current;
         if (curTerm != null) {
-          final int cmp = term.compareTo(curTerm);
-          if (cmp == 0) {
+          final int x = term.compareTo(curTerm);
+          if (x == 0) {
             status = SeekStatus.FOUND;
-          } else if (cmp < 0) {
+          } else if (x < 0) {
             status = SeekStatus.NOT_FOUND;
           } else {
             status = currentSubs[i].terms.seekCeil(term);
@@ -304,7 +305,6 @@
     } else {
       current = null;
     }
-
     return current;
   }
 
@@ -499,16 +499,16 @@
     }
   }
 
-  private final static class TermMergeQueue extends PriorityQueue<TermsEnumWithSlice> {
+  private final class TermMergeQueue extends PriorityQueue<TermsEnumWithSlice> {
     TermMergeQueue(int size) {
       super(size);
     }
 
     @Override
     protected boolean lessThan(TermsEnumWithSlice termsA, TermsEnumWithSlice termsB) {
-      final int cmp = termsA.current.compareTo(termsB.current);
-      if (cmp != 0) {
-        return cmp < 0;
+      final int x = termsA.current.compareTo(termsB.current);
+      if (x != 0) {
+        return x < 0;
       } else {
         return termsA.subSlice.start < termsB.subSlice.start;
       }
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexDocument.java b/lucene/core/src/java/org/apache/lucene/index/NotUniqueException.java
similarity index 70%
rename from lucene/core/src/java/org/apache/lucene/index/IndexDocument.java
rename to lucene/core/src/java/org/apache/lucene/index/NotUniqueException.java
index b8bde65..4a3eed7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexDocument.java
+++ b/lucene/core/src/java/org/apache/lucene/index/NotUniqueException.java
@@ -17,15 +17,10 @@
  * limitations under the License.
  */
 
-/**
- * Elementary interface used for indexing an document.
- * @lucene.internal
- */
-public interface IndexDocument {
+import org.apache.lucene.util.BytesRef;
 
-  /** Obtains all indexable fields in document */
-  public Iterable<? extends IndexableField> indexableFields();
-  
-  /** Obtains all storable fields in document */
-  public Iterable<? extends StorableField> storableFields();
+/** Thrown when a field declared as unique receives a value that already exists in the index. */
+public class NotUniqueException extends IllegalArgumentException {
+  public NotUniqueException(String fieldName, BytesRef value) {
+    super("field \"" + fieldName + "\" must be unique, but value=" + value + " appears more than once");
+  }
 }
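[Editor note] The renamed exception extends IllegalArgumentException, so existing catch blocks keep working. A hedged sketch of handling a duplicate value in a unique "id" field (writer and doc are illustrative):

    try {
      writer.addDocument(doc);  // doc repeats a value in the unique "id" field
    } catch (NotUniqueException nue) {
      // message: field "id" must be unique, but value=... appears more than once
      System.err.println("rejected duplicate: " + nue.getMessage());
    }
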
diff --git a/lucene/core/src/java/org/apache/lucene/index/NumericDocValuesFieldUpdates.java b/lucene/core/src/java/org/apache/lucene/index/NumericDocValuesFieldUpdates.java
index dd6e859..2647e5c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/NumericDocValuesFieldUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/NumericDocValuesFieldUpdates.java
@@ -1,6 +1,5 @@
 package org.apache.lucene.index;
 
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.InPlaceMergeSorter;
 import org.apache.lucene.util.packed.PackedInts;
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
index f6905d4..0087a66 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
@@ -23,6 +23,8 @@
 import java.util.List;
 import java.util.Set;
 
+import org.apache.lucene.document.FieldTypes;
+
 /** An {@link CompositeReader} which reads multiple, parallel indexes.  Each index added
  * must have the same number of documents, and exactly the same hierarchical subreader structure,
  * but typically each contains different fields. Deletions are taken from the first reader.
@@ -48,6 +50,7 @@
  */
 public class ParallelCompositeReader extends BaseCompositeReader<IndexReader> {
   private final boolean closeSubReaders;
+  private final FieldTypes fieldTypes;
   private final Set<IndexReader> completeReaderSet =
     Collections.newSetFromMap(new IdentityHashMap<IndexReader,Boolean>());
 
@@ -77,10 +80,27 @@
         reader.incRef();
       }
     }
+    // Merge all field types; this will throw an exception if any field types are not congruent:
+    fieldTypes = new FieldTypes(null);
+    for (final IndexReader reader : completeReaderSet) {
+      FieldTypes readerFieldTypes = reader.getFieldTypes();
+      assert readerFieldTypes != null;
+      fieldTypes.addAll(readerFieldTypes);
+    }
+
     // finally add our own synthetic readers, so we close or decRef them, too (it does not matter what we do)
     completeReaderSet.addAll(getSequentialSubReaders());
   }
 
+  @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
   private static IndexReader[] prepareSubReaders(CompositeReader[] readers, CompositeReader[] storedFieldsReaders) throws IOException {
     if (readers.length == 0) {
       if (storedFieldsReaders.length > 0)
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
index 9a78fe1..398d17f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.IdentityHashMap;
 import java.util.Iterator;
@@ -26,6 +27,7 @@
 import java.util.SortedMap;
 import java.util.TreeMap;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 
 /** An {@link LeafReader} which reads multiple, parallel indexes.  Each index
@@ -57,7 +59,8 @@
   private final boolean hasDeletions;
   private final SortedMap<String,LeafReader> fieldToReader = new TreeMap<>();
   private final SortedMap<String,LeafReader> tvFieldToReader = new TreeMap<>();
-  
+  private final FieldTypes fieldTypes;
+
   /** Create a ParallelLeafReader based on the provided
    *  readers; auto-closes the given readers on {@link #close()}. */
   public ParallelLeafReader(LeafReader... readers) throws IOException {
@@ -84,9 +87,11 @@
       this.maxDoc = first.maxDoc();
       this.numDocs = first.numDocs();
       this.hasDeletions = first.hasDeletions();
+      this.fieldTypes = new FieldTypes(null);
     } else {
       this.maxDoc = this.numDocs = 0;
       this.hasDeletions = false;
+      this.fieldTypes = null;
     }
     Collections.addAll(completeReaderSet, this.parallelReaders);
     Collections.addAll(completeReaderSet, this.storedFieldsReaders);
@@ -103,6 +108,8 @@
     // build FieldInfos and fieldToReader map:
     for (final LeafReader reader : this.parallelReaders) {
       final FieldInfos readerFieldInfos = reader.getFieldInfos();
+      FieldTypes readerFieldTypes = reader.getFieldTypes();
+      assert readerFieldTypes != null;
       for (FieldInfo fieldInfo : readerFieldInfos) {
         // NOTE: first reader having a given field "wins":
         if (!fieldToReader.containsKey(fieldInfo.name)) {
@@ -111,6 +118,7 @@
           if (fieldInfo.hasVectors()) {
             tvFieldToReader.put(fieldInfo.name, reader);
           }
+          fieldTypes.mergeOneField(readerFieldTypes, fieldInfo.name);
         }
       }
     }
@@ -137,6 +145,11 @@
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
+  @Override
   public String toString() {
     final StringBuilder buffer = new StringBuilder("ParallelLeafReader(");
     for (final Iterator<LeafReader> iter = completeReaderSet.iterator(); iter.hasNext();) {
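[Editor note] ParallelLeafReader now merges FieldTypes field-by-field while it builds the field-to-reader map, with the first reader winning on name overlaps. A hedged sketch of stitching two indices with the same docID space together (directory names are illustrative):

    import java.io.IOException;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.ParallelLeafReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;
    import org.apache.lucene.store.Directory;

    LeafReader openParallel(Directory postingsDir, Directory docValuesDir) throws IOException {
      LeafReader a = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(postingsDir));
      LeafReader b = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(docValuesDir));
      // Same number of docs, different fields; reader "a" wins on any name overlap:
      ParallelLeafReader parallel = new ParallelLeafReader(a, b);
      FieldTypes merged = parallel.getFieldTypes();  // union of both readers' schemas
      assert merged != null;
      return parallel;
    }
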
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 4a68f51..fa0c803 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -31,8 +31,7 @@
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.LiveDocsFormat;
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -131,7 +130,7 @@
   public SegmentReader getReader(IOContext context) throws IOException {
     if (reader == null) {
       // We steal returned ref:
-      reader = new SegmentReader(info, context);
+      reader = new SegmentReader(writer.fieldTypes, info, context);
       if (liveDocs == null) {
         liveDocs = reader.getLiveDocs();
       }
@@ -189,7 +188,7 @@
     // force new liveDocs in initWritableLiveDocs even if it's null
     liveDocsShared = true;
     if (liveDocs != null) {
-      return new SegmentReader(reader.getSegmentInfo(), reader, liveDocs, info.info.getDocCount() - info.getDelCount() - pendingDeleteCount);
+      return new SegmentReader(new FieldTypes(writer.fieldTypes), reader.getSegmentInfo(), reader, liveDocs, info.info.getDocCount() - info.getDelCount() - pendingDeleteCount);
     } else {
       // liveDocs == null and reader != null. That can only be if there are no deletes
       assert reader.getLiveDocs() == null;
@@ -477,7 +476,7 @@
 
       // reader could be null e.g. for a just merged segment (from
       // IndexWriter.commitMergedDeletes).
-      final SegmentReader reader = this.reader == null ? new SegmentReader(info, IOContext.READONCE) : this.reader;
+      final SegmentReader reader = this.reader == null ? new SegmentReader(writer.fieldTypes, info, IOContext.READONCE) : this.reader;
       try {
         // clone FieldInfos so that we can update their dvGen separately from
         // the reader's infos and write them to a new fieldInfos_gen file
@@ -579,7 +578,7 @@
 
     // if there is a reader open, reopen it to reflect the updates
     if (reader != null) {
-      SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.info.getDocCount() - info.getDelCount() - pendingDeleteCount);
+      SegmentReader newReader = new SegmentReader(writer.fieldTypes, info, reader, liveDocs, info.info.getDocCount() - info.getDelCount() - pendingDeleteCount);
       boolean reopened = false;
       try {
         reader.decRef();
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReindexingReader.java b/lucene/core/src/java/org/apache/lucene/index/ReindexingReader.java
new file mode 100644
index 0000000..bdca81d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/ReindexingReader.java
@@ -0,0 +1,661 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.StringHelper;
+
+// TODO: how to use FieldTypes to give a higher level API here?
+
+/** Provides a {@link ReaderManager} allowing you to also make "parallel"
+ *  changes to the previously indexed documents, e.g. turning stored
+ *  fields into doc values, changing norm values, etc.
+ *
+ *  <p>
+ *  This uses ParallelLeafReader to index new
+ *  structures (postings, doc values, etc.) from previously stored fields, on the
+ *  fly (during NRT reader reopen), after the initial indexing.  For
+ *  example, an index might hold just a single stored field with text
+ *  "content X" (where X is a number embedded in the text).
+ *
+ *  <p>
+ *  Then, on reopen, for any newly created segments (flush or merge), it
+ *  builds a new parallel segment by loading all stored docs, parsing
+ *  out that X, and adding it as a doc-values and numeric indexed (trie) field.
+ *
+ *  <p>
+ *  Finally, for searching, it builds a top-level MultiReader, with a
+ *  ParallelLeafReader for each segment, so that e.g. numeric range
+ *  queries and sorting by the new doc-values field work
+ *  correctly.
+ *
+ *  <p>
+ *  Each per-segment index lives in a private directory next to the main
+ *  index, and they are deleted once their segments are removed from the
+ *  index.  They are "volatile", meaning if e.g. the index is replicated to
+ *  another machine, it's OK to not copy parallel segments indices,
+ *  since they will just be regenerated (at a cost, though). */
+public abstract class ReindexingReader implements Closeable {
+
+  private static boolean DEBUG = false;
+
+  /** Key used to store the current schema gen in the SegmentInfo diagnostics */
+  public final static String SCHEMA_GEN_KEY = "schema_gen";
+
+  public final IndexWriter w;
+  public final ReaderManager mgr;
+
+  // Main index directory:
+  public final Directory indexDir;
+
+  // Parent directory holding sub directory index for each segment + gen:
+  private final Path segsRootPath;
+
+  /** Which segments have been closed, but whose parallel index has not yet been removed. */
+  private final Set<SegmentIDAndGen> closedSegments = Collections.newSetFromMap(new ConcurrentHashMap<SegmentIDAndGen,Boolean>());
+
+  /** Holds currently open parallel readers for each segment. */
+  private final Map<SegmentIDAndGen,LeafReader> parallelReaders = new ConcurrentHashMap<>();
+
+  void printRefCounts() {
+    System.out.println("All refCounts:");
+    for(Map.Entry<SegmentIDAndGen,LeafReader> ent : parallelReaders.entrySet()) {
+      System.out.println("  " + ent.getKey() + " " + ent.getValue() + " refCount=" + ent.getValue().getRefCount());
+    }
+  }
+
+  public ReindexingReader(Directory indexDir, Path segsRootPath) throws IOException {
+
+    this.indexDir = indexDir;
+
+    // Per-segment parallel indices are stored under subdirs "segs":
+    this.segsRootPath = segsRootPath;
+    Files.createDirectories(segsRootPath);
+
+    IndexWriterConfig iwc = getIndexWriterConfig();
+    iwc.setMergePolicy(new ReindexingMergePolicy(iwc.getMergePolicy()));
+    if (DEBUG) {
+      System.out.println("TEST: use IWC:\n" + iwc);
+    }
+    w = new IndexWriter(indexDir, iwc);
+
+    w.getConfig().setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
+        @Override
+        public void warm(LeafReader reader) throws IOException {
+          // This will build the parallel index for the merged segment before the merge becomes visible, so reopen delay is only due to
+          // newly flushed segments:
+          if (DEBUG) System.out.println(Thread.currentThread().getName() +": TEST: now warm " + reader);
+          // TODO: it's not great that we pass false here; it means we close the reader & reopen again for NRT reader; still we did "warm" by
+          // building the parallel index, if necessary
+          getParallelLeafReader(reader, false, getCurrentSchemaGen());
+        }
+      });
+
+    // start with empty commit:
+    w.commit();
+    mgr = new ReaderManager(new ParallelLeafDirectoryReader(DirectoryReader.open(w, true)));
+    if (DEBUG) {
+      DirectoryReader r = mgr.acquire();
+      try {
+        System.out.println("TEST: ReindexingReader.init current=" + r);
+      } finally {
+        mgr.release(r);
+      }
+    }
+  }
+
+  protected abstract IndexWriterConfig getIndexWriterConfig() throws IOException;
+
+  /** Optional method to validate that the provided parallel reader in fact reflects the changes in schemaGen. */
+  protected void checkParallelReader(LeafReader reader, LeafReader parallelReader, long schemaGen) throws IOException {
+  }
+
+  /** Override to customize Directory impl. */
+  protected Directory openDirectory(Path path) throws IOException {
+    return FSDirectory.open(path);
+  }
+
+  public void commit() throws IOException {
+    w.commit();
+  }
+    
+  LeafReader getCurrentReader(LeafReader reader, long schemaGen) throws IOException {
+    LeafReader parallelReader = getParallelLeafReader(reader, true, schemaGen);
+    if (parallelReader != null) {
+
+      // We should not be embedding one ParallelLeafReader inside another:
+      assert parallelReader instanceof ParallelLeafReader == false;
+      assert reader instanceof ParallelLeafReader == false;
+
+      // NOTE: it's important that parallelReader comes first: if there are field-name overlaps
+      // (because changes to the schema overwrote existing field names), the parallel reader wins:
+      LeafReader newReader = new ParallelLeafReader(false, parallelReader, reader) {
+          @Override
+          public Bits getLiveDocs() {
+            return getParallelReaders()[1].getLiveDocs();
+          }
+          @Override
+          public int numDocs() {
+            return getParallelReaders()[1].numDocs();
+          }
+        };
+
+      // Because ParallelLeafReader does its own (extra) incRef:
+      parallelReader.decRef();
+
+      return newReader;
+
+    } else {
+      // This segment was already current as of currentSchemaGen:
+      return reader;
+    }
+  }
+
+  private class ParallelLeafDirectoryReader extends FilterDirectoryReader {
+    private final FieldTypes fieldTypes;
+    public ParallelLeafDirectoryReader(DirectoryReader in) {
+      super(in, new FilterDirectoryReader.SubReaderWrapper() {
+          final long currentSchemaGen = getCurrentSchemaGen();
+          @Override
+          public LeafReader wrap(LeafReader reader) {
+            try {
+              return getCurrentReader(reader, currentSchemaGen);
+            } catch (IOException ioe) {
+              // TODO: must close on exc here:
+              throw new RuntimeException(ioe);
+            }
+          }
+        });
+
+      // TODO: move this logic up?
+      fieldTypes = new FieldTypes(in.getFieldTypes());
+      for(LeafReaderContext ctx : leaves()) {
+        LeafReader leafReader = ctx.reader();
+        if (leafReader instanceof ParallelLeafReader) {
+          fieldTypes.addAll(((ParallelLeafReader) leafReader).getParallelReaders()[0].getFieldTypes());
+        }
+      }
+    }
+      
+    @Override
+    public FieldTypes getFieldTypes() {
+      return fieldTypes;
+    }
+
+    @Override
+    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+      return new ParallelLeafDirectoryReader(in);
+    }
+
+    @Override
+    protected void doClose() throws IOException {
+      Throwable firstExc = null;
+      for (final LeafReader r : getSequentialSubReaders()) {
+        if (r instanceof ParallelLeafReader) {
+          // try to close each reader, even if an exception is thrown
+          try {
+            r.decRef();
+          } catch (Throwable t) {
+            if (firstExc == null) {
+              firstExc = t;
+            }
+          }
+        }
+      }
+      // Also close in, so it decRef's the SegmentInfos
+      try {
+        in.doClose();
+      } catch (Throwable t) {
+        if (firstExc == null) {
+          firstExc = t;
+        }
+      }
+      // throw the first exception
+      IOUtils.reThrow(firstExc);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    w.close();
+    if (DEBUG) System.out.println("TEST: after close writer index=" + SegmentInfos.readLatestCommit(indexDir).toString(indexDir));
+
+    /*
+      DirectoryReader r = mgr.acquire();
+      try {
+      TestUtil.checkReader(r);
+      } finally {
+      mgr.release(r);
+      }
+    */
+    mgr.close();
+    pruneOldSegments(true);
+    assertNoExtraSegments();
+  }
+
+  // Make sure we deleted all parallel indices for segments that are no longer in the main index: 
+  private void assertNoExtraSegments() throws IOException {
+    Set<String> liveIDs = new HashSet<String>();
+    for(SegmentCommitInfo info : SegmentInfos.readLatestCommit(indexDir)) {
+      String idString = StringHelper.idToString(info.info.getId());
+      liveIDs.add(idString);
+    }
+
+    // At this point (closing) the only segments in closedSegments should be the still-live ones:
+    for(SegmentIDAndGen segIDGen : closedSegments) {
+      assert liveIDs.contains(segIDGen.segID);
+    }
+
+    boolean fail = false;
+    try (DirectoryStream<Path> stream = Files.newDirectoryStream(segsRootPath)) {
+        for (Path path : stream) {
+          SegmentIDAndGen segIDGen = new SegmentIDAndGen(path.getFileName().toString());
+          if (liveIDs.contains(segIDGen.segID) == false) {
+            if (DEBUG) System.out.println("TEST: fail seg=" + path.getFileName() + " is not live but still has a parallel index");
+            fail = true;
+          }
+        }
+      }
+    assert fail == false;
+  }
+
+  private static class SegmentIDAndGen {
+    public final String segID;
+    public final long schemaGen;
+
+    public SegmentIDAndGen(String segID, long schemaGen) {
+      this.segID = segID;
+      this.schemaGen = schemaGen;
+    }
+
+    public SegmentIDAndGen(String s) {
+      String[] parts = s.split("_");
+      if (parts.length != 2) {
+        throw new IllegalArgumentException("invalid SegmentIDAndGen \"" + s + "\"");
+      }
+      // TODO: better checking of segID?
+      segID = parts[0];
+      schemaGen = Long.parseLong(parts[1]);
+    }
+
+    @Override
+    public int hashCode() {
+      return (int) (segID.hashCode() * schemaGen);
+    }
+
+    @Override
+    public boolean equals(Object _other) {
+      if (_other instanceof SegmentIDAndGen) {
+        SegmentIDAndGen other = (SegmentIDAndGen) _other;
+        return segID.equals(other.segID) && schemaGen == other.schemaGen;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public String toString() {
+      return segID + "_" + schemaGen;
+    }
+  }
+
+  private class ParallelReaderClosed implements LeafReader.ReaderClosedListener {
+    private final SegmentIDAndGen segIDGen;
+    private final Directory dir;
+
+    public ParallelReaderClosed(SegmentIDAndGen segIDGen, Directory dir) {
+      this.segIDGen = segIDGen;
+      this.dir = dir;
+    }
+
+    @Override
+    public void onClose(IndexReader ignored) {
+      try {
+        // TODO: make this sync finer, i.e. just the segment + schemaGen
+        synchronized(ReindexingReader.this) {
+          if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now close parallel parLeafReader dir=" + dir + " segIDGen=" + segIDGen);
+          parallelReaders.remove(segIDGen);
+          dir.close();
+          closedSegments.add(segIDGen);
+        }
+      } catch (IOException ioe) {
+        System.out.println("TEST: hit IOExc closing dir=" + dir);
+        ioe.printStackTrace(System.out);
+        throw new RuntimeException(ioe);
+      }
+    }
+  }
+
+  // Returns a ref
+  LeafReader getParallelLeafReader(final LeafReader leaf, boolean doCache, long schemaGen) throws IOException {
+    assert leaf instanceof SegmentReader;
+    SegmentInfo info = ((SegmentReader) leaf).getSegmentInfo().info;
+
+    long infoSchemaGen = getSchemaGen(info);
+
+    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: getParallelLeafReader: " + leaf + " infoSchemaGen=" + infoSchemaGen + " vs schemaGen=" + schemaGen + " doCache=" + doCache);
+
+    if (infoSchemaGen == schemaGen) {
+      if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: segment is already current schemaGen=" + schemaGen + "; skipping");
+      return null;
+    }
+
+    if (infoSchemaGen > schemaGen) {
+      throw new IllegalStateException("segment infoSchemaGen (" + infoSchemaGen + ") cannot be greater than requested schemaGen (" + schemaGen + ")");
+    }
+
+    final SegmentIDAndGen segIDGen = new SegmentIDAndGen(StringHelper.idToString(info.getId()), schemaGen);
+
+    // While loop because the parallel reader may be closed out from under us, so we must retry:
+    while (true) {
+
+      // TODO: make this sync finer, i.e. just the segment + schemaGen
+      synchronized (this) {
+        LeafReader parReader = parallelReaders.get(segIDGen);
+      
+        assert doCache || parReader == null;
+
+        if (parReader == null) {
+
+          Path leafIndex = segsRootPath.resolve(segIDGen.toString());
+
+          final Directory dir = openDirectory(leafIndex);
+
+          if (Files.exists(leafIndex.resolve("done")) == false) {
+            if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: build segment index for " + leaf + " " + segIDGen + " (source: " + info.getDiagnostics().get("source") + ") dir=" + leafIndex);
+
+            if (dir.listAll().length != 0) {
+              // It crashed before finishing last time:
+              if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: remove old incomplete index files: " + leafIndex);
+              IOUtils.rm(leafIndex);
+            }
+
+            reindex(infoSchemaGen, schemaGen, leaf, dir);
+
+            // Marker file, telling us this index is in fact done.  This way if we crash while doing the reindexing for a given segment, we will
+            // later try again:
+            dir.createOutput("done", IOContext.DEFAULT).close();
+          } else {
+            if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: segment index already exists for " + leaf + " " + segIDGen + " (source: " + info.getDiagnostics().get("source") + ") dir=" + leafIndex);
+          }
+
+          if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check index " + dir);
+          //TestUtil.checkIndex(dir);
+
+          SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
+          final LeafReader parLeafReader;
+          if (infos.size() == 1) {
+            parLeafReader = new SegmentReader(FieldTypes.getFieldTypes(infos, null, null), infos.info(0), IOContext.DEFAULT);
+          } else {
+            // This just means we didn't forceMerge above:
+            parLeafReader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
+          }
+
+          //checkParallelReader(leaf, parLeafReader, schemaGen);
+
+          if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: opened parallel reader: " + parLeafReader);
+          if (doCache) {
+            parallelReaders.put(segIDGen, parLeafReader);
+
+            // Our id+gen could have been previously closed, e.g. if it was a merged segment that was warmed, so we must clear it here,
+            // else the pruning may remove our directory:
+            closedSegments.remove(segIDGen);
+
+            parLeafReader.addReaderClosedListener(new ParallelReaderClosed(segIDGen, dir));
+
+          } else {
+            // Used only for merged segment warming:
+            // Messy: we close this reader now, instead of leaving it open for reuse:
+            if (DEBUG) System.out.println("TEST: now decRef non cached refCount=" + parLeafReader.getRefCount());
+            parLeafReader.decRef();
+            dir.close();
+
+            // Must do this after dir is closed, else another thread could "rm -rf" while we are closing (which makes MDW.close's
+            // checkIndex angry):
+            closedSegments.add(segIDGen);
+          }
+          parReader = parLeafReader;
+
+        } else {
+          if (parReader.tryIncRef() == false) {
+            // We failed: this reader just got closed by another thread, e.g. refresh thread opening a new reader, so this reader is now
+            // closed and we must try again.
+            if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: tryIncRef failed for " + parReader + "; retry");
+            parReader = null;
+            continue;
+          }
+          if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: use existing already opened parReader=" + parReader + " refCount=" + parReader.getRefCount());
+          //checkParallelReader(leaf, parReader, schemaGen);
+        }
+
+        // We return the new reference to caller
+        return parReader;
+      }
+    }
+  }
+
+  // TODO: we could pass a writer already opened...?
+  protected abstract void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException;
+
+  /** Returns the gen for the current schema. */
+  protected abstract long getCurrentSchemaGen();
+
+  /** Returns the gen that should be merged, meaning those changes will be folded back into the main index. */
+  protected long getMergingSchemaGen() {
+    return getCurrentSchemaGen();
+  }
+
+  /** Removes parallel indices that are no longer referenced by the last commit point.  We can't
+   *  remove them when the parallel reader is closed because they may still be referenced by
+   *  the last commit. */
+  private void pruneOldSegments(boolean removeOldGens) throws IOException {
+    SegmentInfos lastCommit = SegmentInfos.readLatestCommit(indexDir);
+    if (DEBUG) System.out.println("TEST: prune; removeOldGens=" + removeOldGens);
+
+    Set<String> liveIDs = new HashSet<String>();
+    for(SegmentCommitInfo info : lastCommit) {
+      String idString = StringHelper.idToString(info.info.getId());
+      liveIDs.add(idString);
+      if (DEBUG) System.out.println("TEST: live id " + idString + " seg=" + info);
+    }
+
+    long currentSchemaGen = getCurrentSchemaGen();
+
+    if (Files.exists(segsRootPath)) {
+      try (DirectoryStream<Path> stream = Files.newDirectoryStream(segsRootPath)) {
+        for (Path path : stream) {
+          if (Files.isDirectory(path)) {
+            SegmentIDAndGen segIDGen = new SegmentIDAndGen(path.getFileName().toString());
+            assert segIDGen.schemaGen <= currentSchemaGen;
+            if (DEBUG) System.out.println("TEST: check dir=" + path + " live?=" + (liveIDs.contains(segIDGen.segID)) + " closed=" + (closedSegments.contains(segIDGen)) + " currentSchemaGen=" + currentSchemaGen);
+            if (liveIDs.contains(segIDGen.segID) == false && (closedSegments.contains(segIDGen) || (removeOldGens && segIDGen.schemaGen < currentSchemaGen))) {
+              if (DEBUG) System.out.println("TEST: remove " + segIDGen);
+              try {
+                IOUtils.rm(path);
+                closedSegments.remove(segIDGen);
+              } catch (IOException ioe) {
+                // OK, we'll retry later
+                if (DEBUG) System.out.println("TEST: ignore ioe during delete " + path + ":" + ioe);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /** Just replaces the sub-readers with parallel readers, so reindexed fields are merged into new segments. */
+  private class ReindexingMergePolicy extends MergePolicy {
+
+    class ReindexingOneMerge extends OneMerge {
+
+      List<LeafReader> parallelReaders;
+      final long schemaGen;
+
+      ReindexingOneMerge(List<SegmentCommitInfo> segments) {
+        super(segments);
+        // Commit up front to which schemaGen we will merge; we don't want a schema change sneaking in for some of our leaf readers but not others:
+        schemaGen = getMergingSchemaGen();
+        long currentSchemaGen = getCurrentSchemaGen();
+
+        // Defensive sanity check:
+        if (schemaGen > currentSchemaGen) {
+          throw new IllegalStateException("currentSchemaGen (" + currentSchemaGen + ") must always be >= mergingSchemaGen (" + schemaGen + ")");
+        }
+      }
+
+      @Override
+      public List<CodecReader> getMergeReaders() throws IOException {
+        if (parallelReaders == null) {
+          parallelReaders = new ArrayList<>();
+          for (CodecReader reader : super.getMergeReaders()) {
+            parallelReaders.add(getCurrentReader((SegmentReader) reader, schemaGen));
+          }
+        }
+
+        // TODO: fix ParallelLeafReader, if this is a good use case
+        List<CodecReader> mergeReaders = new ArrayList<>();
+        for (LeafReader reader : parallelReaders) {
+          mergeReaders.add(SlowCodecReaderWrapper.wrap(reader));
+        }
+        return mergeReaders;
+      }
+
+      @Override
+      public void mergeFinished() throws IOException {
+        Throwable th = null;
+        for(LeafReader r : parallelReaders) {
+          if (r instanceof ParallelLeafReader) {
+            try {
+              r.decRef();
+            } catch (Throwable t) {
+              if (th == null) {
+                th = t;
+              }
+            }
+          }
+        }
+
+        // If any error occurred, throw it.
+        IOUtils.reThrow(th);
+      }
+    
+      @Override
+      public void setInfo(SegmentCommitInfo info) {
+        // Record that this merged segment is current as of this schemaGen:
+        info.info.getDiagnostics().put(SCHEMA_GEN_KEY, Long.toString(schemaGen));
+        super.setInfo(info);
+      }
+
+      @Override
+      public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
+        return super.getDocMap(mergeState);
+      }
+    }
+
+    class ReindexingMergeSpecification extends MergeSpecification {
+      @Override
+      public void add(OneMerge merge) {
+        super.add(new ReindexingOneMerge(merge.segments));
+      }
+
+      @Override
+      public String segString(Directory dir) {
+        return "ReindexingMergeSpec(" + super.segString(dir) + ")";
+      }
+    }
+
+    MergeSpecification wrap(MergeSpecification spec) {
+      MergeSpecification wrapped = null;
+      if (spec != null) {
+        wrapped = new ReindexingMergeSpecification();
+        for (OneMerge merge : spec.merges) {
+          wrapped.add(merge);
+        }
+      }
+      return wrapped;
+    }
+
+    final MergePolicy in;
+
+    /** Create a new {@code ReindexingMergePolicy} that wraps the given merge policy, substituting parallel readers at merge time so reindexed fields are folded into the merged segments. */
+    public ReindexingMergePolicy(MergePolicy in) {
+      this.in = in;
+    }
+
+    @Override
+    public MergeSpecification findMerges(MergeTrigger mergeTrigger,
+                                         SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
+      return wrap(in.findMerges(mergeTrigger, segmentInfos, writer));
+    }
+
+    @Override
+    public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+                                               int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
+      throws IOException {
+      // TODO: do we need to force-force this?  Ie, wrapped MP may think index is already optimized, yet maybe its schemaGen is old?  need test!
+      return wrap(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
+    }
+
+    @Override
+    public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
+      throws IOException {
+      return wrap(in.findForcedDeletesMerges(segmentInfos, writer));
+    }
+
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments,
+                                   SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
+      return in.useCompoundFile(segments, newSegment, writer);
+    }
+
+    @Override
+    public String toString() {
+      return "ReindexingMergePolicy(" + in + ")";
+    }
+  }
+
+  static long getSchemaGen(SegmentInfo info) {
+    String s = info.getDiagnostics().get(SCHEMA_GEN_KEY);
+    if (s == null) {
+      return -1;
+    } else {
+      return Long.parseLong(s);
+    }
+  }
+}
+
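For illustration, a hedged sketch of a minimal subclass wiring the two abstract hooks above; the super() constructor signature is assumed, since the class's constructor sits outside this hunk:

    import java.io.IOException;
    import java.nio.file.Path;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.store.Directory;

    class TimestampReindexingReader extends ReindexingReader {
      TimestampReindexingReader(Directory indexDir, Path segsRootPath) throws IOException {
        super(indexDir, segsRootPath); // assumed constructor signature
      }

      @Override
      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
        // Derive the new field(s) for every doc in `reader` and write them, in
        // docID order, into a fresh index in parallelDir (e.g. via IndexWriter).
      }

      @Override
      protected long getCurrentSchemaGen() {
        return 1; // bump each time the derived schema changes
      }
    }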
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
index 7549be2..3b89d03 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -122,9 +122,14 @@
   /** The file format version for the segments_N codec header, since 5.0+ */
   public static final int VERSION_50 = 4;
 
+  /** The file format version for the segments_N codec header, since 6.0+ */
+  public static final int VERSION_60 = 5;
+
   /** Used to name new segments. */
   // TODO: should this be a long ...?
   public int counter;
+
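+  /** Version of the segments_N file format this commit was read with; see the VERSION_XX constants. */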
+  public int infosVersion;
   
   /** Counts how often the index has been changed.  */
   public long version;
@@ -274,12 +279,13 @@
       if (magic != CodecUtil.CODEC_MAGIC) {
         throw new IndexFormatTooOldException(input, magic, CodecUtil.CODEC_MAGIC, CodecUtil.CODEC_MAGIC);
       }
-      CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_50, VERSION_50);
+      int infosVersion = CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_50, VERSION_60);
       byte id[] = new byte[StringHelper.ID_LENGTH];
       input.readBytes(id, 0, id.length);
       CodecUtil.checkIndexHeaderSuffix(input, Long.toString(generation, Character.MAX_RADIX));
       
       SegmentInfos infos = new SegmentInfos();
+      infos.infosVersion = infosVersion;
       infos.id = id;
       infos.generation = generation;
       infos.lastGeneration = generation;
@@ -726,6 +732,7 @@
   void replace(SegmentInfos other) {
     rollbackSegmentInfos(other.asList());
     lastGeneration = other.lastGeneration;
+    userData = other.userData;
   }
 
   /** Returns sum of all segment's docCounts.  Note that
@@ -823,6 +830,7 @@
   /** Clear all {@link SegmentCommitInfo}s. */
   public void clear() {
     segments.clear();
+    userData.clear();
   }
 
   /** Remove the provided {@link SegmentCommitInfo}.
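A hedged usage sketch of the new infosVersion field and VERSION_60 constant (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;

    static boolean isPost60Commit(Directory dir) throws IOException {
      // infosVersion is recorded while readCommit parses the segments_N header:
      SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
      return infos.infosVersion >= SegmentInfos.VERSION_60;
    }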
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
index c265054..9aec81d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.codecs.NormsConsumer;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.codecs.TermVectorsWriter;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.InfoStream;
@@ -48,12 +49,12 @@
   private final FieldInfos.Builder fieldInfosBuilder;
 
   // note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
-  SegmentMerger(List<CodecReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, Directory dir,
+  SegmentMerger(FieldTypes fieldTypes, List<CodecReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, Directory dir,
                 FieldInfos.FieldNumbers fieldNumbers, IOContext context) throws IOException {
     if (context.context != IOContext.Context.MERGE) {
       throw new IllegalArgumentException("IOContext.context should be MERGE; got: " + context.context);
     }
-    mergeState = new MergeState(readers, segmentInfo, infoStream);
+    mergeState = new MergeState(fieldTypes, readers, segmentInfo, infoStream);
     directory = dir;
     this.codec = segmentInfo.getCodec();
     this.context = context;
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
index b24dd7c..81c5ada 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
@@ -27,6 +27,8 @@
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Bits;
@@ -50,6 +52,7 @@
 
   final SegmentCoreReaders core;
   final SegmentDocValues segDocValues;
+  final FieldTypes fieldTypes;
   
   final DocValuesProducer docValuesProducer;
   final FieldInfos fieldInfos;
@@ -60,8 +63,9 @@
    * @throws IOException if there is a low-level IO error
    */
   // TODO: why is this public?
-  public SegmentReader(SegmentCommitInfo si, IOContext context) throws IOException {
+  public SegmentReader(FieldTypes fieldTypes, SegmentCommitInfo si, IOContext context) throws IOException {
     this.si = si;
+    this.fieldTypes = fieldTypes;
     core = new SegmentCoreReaders(this, si.info.dir, si, context);
     segDocValues = new SegmentDocValues();
     
@@ -96,8 +100,8 @@
   /** Create new SegmentReader sharing core from a previous
    *  SegmentReader and loading new live docs from a new
    *  deletes file.  Used by openIfChanged. */
-  SegmentReader(SegmentCommitInfo si, SegmentReader sr) throws IOException {
-    this(si, sr,
+  SegmentReader(FieldTypes fieldTypes, SegmentCommitInfo si, SegmentReader sr) throws IOException {
+    this(fieldTypes, si, sr,
          si.info.getCodec().liveDocsFormat().readLiveDocs(si.info.dir, si, IOContext.READONCE),
          si.info.getDocCount() - si.getDelCount());
   }
@@ -106,7 +110,8 @@
    *  SegmentReader and using the provided in-memory
    *  liveDocs.  Used by IndexWriter to provide a new NRT
    *  reader */
-  SegmentReader(SegmentCommitInfo si, SegmentReader sr, Bits liveDocs, int numDocs) throws IOException {
+  SegmentReader(FieldTypes fieldTypes, SegmentCommitInfo si, SegmentReader sr, Bits liveDocs, int numDocs) throws IOException {
+    this.fieldTypes = fieldTypes;
     this.si = si;
     this.liveDocs = liveDocs;
     this.numDocs = numDocs;
@@ -141,6 +146,11 @@
       return segDocValues.getDocValuesProducer(-1L, si, dir, fieldInfos);
     }
   }
+
+  @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
   
   /**
    * init most recent FieldInfos for the current commit
diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
index 9ee7b89..85af99a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
@@ -25,6 +25,7 @@
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 
 /**
@@ -114,6 +115,11 @@
         public void removeCoreClosedListener(CoreClosedListener listener) {
           reader.removeCoreClosedListener(listener);
         }
+
+        @Override
+        public FieldTypes getFieldTypes() {
+          return reader.getFieldTypes();
+        }
       };
     }
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
index 1213ab4..e214653 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
@@ -21,11 +21,11 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.lucene.util.Bits;
-
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.MultiDocValues.MultiSortedDocValues;
 import org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues;
 import org.apache.lucene.index.MultiDocValues.OrdinalMap;
+import org.apache.lucene.util.Bits;
 
 /**
  * This class forces a composite reader (eg a {@link
@@ -47,6 +47,7 @@
   private final CompositeReader in;
   private final Fields fields;
   private final Bits liveDocs;
+  private final FieldTypes fieldTypes;
   private final boolean merging;
   
   /** This method is sugar for getting an {@link LeafReader} from
@@ -68,10 +69,16 @@
     fields = MultiFields.getFields(in);
     liveDocs = MultiFields.getLiveDocs(in);
     in.registerParentReader(this);
+    fieldTypes = in.getFieldTypes();
     this.merging = merging;
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
+  @Override
   public String toString() {
     return "SlowCompositeReaderWrapper(" + in + ")";
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 2bab5fe..eef6bda 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -35,27 +36,35 @@
   private final IndexWriter writer;
   private final SegmentInfos segmentInfos;
   private final boolean applyAllDeletes;
+  private final FieldTypes fieldTypes;
   
   /** called only from static open() methods */
-  StandardDirectoryReader(Directory directory, LeafReader[] readers, IndexWriter writer,
-    SegmentInfos sis, boolean applyAllDeletes) {
+  StandardDirectoryReader(FieldTypes fieldTypes, Directory directory, LeafReader[] readers, IndexWriter writer,
+    SegmentInfos sis, boolean applyAllDeletes) throws IOException {
     super(directory, readers);
+    this.fieldTypes = fieldTypes;
     this.writer = writer;
     this.segmentInfos = sis;
     this.applyAllDeletes = applyAllDeletes;
   }
 
+  @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
   /** called from DirectoryReader.open(...) methods */
   static DirectoryReader open(final Directory directory, final IndexCommit commit) throws IOException {
     return new SegmentInfos.FindSegmentsFile<DirectoryReader>(directory) {
       @Override
       protected DirectoryReader doBody(String segmentFileName) throws IOException {
         SegmentInfos sis = SegmentInfos.readCommit(directory, segmentFileName);
+        FieldTypes fieldTypes = FieldTypes.getFieldTypes(sis, null, null);
         final SegmentReader[] readers = new SegmentReader[sis.size()];
         for (int i = sis.size()-1; i >= 0; i--) {
           boolean success = false;
           try {
-            readers[i] = new SegmentReader(sis.info(i), IOContext.READ);
+            readers[i] = new SegmentReader(fieldTypes, sis.info(i), IOContext.READ);
             success = true;
           } finally {
             if (!success) {
@@ -63,7 +72,7 @@
             }
           }
         }
-        return new StandardDirectoryReader(directory, readers, null, sis, false);
+        return new StandardDirectoryReader(fieldTypes, directory, readers, null, sis, false);
       }
     }.run(commit);
   }
@@ -79,6 +88,12 @@
     final Directory dir = writer.getDirectory();
 
     final SegmentInfos segmentInfos = infos.clone();
+
+    // Carry over current schema:
+    segmentInfos.getUserData().put(FieldTypes.FIELD_TYPES_KEY, writer.fieldTypes.writeToString());
+
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(segmentInfos, null, null);
+
     int infosUpto = 0;
     boolean success = false;
     try {
@@ -107,7 +122,7 @@
       
       writer.incRefDeleter(segmentInfos);
       
-      StandardDirectoryReader result = new StandardDirectoryReader(dir,
+      StandardDirectoryReader result = new StandardDirectoryReader(fieldTypes, dir,
           readers.toArray(new SegmentReader[readers.size()]), writer,
           segmentInfos, applyAllDeletes);
       success = true;
@@ -142,7 +157,9 @@
     }
     
     SegmentReader[] newReaders = new SegmentReader[infos.size()];
-    
+
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(infos, null, null);
+
     for (int i = infos.size() - 1; i>=0; i--) {
       SegmentCommitInfo commitInfo = infos.info(i);
 
@@ -163,7 +180,7 @@
         if (oldReader == null || commitInfo.info.getUseCompoundFile() != oldReader.getSegmentInfo().info.getUseCompoundFile()) {
 
           // this is a new reader; in case we hit an exception we can decRef it safely
-          newReader = new SegmentReader(commitInfo, IOContext.READ);
+          newReader = new SegmentReader(fieldTypes, commitInfo, IOContext.READ);
           newReaders[i] = newReader;
         } else {
           if (oldReader.getSegmentInfo().getDelGen() == commitInfo.getDelGen()
@@ -191,10 +208,10 @@
 
             if (oldReader.getSegmentInfo().getDelGen() == commitInfo.getDelGen()) {
               // only DV updates
-              newReaders[i] = new SegmentReader(commitInfo, oldReader, oldReader.getLiveDocs(), oldReader.numDocs());
+              newReaders[i] = new SegmentReader(fieldTypes, commitInfo, oldReader, oldReader.getLiveDocs(), oldReader.numDocs());
             } else {
               // both DV and liveDocs have changed
-              newReaders[i] = new SegmentReader(commitInfo, oldReader);
+              newReaders[i] = new SegmentReader(fieldTypes, commitInfo, oldReader);
             }
           }
         }
@@ -205,7 +222,8 @@
         }
       }
     }    
-    return new StandardDirectoryReader(directory, newReaders, null, infos, false);
+
+    return new StandardDirectoryReader(fieldTypes, directory, newReaders, null, infos, false);
   }
 
   // TODO: move somewhere shared if it's useful elsewhere
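The net effect of these StandardDirectoryReader changes, as a hedged sketch assuming the branch's FieldTypes API shown in this patch: the schema is loaded from the commit's user data at open time and exposed on the reader:

    import java.io.IOException;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    static FieldTypes schemaOf(Directory dir) throws IOException {
      try (DirectoryReader reader = DirectoryReader.open(dir)) {
        // Populated from FieldTypes.getFieldTypes(sis, null, null) at open time:
        return reader.getFieldTypes();
      }
    }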
diff --git a/lucene/core/src/java/org/apache/lucene/index/StorableField.java b/lucene/core/src/java/org/apache/lucene/index/StorableField.java
deleted file mode 100644
index 136fd17..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/StorableField.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.Reader;
-
-import org.apache.lucene.util.BytesRef;
-
-/** Represents a single stored field in lucene document. These fields
- * are contained in document retrieved from IndexReader.
- *
- *  @lucene.experimental */
-
-public interface StorableField extends GeneralField {
-
-  /** Non-null if this field has a binary value */
-  public BytesRef binaryValue();
-
-  /** Non-null if this field has a string value */
-  public String stringValue();
-
-  /** Non-null if this field has a Reader value */
-  public Reader readerValue();
-
-  /** Non-null if this field has a numeric value */
-  public Number numericValue(); 
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java b/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java
deleted file mode 100644
index b79c6b6..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java
+++ /dev/null
@@ -1,206 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.util.BytesRef;
-
-/** 
-* StoredDocument is retrieved from IndexReader containing only stored fields from indexed {@link IndexDocument}.
-*/
-// TODO: shouldn't this really be in the .document package?
-public class StoredDocument implements Iterable<StorableField> {
-
-  private final List<StorableField> fields = new ArrayList<>();
-
-  /** Sole constructor. */
-  public StoredDocument() {
-  }
-  
-  /**
-   * Adds a field to a document.
-   * <p> This method supports construction of a StoredDocument from a 
-   * {@link StoredFieldVisitor}. This method cannot
-   * be used to change the content of an existing index! In order to achieve this,
-   * a document has to be deleted from an index and a new changed version of that
-   * document has to be added.</p>
-   */
-  public final void add(StorableField field) {
-    fields.add(field);
-  }
-  
-  /**
-   * Returns an array of {@link StorableField}s with the given name.
-   * This method returns an empty array when there are no
-   * matching fields.  It never returns null.
-   *
-   * @param name the name of the field
-   * @return a <code>StorableField[]</code> array
-   */
-  public StorableField[] getFields(String name) {
-    List<StorableField> result = new ArrayList<>();
-    for (StorableField field : fields) {
-      if (field.name().equals(name)) {
-        result.add(field);
-      }
-    }
-  
-    return result.toArray(new StorableField[result.size()]);
-  }
-  
-  /** Returns a field with the given name if any exist in this document, or
-   * null.  If multiple fields exists with this name, this method returns the
-   * first value added.
-   */
-  public final StorableField getField(String name) {
-    for (StorableField field : fields) {
-      if (field.name().equals(name)) {
-        return field;
-      }
-    }
-    return null;
-  }
-  
-
-  /** Returns a List of all the fields in a document.
-   * <p>Note that fields which are <i>not</i> stored are
-   * <i>not</i> available in documents retrieved from the
-   * index, e.g. {@link IndexSearcher#doc(int)} or {@link
-   * IndexReader#document(int)}.
-   * 
-   * @return an immutable <code>List&lt;StorableField&gt;</code> 
-   */
-  public final List<StorableField> getFields() {
-    return fields;
-  }
-  
-  @Override
-  public Iterator<StorableField> iterator() {
-    return this.fields.iterator();
-  }
-  
-  /**
-   * Returns an array of byte arrays for of the fields that have the name specified
-   * as the method parameter.  This method returns an empty
-   * array when there are no matching fields.  It never
-   * returns null.
-   *
-   * @param name the name of the field
-   * @return a <code>BytesRef[]</code> of binary field values
-   */
-   public final BytesRef[] getBinaryValues(String name) {
-     final List<BytesRef> result = new ArrayList<>();
-     for (StorableField field : fields) {
-       if (field.name().equals(name)) {
-         final BytesRef bytes = field.binaryValue();
-         if (bytes != null) {
-           result.add(bytes);
-         }
-       }
-     }
-   
-     return result.toArray(new BytesRef[result.size()]);
-   }
-   
-   /**
-   * Returns an array of bytes for the first (or only) field that has the name
-   * specified as the method parameter. This method will return <code>null</code>
-   * if no binary fields with the specified name are available.
-   * There may be non-binary fields with the same name.
-   *
-   * @param name the name of the field.
-   * @return a <code>BytesRef</code> containing the binary field value or <code>null</code>
-   */
-   public final BytesRef getBinaryValue(String name) {
-     for (StorableField field : fields) {
-       if (field.name().equals(name)) {
-         final BytesRef bytes = field.binaryValue();
-         if (bytes != null) {
-           return bytes;
-         }
-       }
-     }
-     return null;
-   }
-   private final static String[] NO_STRINGS = new String[0];
-  
-   /**
-    * Returns an array of values of the field specified as the method parameter.
-    * This method returns an empty array when there are no
-    * matching fields.  It never returns null.
-    * For {@link IntField}, {@link LongField}, {@link
-    * FloatField} and {@link DoubleField} it returns the string value of the number. If you want
-    * the actual numeric field instances back, use {@link #getFields}.
-    * @param name the name of the field
-    * @return a <code>String[]</code> of field values
-    */
-   public final String[] getValues(String name) {
-     List<String> result = new ArrayList<>();
-     for (StorableField field : fields) {
-       if (field.name().equals(name) && field.stringValue() != null) {
-         result.add(field.stringValue());
-       }
-     }
-     
-     if (result.size() == 0) {
-       return NO_STRINGS;
-     }
-     
-     return result.toArray(new String[result.size()]);
-   }
-  
-   /** Returns the string value of the field with the given name if any exist in
-    * this document, or null.  If multiple fields exist with this name, this
-    * method returns the first value added. If only binary fields with this name
-    * exist, returns null.
-    * For {@link IntField}, {@link LongField}, {@link
-    * FloatField} and {@link DoubleField} it returns the string value of the number. If you want
-    * the actual numeric field instance back, use {@link #getField}.
-    */
-   public final String get(String name) {
-     for (StorableField field : fields) {
-       if (field.name().equals(name) && field.stringValue() != null) {
-         return field.stringValue();
-       }
-     }
-     return null;
-   }
-
-  /** Prints the fields of a document for human consumption. */
-  @Override
-  public final String toString() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("StoredDocument<");
-    for (int i = 0; i < fields.size(); i++) {
-      StorableField field = fields.get(i);
-      buffer.append(field.toString());
-      if (i != fields.size()-1)
-        buffer.append(" ");
-    }
-    buffer.append(">");
-    return buffer.toString();
-  }
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java b/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java
index 752c245..29e774f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StoredFieldVisitor.java
@@ -19,8 +19,7 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DocumentStoredFieldVisitor;
+import org.apache.lucene.document.DocumentStoredFieldVisitor; // javadocs
 
 /**
  * Expert: provides a low-level means of accessing the stored field
@@ -33,7 +32,7 @@
  * fields for most codecs is not reeentrant and you will see
  * strange exceptions as a result.
  *
- * <p>See {@link DocumentStoredFieldVisitor}, which is a
+ * <p>See {@link Document2StoredFieldVisitor}, which is a
  * <code>StoredFieldVisitor</code> that builds the
  * {@link Document} containing all stored fields.  This is
  * used by {@link IndexReader#document(int)}.
@@ -57,6 +56,9 @@
   public void stringField(FieldInfo fieldInfo, String value) throws IOException {
   }
 
+  // TODO: simplify this API; stored fields can just deal with long and FieldTypes instead of having to implement its own
+  // per-document-per-field baby schema here:
+
   /** Process a int numeric field. */
   public void intField(FieldInfo fieldInfo, int value) throws IOException {
   }
@@ -93,4 +95,4 @@
     /** STOP: don't visit this field and stop processing any other fields for this document. */
     STOP
   }
-}
\ No newline at end of file
+}
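For context, a hedged sketch of a visitor using the per-type numeric callbacks declared here (intField is shown in this hunk; needsField is the class's standard gate, and the field name is illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.StoredFieldVisitor;

    class SumIntsVisitor extends StoredFieldVisitor {
      long sum;

      @Override
      public Status needsField(FieldInfo fieldInfo) throws IOException {
        // Only visit the stored int field we care about:
        return "price".equals(fieldInfo.name) ? Status.YES : Status.NO;
      }

      @Override
      public void intField(FieldInfo fieldInfo, int value) throws IOException {
        sum += value;
      }
    }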
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermContext.java b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
index ada4fc1..47478f1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.util.BytesRef;
 
 /**
@@ -165,4 +166,30 @@
   public void setDocFreq(int docFreq) {
     this.docFreq = docFreq;
   }
-}
\ No newline at end of file
+
+  /** Returns true if all terms stored here are real (i.e., not auto-prefix terms).
+   *
+   *  @lucene.internal */
+  public boolean hasOnlyRealTerms() {
+    for(TermState termState : states) {
+      if (termState instanceof BlockTermState && ((BlockTermState) termState).isRealTerm == false) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("TermContext\n");
+    for(TermState termState : states) {
+      sb.append("  state=");
+      sb.append(termState.toString());
+      sb.append('\n');
+    }
+
+    return sb.toString();
+  }
+}
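A hedged sketch of consulting the new hasOnlyRealTerms() hook, assuming the usual TermContext.build entry point:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermContext;

    static boolean allRealTerms(IndexReaderContext topContext, Term term) throws IOException {
      TermContext tc = TermContext.build(topContext, term);
      // False if any per-segment state came from an auto-prefix term:
      return tc.hasOnlyRealTerms();
    }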
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
index f211720..a7343b2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java
@@ -89,6 +89,7 @@
 
       // Get BytesRef
       termBytePool.setBytesRef(flushTerm, postings.textStarts[termID]);
+
       tv.startTerm(flushTerm, freq);
       
       if (doVectorPositions || doVectorOffsets) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/Terms.java b/lucene/core/src/java/org/apache/lucene/index/Terms.java
index a3109af..99195e2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Terms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/Terms.java
@@ -19,6 +19,9 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
+import org.apache.lucene.search.PrefixTermsEnum;
+import org.apache.lucene.search.TermRangeTermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
@@ -42,17 +45,22 @@
    *  implementation can do so. */
   public abstract TermsEnum iterator(TermsEnum reuse) throws IOException;
 
-  /** Returns a TermsEnum that iterates over all terms that
-   *  are accepted by the provided {@link
+  /** Returns a TermsEnum that iterates over all terms and
+   *  documents that are accepted by the provided {@link
    *  CompiledAutomaton}.  If the <code>startTerm</code> is
-   *  provided then the returned enum will only accept terms
+   *  provided then the returned enum will only return terms
    *  {@code > startTerm}, but you still must call
    *  next() first to get to the first term.  Note that the
    *  provided <code>startTerm</code> must be accepted by
    *  the automaton.
    *
-   * <p><b>NOTE</b>: the returned TermsEnum cannot
-   * seek</p>. */
+   *  <p><b>NOTE</b>: the returned TermsEnum cannot seek.</p>
+   *
+   *  <p><b>NOTE</b>: the terms dictionary is free to
+   *  return arbitrary terms as long as the resulting set of
+   *  visited docs is the same.  E.g., {@link BlockTreeTermsWriter}
+   *  creates auto-prefix terms during indexing to reduce the
+   *  number of terms visited. */
   public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException {
     
     // TODO: could we factor out a common interface b/w
@@ -64,13 +72,35 @@
     // TODO: eventually we could support seekCeil/Exact on
     // the returned enum, instead of only being able to seek
     // at the start
+
+    TermsEnum termsEnum = iterator(null);
+
+    if (compiled.type == CompiledAutomaton.AUTOMATON_TYPE.RANGE) {
+      if (startTerm != null) {
+        throw new IllegalArgumentException("cannot intersect RANGE with startTerm");
+      }
+      return new TermRangeTermsEnum(termsEnum,
+                                    compiled.term,
+                                    compiled.maxTerm,
+                                    compiled.minInclusive,
+                                    compiled.maxInclusive);
+    }
+    
+    if (compiled.type == CompiledAutomaton.AUTOMATON_TYPE.PREFIX) {
+      if (startTerm != null) {
+        throw new IllegalArgumentException("cannot intersect PREFIX with startTerm");
+      }
+      return new PrefixTermsEnum(termsEnum, compiled.term);
+    }
+
     if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
       throw new IllegalArgumentException("please use CompiledAutomaton.getTermsEnum instead");
     }
+
     if (startTerm == null) {
-      return new AutomatonTermsEnum(iterator(null), compiled);
+      return new AutomatonTermsEnum(termsEnum, compiled);
     } else {
-      return new AutomatonTermsEnum(iterator(null), compiled) {
+      return new AutomatonTermsEnum(termsEnum, compiled) {
         @Override
         protected BytesRef nextSeekTerm(BytesRef term) throws IOException {
           if (term == null) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
index 4c51e5c..0dc054f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
@@ -18,14 +18,15 @@
  */
 
 import java.io.IOException;
+import java.util.Comparator;
 
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash.BytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IntBlockPool;
-import org.apache.lucene.util.BytesRefHash.BytesStartArray;
 
 abstract class TermsHashPerField implements Comparable<TermsHashPerField> {
   private static final int HASH_INIT_SIZE = 4;
@@ -50,6 +51,8 @@
 
   final BytesRefHash bytesHash;
 
+  int maxTermLength;
+
   ParallelPostingsArray postingsArray;
   private final Counter bytesUsed;
 
@@ -156,6 +159,7 @@
     //System.out.println("add term=" + termBytesRef.utf8ToString() + " doc=" + docState.docID + " termID=" + termID);
 
     if (termID >= 0) {// New posting
+      maxTermLength = Math.max(maxTermLength, termBytesRef.length);
       bytesHash.byteStart(termID);
       // Init stream slices
       if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
index d1972cd..2afcfff 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
@@ -20,7 +20,8 @@
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.ControlledRealTimeReopenThread; // javadocs
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
@@ -47,10 +48,18 @@
     this.writer = writer;
   }
 
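+  /** Calls {@link IndexWriter#newDocument}. */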
+  public Document newDocument() {
+    return writer.newDocument();
+  }
+
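+  /** Calls {@link IndexWriter#getFieldTypes}. */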
+  public FieldTypes getFieldTypes() {
+    return writer.getFieldTypes();
+  }
+
   /** Calls {@link
-   *  IndexWriter#updateDocument(Term,IndexDocument)} and
+   *  IndexWriter#updateDocument(Term,Iterable)} and
    *  returns the generation that reflects this change. */
-  public long updateDocument(Term t, IndexDocument d) throws IOException {
+  public long updateDocument(Term t, Iterable<? extends IndexableField> d) throws IOException {
     writer.updateDocument(t, d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
@@ -59,7 +68,7 @@
   /** Calls {@link
    *  IndexWriter#updateDocuments(Term,Iterable)} and returns
    *  the generation that reflects this change. */
-  public long updateDocuments(Term t, Iterable<? extends IndexDocument> docs) throws IOException {
+  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     writer.updateDocuments(t, docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();
@@ -105,9 +114,9 @@
     return indexingGen.get();
   }
 
-  /** Calls {@link IndexWriter#addDocument(IndexDocument)}
+  /** Calls {@link IndexWriter#addDocument(Iterable)}
    *  and returns the generation that reflects this change. */
-  public long addDocument(IndexDocument d) throws IOException {
+  public long addDocument(Iterable<? extends IndexableField> d) throws IOException {
     writer.addDocument(d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
@@ -115,7 +124,7 @@
 
   /** Calls {@link IndexWriter#addDocuments(Iterable)} and
    *  returns the generation that reflects this change. */
-  public long addDocuments(Iterable<? extends IndexDocument> docs) throws IOException {
+  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     writer.addDocuments(docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();
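Putting the new document-centric API together, a hedged indexing sketch; addAtom/addLargeText are assumed names from this branch's Document API, and the field names are illustrative:

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TrackingIndexWriter;

    static long indexOne(TrackingIndexWriter writer, String id, String body) throws IOException {
      Document doc = writer.newDocument();  // schema-aware document on this branch
      doc.addAtom("id", id);                // assumed branch API
      doc.addLargeText("body", body);       // assumed branch API
      // The returned generation can be handed to ControlledRealTimeReopenThread:
      return writer.updateDocument(new Term("id", id), doc);
    }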
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java b/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
index 311995c..f61d020 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
@@ -53,6 +53,32 @@
     }
   };
 
+  /** A full {@code DocIdSet} instance (matches all docs). */
+  // TODO: does this already exist somewhere?
+  public static DocIdSet full(final int maxDoc) {
+    return new DocIdSet() {
+      @Override
+      public DocIdSetIterator iterator() {
+        return DocIdSetIterator.full(maxDoc);
+      }
+    
+      @Override
+      public boolean isCacheable() {
+        return true;
+      }
+    
+      @Override
+      public Bits bits() {
+        return new Bits.MatchAllBits(maxDoc);
+      }
+
+      @Override
+      public long ramBytesUsed() {
+        return 0L;
+      }
+    };
+  }
+
   /** Provides a {@link DocIdSetIterator} to access the set.
    * This implementation can return <code>null</code> if there
    * are no docs that match. */
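A hedged sketch walking the new full set; the count comes out equal to maxDoc:

    import java.io.IOException;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.DocIdSetIterator;

    static int countAll(int maxDoc) throws IOException {
      DocIdSetIterator it = DocIdSet.full(maxDoc).iterator();
      int count = 0;
      while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++; // visits docs 0..maxDoc-1 in order
      }
      return count;
    }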
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
index 55df0db..2f99c42 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
@@ -58,6 +58,44 @@
       }
     };
   }
+
+  // TODO: does this already exist somewhere?  it's silly...
+  /** A full {@code DocIdSetIterator} instance (matches all docs). */
+  public static final DocIdSetIterator full(final int maxDoc) {
+    return new DocIdSetIterator() {
+      int docID = -1;
+      
+      @Override
+      public int advance(int target) {
+        assert target >= 0;
+        if (target >= maxDoc) {
+          docID = NO_MORE_DOCS;
+        } else {
+          docID = target;
+        }
+        return docID;
+      }
+      
+      @Override
+      public int docID() {
+        return docID;
+      }
+
+      @Override
+      public int nextDoc() {
+        docID++;
+        if (docID >= maxDoc) {
+          docID = NO_MORE_DOCS;
+        }
+        return docID;
+      }
+      
+      @Override
+      public long cost() {
+        // Upper bound on matching docs: this iterator visits all of them.
+        return maxDoc;
+      }
+    };
+  }
   
   /**
    * When returned by {@link #nextDoc()}, {@link #advance(int)} and
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
index 03eb14c..eb850ba 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesRangeFilter.java
@@ -18,16 +18,14 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.DoubleField; // for javadocs
-import org.apache.lucene.document.FloatField; // for javadocs
-import org.apache.lucene.document.IntField; // for javadocs
-import org.apache.lucene.document.LongField; // for javadocs
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.HalfFloat;
 import org.apache.lucene.util.NumericUtils;
 
 /**
@@ -69,13 +67,15 @@
   final T upperVal;
   final boolean includeLower;
   final boolean includeUpper;
-  
-  private DocValuesRangeFilter(String field, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
+  final String desc;
+
+  private DocValuesRangeFilter(String field, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper, String desc) {
     this.field = field;
     this.lowerVal = lowerVal;
     this.upperVal = upperVal;
     this.includeLower = includeLower;
     this.includeUpper = includeUpper;
+    this.desc = desc;
   }
   
   /** This method is implemented for each data type */
@@ -87,8 +87,8 @@
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static DocValuesRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<String>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<String>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
@@ -144,8 +144,8 @@
    * of the values to <code>null</code>.
    */
   // TODO: bogus that newStringRange doesnt share this code... generics hell
-  public static DocValuesRangeFilter<BytesRef> newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<BytesRef>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<BytesRef> newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<BytesRef>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
@@ -200,8 +200,8 @@
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static DocValuesRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<Integer>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<Integer>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         final int inclusiveLowerPoint, inclusiveUpperPoint;
@@ -242,8 +242,8 @@
    * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static DocValuesRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<Long>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<Long>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         final long inclusiveLowerPoint, inclusiveUpperPoint;
@@ -284,19 +284,19 @@
    * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static DocValuesRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<Float>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<Float>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers
-        // using NumericUtils to easier find the next bigger/lower value
+        // using NumericUtils to more easily find the next bigger/lower value
         final float inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
           float f = lowerVal.floatValue();
           if (!includeUpper && f > 0.0f && Float.isInfinite(f))
             return null;
-          int i = NumericUtils.floatToSortableInt(f);
-          inclusiveLowerPoint = NumericUtils.sortableIntToFloat( includeLower ?  i : (i + 1) );
+          int i = NumericUtils.floatToInt(f);
+          inclusiveLowerPoint = NumericUtils.intToFloat(includeLower ? i : (i + 1));
         } else {
           inclusiveLowerPoint = Float.NEGATIVE_INFINITY;
         }
@@ -304,8 +304,8 @@
           float f = upperVal.floatValue();
           if (!includeUpper && f < 0.0f && Float.isInfinite(f))
             return null;
-          int i = NumericUtils.floatToSortableInt(f);
-          inclusiveUpperPoint = NumericUtils.sortableIntToFloat( includeUpper ? i : (i - 1) );
+          int i = NumericUtils.floatToInt(f);
+          inclusiveUpperPoint = NumericUtils.intToFloat(includeUpper ? i : (i - 1));
         } else {
           inclusiveUpperPoint = Float.POSITIVE_INFINITY;
         }
@@ -330,19 +330,19 @@
    * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static DocValuesRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeFilter<Double>(field, lowerVal, upperVal, includeLower, includeUpper) {
+  public static DocValuesRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<Double>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
       @Override
       public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers
-        // using NumericUtils to easier find the next bigger/lower value
+        // using NumericUtils to more easily find the next bigger/lower value
         final double inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
           double f = lowerVal.doubleValue();
           if (!includeUpper && f > 0.0 && Double.isInfinite(f))
             return null;
-          long i = NumericUtils.doubleToSortableLong(f);
-          inclusiveLowerPoint = NumericUtils.sortableLongToDouble( includeLower ?  i : (i + 1L) );
+          long i = NumericUtils.doubleToLong(f);
+          inclusiveLowerPoint = NumericUtils.longToDouble(includeLower ? i : (i + 1L));
         } else {
           inclusiveLowerPoint = Double.NEGATIVE_INFINITY;
         }
@@ -350,8 +350,8 @@
           double f = upperVal.doubleValue();
           if (!includeUpper && f < 0.0 && Double.isInfinite(f))
             return null;
-          long i = NumericUtils.doubleToSortableLong(f);
-          inclusiveUpperPoint = NumericUtils.sortableLongToDouble( includeUpper ? i : (i - 1L) );
+          long i = NumericUtils.doubleToLong(f);
+          inclusiveUpperPoint = NumericUtils.longToDouble(includeUpper ? i : (i - 1L));
         } else {
           inclusiveUpperPoint = Double.POSITIVE_INFINITY;
         }
@@ -372,15 +372,77 @@
     };
   }
   
+  /**
+   * Creates a numeric range filter using {@link org.apache.lucene.index.LeafReader#getNumericDocValues(String)}. This works with all
+   * half-float fields containing exactly one numeric term in the field. The range can be half-open by setting one
+   * of the values to <code>null</code>.
+   */
+  public static DocValuesRangeFilter<Float> newHalfFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper, String desc) {
+    return new DocValuesRangeFilter<Float>(field, lowerVal, upperVal, includeLower, includeUpper, desc) {
+      @Override
+      public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
+        // we transform the floating point numbers to sortable shorts
+        // using NumericUtils to more easily find the next bigger/lower value
+
+        final short inclusiveLowerShort;
+        if (lowerVal != null) {
+          float f = lowerVal.floatValue();
+          short s = NumericUtils.halfFloatToShort(f);
+          if (includeLower == false) {
+            if (s == Short.MAX_VALUE) {
+              return null;
+            }
+            s++;
+          }
+          inclusiveLowerShort = s;
+        } else {
+          inclusiveLowerShort = NumericUtils.halfFloatToShort(Float.NEGATIVE_INFINITY);
+        }
+
+        final short inclusiveUpperShort;
+        if (upperVal != null) {
+          float f = upperVal.floatValue();
+          short s = NumericUtils.halfFloatToShort(f);
+          if (includeUpper == false) {
+            if (s == Short.MIN_VALUE) {
+              return null;
+            }
+            s--;
+          }
+          inclusiveUpperShort = s;
+        } else {
+          inclusiveUpperShort = NumericUtils.halfFloatToShort(Float.POSITIVE_INFINITY);
+        }
+        
+        if (inclusiveLowerShort > inclusiveUpperShort) {
+          return null;
+        }
+
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
+        return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
+          @Override
+          protected boolean matchDoc(int doc) {
+            final short value = (short) values.get(doc);
+            return value >= inclusiveLowerShort && value <= inclusiveUpperShort;
+          }
+        };
+      }
+    };
+  }
+  
   @Override
   public final String toString() {
-    final StringBuilder sb = new StringBuilder(field).append(":");
-    return sb.append(includeLower ? '[' : '{')
-      .append((lowerVal == null) ? "*" : lowerVal.toString())
-      .append(" TO ")
-      .append((upperVal == null) ? "*" : upperVal.toString())
-      .append(includeUpper ? ']' : '}')
-      .toString();
+    if (desc == null) {
+      final StringBuilder sb = new StringBuilder(field).append(":");
+      return sb.append(includeLower ? '[' : '{')
+        .append((lowerVal == null) ? "*" : lowerVal.toString())
+        .append(" TO ")
+        .append((upperVal == null) ? "*" : upperVal.toString())
+        .append(includeUpper ? ']' : '}')
+        .toString();
+    } else {
+      return desc;
+    }
   }
 
   @Override
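The (i + 1)/(i - 1) bound adjustments in the hunks above depend on the float-to-int mapping being a monotonic bijection, so that one sortable step is exactly one representable float. A minimal sketch of such a mapping, assuming the classic sign-bit trick; floatToInt/intToFloat here are stand-ins mirroring the renamed NumericUtils methods, not the branch's actual implementation:

public final class SortableFloatSketch {
  // Map a float to an int that sorts in the same order as the float:
  // non-negative floats keep their raw bits, negative floats get their
  // magnitude bits flipped so more-negative values map to smaller ints.
  static int floatToInt(float f) {
    int bits = Float.floatToIntBits(f);
    return bits >= 0 ? bits : bits ^ 0x7FFFFFFF;
  }

  static float intToFloat(int i) {
    return Float.intBitsToFloat(i >= 0 ? i : i ^ 0x7FFFFFFF);
  }

  public static void main(String[] args) {
    int i = floatToInt(0.03f);
    // One sortable step up is exactly the next representable float,
    // which is what makes the exclusive-bound adjustment correct:
    System.out.println(intToFloat(i + 1) > 0.03f); // true
  }
}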
diff --git a/lucene/core/src/java/org/apache/lucene/search/DoubleComparator.java b/lucene/core/src/java/org/apache/lucene/search/DoubleComparator.java
new file mode 100644
index 0000000..2c02167
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/DoubleComparator.java
@@ -0,0 +1,44 @@
+package org.apache.lucene.search;
+
+import org.apache.lucene.util.NumericUtils;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Parses field's values as double (using {@link
+ *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)}) and sorts by ascending value */
+public class DoubleComparator extends NumericComparator<Double> {
+
+  /** 
+   * Creates a new comparator based on {@link Double#compare} for {@code numHits}.
+   * When a document has no value for the field, {@code missingValue} is substituted. 
+   */
+  public DoubleComparator(int numHits, String field, Double missingValue) {
+    super(numHits, field, NumericUtils.doubleToLong(missingValue));
+  }
+
+  @Override
+  protected Double longToValue(long value) {
+    return NumericUtils.longToDouble(value);
+  }
+
+  @Override
+  protected long valueToLong(Double value) {
+    return NumericUtils.doubleToLong(value);
+  }
+}
+
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
index 142b2cd..36937e1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
@@ -28,6 +28,7 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.NumericUtils;
 
 /**
  * Expert: a FieldComparator compares hits so as to determine their
@@ -133,332 +134,6 @@
     }
   }
 
-
-  /**
-   * Base FieldComparator class for numeric types
-   */
-  public static abstract class NumericComparator<T extends Number> extends SimpleFieldComparator<T> {
-    protected final T missingValue;
-    protected final String field;
-    protected Bits docsWithField;
-    protected NumericDocValues currentReaderValues;
-    
-    public NumericComparator(String field, T missingValue) {
-      this.field = field;
-      this.missingValue = missingValue;
-    }
-
-    @Override
-    protected void doSetNextReader(LeafReaderContext context) throws IOException {
-      currentReaderValues = getNumericDocValues(context, field);
-      if (missingValue != null) {
-        docsWithField = DocValues.getDocsWithField(context.reader(), field);
-        // optimization to remove unneeded checks on the bit interface:
-        if (docsWithField instanceof Bits.MatchAllBits) {
-          docsWithField = null;
-        }
-      } else {
-        docsWithField = null;
-      }
-    }
-    
-    /** Retrieves the NumericDocValues for the field in this segment */
-    protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
-      return DocValues.getNumeric(context.reader(), field);
-    }
-  }
-
-  /** Parses field's values as double (using {@link
-   *  org.apache.lucene.index.LeafReader#getNumericDocValues} and sorts by ascending value */
-  public static class DoubleComparator extends NumericComparator<Double> {
-    private final double[] values;
-    private double bottom;
-    private double topValue;
-
-    /** 
-     * Creates a new comparator based on {@link Double#compare} for {@code numHits}.
-     * When a document has no value for the field, {@code missingValue} is substituted. 
-     */
-    public DoubleComparator(int numHits, String field, Double missingValue) {
-      super(field, missingValue);
-      values = new double[numHits];
-    }
-
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Double.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Double.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      values[slot] = v2;
-    }
-    
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public void setTopValue(Double value) {
-      topValue = value;
-    }
-
-    @Override
-    public Double value(int slot) {
-      return Double.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareTop(int doc) {
-      double docValue = Double.longBitsToDouble(currentReaderValues.get(doc));
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Double.compare(topValue, docValue);
-    }
-  }
-
-  /** Parses field's values as float (using {@link
-   *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
-  public static class FloatComparator extends NumericComparator<Float> {
-    private final float[] values;
-    private float bottom;
-    private float topValue;
-
-    /** 
-     * Creates a new comparator based on {@link Float#compare} for {@code numHits}.
-     * When a document has no value for the field, {@code missingValue} is substituted. 
-     */
-    public FloatComparator(int numHits, String field, Float missingValue) {
-      super(field, missingValue);
-      values = new float[numHits];
-    }
-    
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Float.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      // TODO: are there sneaky non-branch ways to compute sign of float?
-      float v2 = Float.intBitsToFloat((int)currentReaderValues.get(doc));
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Float.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      float v2 =  Float.intBitsToFloat((int)currentReaderValues.get(doc));
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      values[slot] = v2;
-    }
-    
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public void setTopValue(Float value) {
-      topValue = value;
-    }
-
-    @Override
-    public Float value(int slot) {
-      return Float.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareTop(int doc) {
-      float docValue = Float.intBitsToFloat((int)currentReaderValues.get(doc));
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Float.compare(topValue, docValue);
-    }
-  }
-
-  /** Parses field's values as int (using {@link
-   *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
-  public static class IntComparator extends NumericComparator<Integer> {
-    private final int[] values;
-    private int bottom;                           // Value of bottom of queue
-    private int topValue;
-
-    /** 
-     * Creates a new comparator based on {@link Integer#compare} for {@code numHits}.
-     * When a document has no value for the field, {@code missingValue} is substituted. 
-     */
-    public IntComparator(int numHits, String field, Integer missingValue) {
-      super(field, missingValue);
-      values = new int[numHits];
-    }
-        
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Integer.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      int v2 = (int) currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Integer.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      int v2 = (int) currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      values[slot] = v2;
-    }
-    
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public void setTopValue(Integer value) {
-      topValue = value;
-    }
-
-    @Override
-    public Integer value(int slot) {
-      return Integer.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareTop(int doc) {
-      int docValue = (int) currentReaderValues.get(doc);
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Integer.compare(topValue, docValue);
-    }
-  }
-
-  /** Parses field's values as long (using {@link
-   *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
-  public static class LongComparator extends NumericComparator<Long> {
-    private final long[] values;
-    private long bottom;
-    private long topValue;
-
-    /** 
-     * Creates a new comparator based on {@link Long#compare} for {@code numHits}.
-     * When a document has no value for the field, {@code missingValue} is substituted. 
-     */
-    public LongComparator(int numHits, String field, Long missingValue) {
-      super(field, missingValue);
-      values = new long[numHits];
-    }
-
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Long.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      // TODO: there are sneaky non-branch ways to compute
-      // -1/+1/0 sign
-      long v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Long.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      long v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      values[slot] = v2;
-    }
-    
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public void setTopValue(Long value) {
-      topValue = value;
-    }
-
-    @Override
-    public Long value(int slot) {
-      return Long.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareTop(int doc) {
-      long docValue = currentReaderValues.get(doc);
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Long.compare(topValue, docValue);
-    }
-  }
-
   /** Sorts by descending relevance.  NOTE: if you are
    *  sorting only by descending relevance and then
    *  secondarily by ascending docID, performance is faster
@@ -967,7 +642,6 @@
 
     @Override
     public int compareValues(BytesRef val1, BytesRef val2) {
-      // missing always sorts first:
       if (val1 == null) {
         if (val2 == null) {
           return 0;
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index 3a6f664..db8fb59 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -61,7 +61,6 @@
     public OneComparatorFieldValueHitQueue(SortField[] fields, int size)
         throws IOException {
       super(fields, size);
-
       assert fields.length == 1;
       oneComparator = comparators[0];
       oneReverseMul = reverseMul[0];
diff --git a/lucene/core/src/java/org/apache/lucene/search/FloatComparator.java b/lucene/core/src/java/org/apache/lucene/search/FloatComparator.java
new file mode 100644
index 0000000..03c645d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/FloatComparator.java
@@ -0,0 +1,44 @@
+package org.apache.lucene.search;
+
+import org.apache.lucene.util.NumericUtils;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Parses field's values as float (using {@link
+ *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)}) and sorts by ascending value */
+public class FloatComparator extends NumericComparator<Float> {
+
+  /** 
+   * Creates a new comparator based on {@link Float#compare} for {@code numHits}.
+   * When a document has no value for the field, {@code missingValue} is substituted. 
+   */
+  public FloatComparator(int numHits, String field, Float missingValue) {
+    super(numHits, field, NumericUtils.floatToInt(missingValue));
+  }
+
+  @Override
+  protected Float longToValue(long value) {
+    return NumericUtils.intToFloat((int) value);
+  }
+
+  @Override
+  protected long valueToLong(Float value) {
+    return NumericUtils.floatToInt(value);
+  }
+}
+
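How these now-standalone comparators plug into sorting is unchanged; a hedged sketch via the long-standing FieldComparatorSource hook (the "weight" field and missing value are hypothetical):

import java.io.IOException;

import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.FloatComparator;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

public class FloatSortSketch {
  static Sort byWeight() {
    FieldComparatorSource source = new FieldComparatorSource() {
      @Override
      public FieldComparator<Float> newComparator(String field, int numHits, int sortPos, boolean reversed) throws IOException {
        // Documents missing the field sort as 0f here:
        return new FloatComparator(numHits, field, 0f);
      }
    };
    return new Sort(new SortField("weight", source));
  }
}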
diff --git a/lucene/core/src/java/org/apache/lucene/search/HalfFloatComparator.java b/lucene/core/src/java/org/apache/lucene/search/HalfFloatComparator.java
new file mode 100644
index 0000000..4d7a100
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/HalfFloatComparator.java
@@ -0,0 +1,43 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.NumericUtils;
+
+/** Parses field's values as half-float (using {@link
+ *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)}) and sorts by ascending value */
+public class HalfFloatComparator extends NumericComparator<Float> {
+
+  /** 
+   * Creates a new comparator based on {@link Float#compare} for {@code numHits}.
+   * When a document has no value for the field, {@code missingValue} is substituted. 
+   */
+  public HalfFloatComparator(int numHits, String field, float missingValue) {
+    super(numHits, field, NumericUtils.halfFloatToShort(missingValue));
+  }
+    
+  @Override
+  protected Float longToValue(long value) {
+    return NumericUtils.shortToHalfFloat((short) value);
+  }
+
+  @Override
+  protected long valueToLong(Float value) {
+    return NumericUtils.halfFloatToShort(value);
+  }
+}
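A hedged usage sketch combining the two new half-float pieces; the "weight" field and the concrete bounds are hypothetical, but the signatures match the declarations above (wiring the comparator into a Sort would go through FieldComparatorSource as in the earlier sketch):

import org.apache.lucene.search.DocValuesRangeFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.HalfFloatComparator;

public class HalfFloatUsageSketch {
  public static void main(String[] args) {
    // Inclusive range filter over a hypothetical half-float "weight" field,
    // with a custom description that toString() will return:
    Filter filter = DocValuesRangeFilter.newHalfFloatRange(
        "weight", 0.03f, 0.10f, true, true, "weight:[0.03 TO 0.10]");

    // Matching sort comparator; documents missing the field sort as 0f:
    HalfFloatComparator comparator = new HalfFloatComparator(10, "weight", 0f);
  }
}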
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 84df738..1bf92e9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -27,6 +27,8 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader; // javadocs
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
@@ -34,7 +36,6 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
@@ -175,11 +176,15 @@
     return reader;
   }
 
+  public FieldTypes getFieldTypes() {
+    return reader.getFieldTypes();
+  }
+
   /** 
-   * Sugar for <code>.getIndexReader().document(docID)</code> 
-   * @see IndexReader#document(int) 
+   * Sugar for <code>.getIndexReader().document(docID)</code> 
+   * @see IndexReader#document(int) 
    */
-  public StoredDocument doc(int docID) throws IOException {
+  public Document doc(int docID) throws IOException {
     return reader.document(docID);
   }
 
@@ -195,7 +200,7 @@
    * Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code>
    * @see IndexReader#document(int, Set) 
    */
-  public StoredDocument doc(int docID, Set<String> fieldsToLoad) throws IOException {
+  public Document doc(int docID, Set<String> fieldsToLoad) throws IOException {
     return reader.document(docID, fieldsToLoad);
   }
 
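With StoredDocument gone, callers receive a Document back directly. A minimal sketch, assuming an IndexSearcher over an already-open reader; the query and hit count are arbitrary:

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

public class SearcherSugarSketch {
  static void firstPage(IndexSearcher searcher) throws IOException {
    FieldTypes fieldTypes = searcher.getFieldTypes(); // index-wide schema access
    TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10);
    for (ScoreDoc sd : hits.scoreDocs) {
      Document doc = searcher.doc(sd.doc); // returns Document, not StoredDocument
    }
  }
}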
diff --git a/lucene/core/src/java/org/apache/lucene/search/IntComparator.java b/lucene/core/src/java/org/apache/lucene/search/IntComparator.java
new file mode 100644
index 0000000..040a0ec
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/IntComparator.java
@@ -0,0 +1,42 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Parses field's values as int (using {@link
+ *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)}) and sorts by ascending value */
+public class IntComparator extends NumericComparator<Integer> {
+
+  /** 
+   * Creates a new comparator based on {@link Integer#compare} for {@code numHits}.
+   * When a document has no value for the field, {@code missingValue} is substituted. 
+   */
+  public IntComparator(int numHits, String field, Integer missingValue) {
+    super(numHits, field, missingValue);
+  }
+
+  @Override
+  protected Integer longToValue(long value) {
+    return (int) value;
+  }
+
+  @Override
+  protected long valueToLong(Integer value) {
+    return value.longValue();
+  }
+}
+
diff --git a/lucene/core/src/java/org/apache/lucene/search/LongComparator.java b/lucene/core/src/java/org/apache/lucene/search/LongComparator.java
new file mode 100644
index 0000000..f39f817
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/LongComparator.java
@@ -0,0 +1,41 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Parses field's values as long (using {@link
+ *  org.apache.lucene.index.LeafReader#getNumericDocValues(String)}) and sorts by ascending value */
+public class LongComparator extends NumericComparator<Long> {
+
+  /** 
+   * Creates a new comparator based on {@link Long#compare} for {@code numHits}.
+   * When a document has no value for the field, {@code missingValue} is substituted. 
+   */
+  public LongComparator(int numHits, String field, Long missingValue) {
+    super(numHits, field, missingValue);
+  }
+
+  @Override
+  protected Long longToValue(long value) {
+    return value;
+  }
+
+  @Override
+  protected long valueToLong(Long value) {
+    return value;
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/search/NumericComparator.java b/lucene/core/src/java/org/apache/lucene/search/NumericComparator.java
new file mode 100644
index 0000000..f594c8c
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/NumericComparator.java
@@ -0,0 +1,109 @@
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.util.Bits;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Base FieldComparator class for numeric types
+ */
+public abstract class NumericComparator<T extends Number> extends SimpleFieldComparator<T> {
+  private final long[] values;
+  private final long missingValue;
+  private long bottom;
+  private long topValue;
+  protected final String field;
+  protected Bits docsWithField;
+  protected NumericDocValues currentReaderValues;
+    
+  public NumericComparator(int numHits, String field, long missingValue) {
+    this.field = field;
+    this.values = new long[numHits];
+    this.missingValue = missingValue;
+  }
+
+  @Override
+  public int compare(int slot1, int slot2) {
+    return Long.compare(values[slot1], values[slot2]);
+  }
+
+  private long getDocValue(int doc) {
+    long v = currentReaderValues.get(doc);
+    // Test for v == 0 to save Bits.get method call for
+    // the common case (doc has value and value is non-zero):
+    if (docsWithField != null && v == 0 && !docsWithField.get(doc)) {
+      v = missingValue;
+    }
+    return v;
+  }
+
+  @Override
+  public int compareBottom(int doc) {
+    return Long.compare(bottom, getDocValue(doc));
+  }
+
+  @Override
+  public void copy(int slot, int doc) {
+    values[slot] = getDocValue(doc);
+  }
+    
+  @Override
+  public void setBottom(final int bottom) {
+    this.bottom = values[bottom];
+  }
+
+  @Override
+  public void setTopValue(T value) {
+    topValue = valueToLong(value);
+  }
+
+  @Override
+  public void doSetNextReader(LeafReaderContext context) throws IOException {
+    currentReaderValues = getNumericDocValues(context, field);
+    docsWithField = DocValues.getDocsWithField(context.reader(), field);
+    // optimization to remove unneeded checks on the bit interface:
+    if (docsWithField instanceof Bits.MatchAllBits) {
+      docsWithField = null;
+    }
+  }
+    
+  @Override
+  public int compareTop(int doc) {
+    return Long.compare(topValue, getDocValue(doc));
+  }
+
+  protected abstract T longToValue(long value);
+
+  protected abstract long valueToLong(T value);
+
+  @Override
+  public T value(int slot) {
+    return longToValue(values[slot]);
+  }
+
+  /** Retrieves the NumericDocValues for the field in this segment */
+  protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
+    return DocValues.getNumeric(context.reader(), field);
+  }
+}
+
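Each per-type comparator above is now just a pair of conversions over this shared long-based machinery. As a sketch of the contract, a hypothetical short-valued comparator (not part of the branch) would need only:

package org.apache.lucene.search;

/** Hypothetical example: sorts short-valued numeric doc values by
 *  plugging two conversions into the shared NumericComparator. */
public class ShortComparator extends NumericComparator<Short> {

  public ShortComparator(int numHits, String field, Short missingValue) {
    // Assumes a non-null missingValue; it unboxes and widens to long.
    super(numHits, field, missingValue);
  }

  @Override
  protected Short longToValue(long value) {
    return (short) value;
  }

  @Override
  protected long valueToLong(Short value) {
    return value.longValue();
  }
}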
diff --git a/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java
deleted file mode 100644
index 05453ad..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java
+++ /dev/null
@@ -1,197 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
-import org.apache.lucene.document.DoubleField; // for javadocs
-import org.apache.lucene.document.FloatField; // for javadocs
-import org.apache.lucene.document.IntField; // for javadocs
-import org.apache.lucene.document.LongField; // for javadocs
-import org.apache.lucene.util.NumericUtils; // for javadocs
-
-/**
- * A {@link Filter} that only accepts numeric values within
- * a specified range. To use this, you must first index the
- * numeric values using {@link IntField}, {@link
- * FloatField}, {@link LongField} or {@link DoubleField} (expert: {@link
- * NumericTokenStream}).
- *
- * <p>You create a new NumericRangeFilter with the static
- * factory methods, eg:
- *
- * <pre class="prettyprint">
- * Filter f = NumericRangeFilter.newFloatRange("weight", 0.03f, 0.10f, true, true);
- * </pre>
- *
- * accepts all documents whose float valued "weight" field
- * ranges from 0.03 to 0.10, inclusive.
- * See {@link NumericRangeQuery} for details on how Lucene
- * indexes and searches numeric valued fields.
- *
- * @since 2.9
- **/
-public final class NumericRangeFilter<T extends Number> extends MultiTermQueryWrapperFilter<NumericRangeQuery<T>> {
-
-  private NumericRangeFilter(final NumericRangeQuery<T> query) {
-    super(query);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>long</code>
-   * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Long> newLongRange(final String field, final int precisionStep,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newLongRange(field, precisionStep, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>long</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Long> newLongRange(final String field,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newLongRange(field, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>int</code>
-   * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Integer> newIntRange(final String field, final int precisionStep,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newIntRange(field, precisionStep, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>int</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Integer> newIntRange(final String field,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newIntRange(field, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>double</code>
-   * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Double> newDoubleRange(final String field, final int precisionStep,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newDoubleRange(field, precisionStep, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>double</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Double> newDoubleRange(final String field,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newDoubleRange(field, min, max, minInclusive, maxInclusive)
-    );
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>float</code>
-   * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Float> newFloatRange(final String field, final int precisionStep,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newFloatRange(field, precisionStep, min, max, minInclusive, maxInclusive)
-    );
-  }
-
-  /**
-   * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>float</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeFilter<Float> newFloatRange(final String field,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeFilter<>(
-      NumericRangeQuery.newFloatRange(field, min, max, minInclusive, maxInclusive)
-    );
-  }
-
-  /** Returns <code>true</code> if the lower endpoint is inclusive */
-  public boolean includesMin() { return query.includesMin(); }
-  
-  /** Returns <code>true</code> if the upper endpoint is inclusive */
-  public boolean includesMax() { return query.includesMax(); }
-
-  /** Returns the lower value of this range filter */
-  public T getMin() { return query.getMin(); }
-
-  /** Returns the upper value of this range filter */
-  public T getMax() { return query.getMax(); }
-  
-  /** Returns the precision step. */
-  public int getPrecisionStep() { return query.getPrecisionStep(); }
-  
-}
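For code that used these deleted factories, the doc-values-based replacements keep a near-identical shape, plus the trailing desc argument. A hedged migration sketch; the field name and bounds are hypothetical:

import org.apache.lucene.search.DocValuesRangeFilter;
import org.apache.lucene.search.Filter;

public class RangeFilterMigrationSketch {
  // Before (trie-encoded, removed in this change):
  //   Filter f = NumericRangeFilter.newDoubleRange("price", 9.99, 99.99, true, true);
  // After (doc values; a null desc falls back to the generated toString()):
  static Filter priceFilter() {
    return DocValuesRangeFilter.newDoubleRange("price", 9.99, 99.99, true, true, null);
  }
}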
diff --git a/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java
deleted file mode 100644
index 6736516..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java
+++ /dev/null
@@ -1,529 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
-import org.apache.lucene.document.DoubleField; // for javadocs
-import org.apache.lucene.document.FloatField; // for javadocs
-import org.apache.lucene.document.IntField; // for javadocs
-import org.apache.lucene.document.LongField; // for javadocs
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.ToStringUtils;
-import org.apache.lucene.index.Term; // for javadocs
-
-/**
- * <p>A {@link Query} that matches numeric values within a
- * specified range.  To use this, you must first index the
- * numeric values using {@link IntField}, {@link
- * FloatField}, {@link LongField} or {@link DoubleField} (expert: {@link
- * NumericTokenStream}).  If your terms are instead textual,
- * you should use {@link TermRangeQuery}.  {@link
- * NumericRangeFilter} is the filter equivalent of this
- * query.</p>
- *
- * <p>You create a new NumericRangeQuery with the static
- * factory methods, eg:
- *
- * <pre class="prettyprint">
- * Query q = NumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
- * </pre>
- *
- * matches all documents whose float valued "weight" field
- * ranges from 0.03 to 0.10, inclusive.
- *
- * <p>The performance of NumericRangeQuery is much better
- * than the corresponding {@link TermRangeQuery} because the
- * number of terms that must be searched is usually far
- * fewer, thanks to trie indexing, described below.</p>
- *
- * <p>You can optionally specify a <a
- * href="#precisionStepDesc"><code>precisionStep</code></a>
- * when creating this query.  This is necessary if you've
- * changed this configuration from its default (4) during
- * indexing.  Lower values consume more disk space but speed
- * up searching.  Suitable values are between <b>1</b> and
- * <b>8</b>. A good starting point to test is <b>4</b>,
- * which is the default value for all <code>Numeric*</code>
- * classes.  See <a href="#precisionStepDesc">below</a> for
- * details.
- *
- * <p>This query defaults to {@linkplain
- * MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}.
- * With precision steps of &le;4, this query can be run with
- * one of the BooleanQuery rewrite methods without changing
- * BooleanQuery's default max clause count.
- *
- * <br><h3>How it works</h3>
- *
- * <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
- * where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
- *
- * <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
- * <em>Generic XML-based Framework for Metadata Portals.</em>
- * Computers &amp; Geosciences 34 (12), 1947-1955.
- * <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
- * target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
- *
- * <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
- * search engine and not a conventional database, it cannot handle numerical ranges
- * (e.g., field value is inside user defined bounds, even dates are numerical values).
- * We have developed an extension to Apache Lucene that stores
- * the numerical values in a special string-encoded format with variable precision
- * (all numerical values like doubles, longs, floats, and ints are converted to
- * lexicographic sortable string representations and stored with different precisions
- * (for a more detailed description of how the values are stored,
- * see {@link NumericUtils}). A range is then divided recursively into multiple intervals for searching:
- * The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
- * while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
- *
- * <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
- * uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
- * lowest precision. Overall, a range could consist of a theoretical maximum of
- * <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
- * 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
- * because it would always be possible to reduce the full 256 values to one term with degraded precision).
- * In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
- * and a uniform value distribution).</p>
- *
- * <a name="precisionStepDesc"><h3>Precision Step</h3>
- * <p>You can choose any <code>precisionStep</code> when encoding values.
- * Lower step values mean more precisions and so more terms in index (and index gets larger). The number
- * of indexed terms per value is (those are generated by {@link NumericTokenStream}):
- * <p style="font-family:serif">
- * &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
- * </p>
- * As the lower precision terms are shared by many values, the additional terms only
- * slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
- * impact on the postings (the postings file will have  more entries, as every document is linked to
- * <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
- * of the term dictionary in comparison to one term per value:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}" />
- * </p>
- * <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
- * which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
- * executing the query is:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)" />
- * </p>
- * <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
- * step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the faster search speed is reduced by more seeking
- * in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
- * be found out by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
- * using a multiple of the original step value.</p>
- *
- * <p>Good values for <code>precisionStep</code> are depending on usage and data type:
- * <ul>
- *  <li>The default for all data types is <b>4</b>, which is used, when no <code>precisionStep</code> is given.
- *  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
- *  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
- *  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
- *  fair to use {@link Integer#MAX_VALUE} (see below).
- *  <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produces one token
- *  per value in the index and querying is as slow as a conventional {@link TermRangeQuery}. But it can be used
- *  to produce fields, that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
- *  <code>precisionStep</code>). Using {@link IntField},
- *  {@link LongField}, {@link FloatField} or {@link DoubleField} for sorting
- *  is ideal, because building the field cache is much faster than with text-only numbers.
- *  These fields have one term per value and therefore also work with term enumeration for building distinct lists
- *  (e.g. facets / preselected values to search for).
- *  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
- * </ul>
- *
- * <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
- * that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
- * took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
- * and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
- * precision step). This query type was developed for a geographic portal, where the performance for
- * e.g. bounding boxes or exact date/time stamps is important.</p>
- *
- * @since 2.9
- **/
-public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
-
-  private NumericRangeQuery(final String field, final int precisionStep, final NumericType dataType,
-    T min, T max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    super(field);
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    this.dataType = dataType;
-    this.min = min;
-    this.max = max;
-    this.minInclusive = minInclusive;
-    this.maxInclusive = maxInclusive;
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, precisionStep, NumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Long> newLongRange(final String field,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, precisionStep, NumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Integer> newIntRange(final String field,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT_32, NumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, precisionStep, NumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range, to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Double> newDoubleRange(final String field,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code> that queries a <code>float</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN}, use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, precisionStep, NumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>NumericRangeQuery</code> that queries a <code>float</code>
-   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN}, use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static NumericRangeQuery<Float> newFloatRange(final String field,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT_32, NumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-
-  @Override @SuppressWarnings("unchecked")
-  protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
-    // very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
-    if (min != null && max != null && ((Comparable<T>) min).compareTo(max) > 0) {
-      return TermsEnum.EMPTY;
-    }
-    return new NumericRangeTermsEnum(terms.iterator(null));
-  }
-
-  /** Returns <code>true</code> if the lower endpoint is inclusive */
-  public boolean includesMin() { return minInclusive; }
-  
-  /** Returns <code>true</code> if the upper endpoint is inclusive */
-  public boolean includesMax() { return maxInclusive; }
-
-  /** Returns the lower value of this range query */
-  public T getMin() { return min; }
-
-  /** Returns the upper value of this range query */
-  public T getMax() { return max; }
-  
-  /** Returns the precision step. */
-  public int getPrecisionStep() { return precisionStep; }
-  
-  @Override
-  public String toString(final String field) {
-    final StringBuilder sb = new StringBuilder();
-    if (!getField().equals(field)) sb.append(getField()).append(':');
-    return sb.append(minInclusive ? '[' : '{')
-      .append((min == null) ? "*" : min.toString())
-      .append(" TO ")
-      .append((max == null) ? "*" : max.toString())
-      .append(maxInclusive ? ']' : '}')
-      .append(ToStringUtils.boost(getBoost()))
-      .toString();
-  }
-
-  @Override
-  @SuppressWarnings({"unchecked","rawtypes"})
-  public final boolean equals(final Object o) {
-    if (o==this) return true;
-    if (!super.equals(o))
-      return false;
-    if (o instanceof NumericRangeQuery) {
-      final NumericRangeQuery q=(NumericRangeQuery)o;
-      return (
-        (q.min == null ? min == null : q.min.equals(min)) &&
-        (q.max == null ? max == null : q.max.equals(max)) &&
-        minInclusive == q.minInclusive &&
-        maxInclusive == q.maxInclusive &&
-        precisionStep == q.precisionStep
-      );
-    }
-    return false;
-  }
-
-  @Override
-  public final int hashCode() {
-    int hash = super.hashCode();
-    hash += precisionStep^0x64365465;
-    if (min != null) hash += min.hashCode()^0x14fa55fb;
-    if (max != null) hash += max.hashCode()^0x733fa5fe;
-    return hash +
-      (Boolean.valueOf(minInclusive).hashCode()^0x14fa55fb)+
-      (Boolean.valueOf(maxInclusive).hashCode()^0x733fa5fe);
-  }
-
-  // members (package private, to be also fast accessible by NumericRangeTermEnum)
-  final int precisionStep;
-  final NumericType dataType;
-  final T min, max;
-  final boolean minInclusive,maxInclusive;
-
-  // used to handle float/double infinity correctly
-  static final long LONG_NEGATIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
-  static final long LONG_POSITIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
-  static final int INT_NEGATIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
-  static final int INT_POSITIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
-
-  /**
-   * Subclass of FilteredTermsEnum for enumerating all terms that match the
-   * sub-ranges for trie range queries, using flex API.
-   * <p>
-   * WARNING: This term enumeration is not guaranteed to always be ordered by
-   * {@link Term#compareTo}.
-   * The ordering depends on how {@link NumericUtils#splitLongRange} and
-   * {@link NumericUtils#splitIntRange} generate the sub-ranges. For
-   * {@link MultiTermQuery} ordering is not relevant.
-   */
-  private final class NumericRangeTermsEnum extends FilteredTermsEnum {
-
-    private BytesRef currentLowerBound, currentUpperBound;
-
-    private final LinkedList<BytesRef> rangeBounds = new LinkedList<>();
-
-    NumericRangeTermsEnum(final TermsEnum tenum) {
-      super(tenum);
-      switch (dataType) {
-        case LONG:
-        case DOUBLE: {
-          // lower
-          long minBound;
-          if (dataType == NumericType.LONG) {
-            minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
-          } else {
-            assert dataType == NumericType.DOUBLE;
-            minBound = (min == null) ? LONG_NEGATIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(min.doubleValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Long.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          long maxBound;
-          if (dataType == NumericType.LONG) {
-            maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
-          } else {
-            assert dataType == NumericType.DOUBLE;
-            maxBound = (max == null) ? LONG_POSITIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(max.doubleValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Long.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        case INT:
-        case FLOAT: {
-          // lower
-          int minBound;
-          if (dataType == NumericType.INT) {
-            minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
-          } else {
-            assert dataType == NumericType.FLOAT;
-            minBound = (min == null) ? INT_NEGATIVE_INFINITY
-              : NumericUtils.floatToSortableInt(min.floatValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Integer.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          int maxBound;
-          if (dataType == NumericType.INT) {
-            maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
-          } else {
-            assert dataType == NumericType.FLOAT;
-            maxBound = (max == null) ? INT_POSITIVE_INFINITY
-              : NumericUtils.floatToSortableInt(max.floatValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Integer.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        default:
-          // should never happen
-          throw new IllegalArgumentException("Invalid NumericType");
-      }
-    }
-    
-    private void nextRange() {
-      assert rangeBounds.size() % 2 == 0;
-
-      currentLowerBound = rangeBounds.removeFirst();
-      assert currentUpperBound == null || currentUpperBound.compareTo(currentLowerBound) <= 0 :
-        "The current upper bound must be <= the new lower bound";
-      
-      currentUpperBound = rangeBounds.removeFirst();
-    }
-    
-    @Override
-    protected final BytesRef nextSeekTerm(BytesRef term) {
-      while (rangeBounds.size() >= 2) {
-        nextRange();
-        
-        // if the new upper bound is before the term parameter, the sub-range is never a hit
-        if (term != null && term.compareTo(currentUpperBound) > 0)
-          continue;
-        // never seek backwards, so use current term if lower bound is smaller
-        return (term != null && term.compareTo(currentLowerBound) > 0) ?
-          term : currentLowerBound;
-      }
-      
-      // no more sub-range enums available
-      assert rangeBounds.isEmpty();
-      currentLowerBound = currentUpperBound = null;
-      return null;
-    }
-    
-    @Override
-    protected final AcceptStatus accept(BytesRef term) {
-      while (currentUpperBound == null || term.compareTo(currentUpperBound) > 0) {
-        if (rangeBounds.isEmpty())
-          return AcceptStatus.END;
-        // peek next sub-range, only seek if the current term is smaller than next lower bound
-        if (term.compareTo(rangeBounds.getFirst()) < 0)
-          return AcceptStatus.NO_AND_SEEK;
-        // step forward to next range without seeking, as the next lower range bound is less than or equal to the current term
-        nextRange();
-      }
-      return AcceptStatus.YES;
-    }
-
-  }
-  
-}
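
For reference, a minimal sketch of how the removed factory methods were typically used (field names are hypothetical):

    // Matches prices in [10.0, 20.0); a null bound would make the range half-open.
    Query priceRange = NumericRangeQuery.newDoubleRange("price", 10.0, 20.0, true, false);
    // Half-open range using the default precisionStep: all counts >= 100.
    Query countMin = NumericRangeQuery.newLongRange("count", 100L, null, true, false);
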
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index bf5a373..08caf6f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -83,8 +83,9 @@
    */
   public void add(Term term) {
     int position = 0;
-    if(positions.size() > 0)
-        position = positions.get(positions.size()-1).intValue() + 1;
+    if (positions.size() > 0) {
+      position = positions.get(positions.size()-1).intValue() + 1;
+    }
 
     add(term, position);
   }
@@ -117,10 +118,10 @@
    * Returns the relative positions of terms in this phrase.
    */
   public int[] getPositions() {
-      int[] result = new int[positions.size()];
-      for(int i = 0; i < positions.size(); i++)
-          result[i] = positions.get(i).intValue();
-      return result;
+    int[] result = new int[positions.size()];
+    for(int i = 0; i < positions.size(); i++)
+      result[i] = positions.get(i).intValue();
+    return result;
   }
 
   @Override
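
The reformatted add(Term) appends each term one position after the last; a minimal sketch of the behavior, assuming the Lucene 4.x-era PhraseQuery API and a hypothetical field:

    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("body", "quick"));  // position 0
    pq.add(new Term("body", "brown"));  // position 1 (last position + 1)
    pq.add(new Term("body", "fox"));    // position 2; matches the exact phrase "quick brown fox"
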
diff --git a/lucene/core/src/java/org/apache/lucene/search/PrefixQuery.java b/lucene/core/src/java/org/apache/lucene/search/PrefixQuery.java
index 84bdca2..f3c9389 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PrefixQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PrefixQuery.java
@@ -19,11 +19,12 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 
 /** A Query that matches documents containing terms with a specified prefix. A PrefixQuery
  * is built by QueryParser for input like <code>app*</code>.
@@ -31,13 +32,16 @@
  * <p>This query uses the {@link
  * MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
  * rewrite method. */
+
 public class PrefixQuery extends MultiTermQuery {
-  private Term prefix;
+  private final Term prefix;
+  private final CompiledAutomaton compiled;
 
   /** Constructs a query for terms starting with <code>prefix</code>. */
   public PrefixQuery(Term prefix) {
     super(prefix.field());
     this.prefix = prefix;
+    this.compiled = new CompiledAutomaton(prefix.bytes());
   }
 
   /** Returns the prefix of this query. */
@@ -45,13 +49,11 @@
   
   @Override  
   protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
-    TermsEnum tenum = terms.iterator(null);
-    
     if (prefix.bytes().length == 0) {
       // no prefix -- match all terms for this field:
-      return tenum;
+      return terms.iterator(null);
     }
-    return new PrefixTermsEnum(tenum, prefix.bytes());
+    return compiled.getTermsEnum(terms);
   }
 
   /** Prints a user-readable version of this query. */
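
The public API is unchanged by the switch to a precompiled automaton; a minimal usage sketch (field name hypothetical):

    // Matches every term starting with "app" in field "body", as QueryParser builds for "app*":
    Query q = new PrefixQuery(new Term("body", "app"));
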
diff --git a/lucene/core/src/java/org/apache/lucene/search/Query.java b/lucene/core/src/java/org/apache/lucene/search/Query.java
index 92dc692..f9d7410 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Query.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Query.java
@@ -36,7 +36,6 @@
     <li> {@link FuzzyQuery}
     <li> {@link RegexpQuery}
     <li> {@link TermRangeQuery}
-    <li> {@link NumericRangeQuery}
     <li> {@link ConstantScoreQuery}
     <li> {@link DisjunctionMaxQuery}
     <li> {@link MatchAllDocsQuery}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java b/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
index 47d9740..a915304 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ScoringRewrite.java
@@ -18,19 +18,20 @@
  */
 
 import java.io.IOException;
+
+import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.MultiTermQuery.RewriteMethod;
-
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 
 /** 
  * Base rewrite method that translates each term into a query, and keeps
@@ -112,7 +113,7 @@
       for (int i = 0; i < size; i++) {
         final int pos = sort[i];
         final Term term = new Term(query.getField(), col.terms.get(pos, new BytesRef()));
-        assert reader.docFreq(term) == termStates[pos].docFreq();
+        assert termStates[pos].hasOnlyRealTerms() == false || reader.docFreq(term) == termStates[pos].docFreq();
         addClause(result, term, termStates[pos].docFreq(), query.getBoost() * boost[pos], termStates[pos]);
       }
     }
@@ -137,7 +138,7 @@
       final int e = terms.add(bytes);
       final TermState state = termsEnum.termState();
       assert state != null; 
-      if (e < 0 ) {
+      if (e < 0) {
         // duplicate term: update docFreq
         final int pos = (-e)-1;
         array.termState[pos].register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortField.java b/lucene/core/src/java/org/apache/lucene/search/SortField.java
index daac17c..346a723 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortField.java
@@ -34,6 +34,8 @@
  */
 public class SortField {
 
+  // TODO: remove this eventually (FieldTypes knows the type):
+
   /**
    * Specifies the type of the terms to be sorted, or special types such as CUSTOM
    */
@@ -146,8 +148,8 @@
       if (missingValue != STRING_FIRST && missingValue != STRING_LAST) {
         throw new IllegalArgumentException("For STRING type, missing value must be either STRING_FIRST or STRING_LAST");
       }
-    } else if (type != Type.INT && type != Type.FLOAT && type != Type.LONG && type != Type.DOUBLE) {
-      throw new IllegalArgumentException("Missing value only works for numeric or STRING types");
+    } else if (type != Type.INT && type != Type.FLOAT && type != Type.LONG && type != Type.DOUBLE && type != Type.CUSTOM) {
+      throw new IllegalArgumentException("Missing value only works for numeric or STRING or CUSTOM types");
     }
     this.missingValue = missingValue;
   }
@@ -332,16 +334,16 @@
       return new FieldComparator.DocComparator(numHits);
 
     case INT:
-      return new FieldComparator.IntComparator(numHits, field, (Integer) missingValue);
+      return new IntComparator(numHits, field, missingValue == null ? 0 : (Integer) missingValue);
 
     case FLOAT:
-      return new FieldComparator.FloatComparator(numHits, field, (Float) missingValue);
+      return new FloatComparator(numHits, field, missingValue == null ? 0 : (Float) missingValue);
 
     case LONG:
-      return new FieldComparator.LongComparator(numHits, field, (Long) missingValue);
+      return new LongComparator(numHits, field, missingValue == null ? 0 : (Long) missingValue);
 
     case DOUBLE:
-      return new FieldComparator.DoubleComparator(numHits, field, (Double) missingValue);
+      return new DoubleComparator(numHits, field, missingValue == null ? 0 : (Double) missingValue);
 
     case CUSTOM:
       assert comparatorSource != null;
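
With the null-safe comparators above, a missing numeric value now defaults to 0 unless set explicitly; a minimal sketch (field name hypothetical):

    SortField byPrice = new SortField("price", SortField.Type.LONG);
    byPrice.setMissingValue(Long.MAX_VALUE);  // sort docs without a value last (ascending)
    Sort sort = new Sort(byPrice);
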
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSelector.java b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSelector.java
index e3f275a..f91403b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSelector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSelector.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
@@ -49,13 +50,7 @@
    * Wraps a multi-valued SortedNumericDocValues as a single-valued view, using the specified selector 
    * and numericType.
    */
-  public static NumericDocValues wrap(SortedNumericDocValues sortedNumeric, Type selector, SortField.Type numericType) {
-    if (numericType != SortField.Type.INT &&
-        numericType != SortField.Type.LONG && 
-        numericType != SortField.Type.FLOAT &&
-        numericType != SortField.Type.DOUBLE) {
-      throw new IllegalArgumentException("numericType must be a numeric type");
-    }
+  public static NumericDocValues wrap(SortedNumericDocValues sortedNumeric, Type selector) {
     final NumericDocValues view;
     NumericDocValues singleton = DocValues.unwrapSingleton(sortedNumeric);
     if (singleton != null) {
@@ -75,25 +70,8 @@
           throw new AssertionError();
       }
     }
-    // undo the numericutils sortability
-    switch(numericType) {
-      case FLOAT:
-        return new NumericDocValues() {
-          @Override
-          public long get(int docID) {
-            return NumericUtils.sortableFloatBits((int) view.get(docID));
-          }
-        };
-      case DOUBLE:
-        return new NumericDocValues() {
-          @Override
-          public long get(int docID) {
-            return NumericUtils.sortableDoubleBits(view.get(docID));
-          }
-        };
-      default:
-        return view;
-    }
+
+    return view;
   }
   
   /** Wraps a SortedNumericDocValues and returns the first value (min) */
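
Callers now pass only the selector; a minimal sketch of the simplified wrap (reader, field, and docID are hypothetical):

    SortedNumericDocValues multi = DocValues.getSortedNumeric(leafReader, "price");
    NumericDocValues single = SortedNumericSelector.wrap(multi, SortedNumericSelector.Type.MIN);
    long minForDoc = single.get(docID);  // raw sortable bits; float/double decoding no longer happens here
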
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
index eca8ab6..df7c7e3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
@@ -135,31 +135,31 @@
   public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
     switch(type) {
       case INT:
-        return new FieldComparator.IntComparator(numHits, getField(), (Integer) missingValue) {
+        return new IntComparator(numHits, getField(), missingValue == null ? 0 : (Integer) missingValue) {
           @Override
           protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
-            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type);
+            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector);
           } 
         };
       case FLOAT:
-        return new FieldComparator.FloatComparator(numHits, getField(), (Float) missingValue) {
+        return new FloatComparator(numHits, getField(), missingValue == null ? 0 : (Float) missingValue) {
           @Override
           protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
-            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type);
+            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector);
           } 
         };
       case LONG:
-        return new FieldComparator.LongComparator(numHits, getField(), (Long) missingValue) {
+        return new LongComparator(numHits, getField(), missingValue == null ? 0 : (Long) missingValue) {
           @Override
           protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
-            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type);
+            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector);
           }
         };
       case DOUBLE:
-        return new FieldComparator.DoubleComparator(numHits, getField(), (Double) missingValue) {
+        return new DoubleComparator(numHits, getField(), missingValue == null ? 0 : (Double) missingValue) {
           @Override
           protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
-            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type);
+            return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector);
           } 
         };
       default:
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java b/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java
index a382e82..1855274 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedSetSelector.java
@@ -77,7 +77,7 @@
       return new MinValue(sortedSet);
     } else {
       if (sortedSet instanceof RandomAccessOrds == false) {
-        throw new UnsupportedOperationException("codec does not support random access ordinals, cannot use selector: " + selector);
+        throw new UnsupportedOperationException("codec does not support random access ordinals, cannot use selector: " + selector + "; sortedSet=" + sortedSet);
       }
       RandomAccessOrds randomOrds = (RandomAccessOrds) sortedSet;
       switch(selector) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java b/lucene/core/src/java/org/apache/lucene/search/TermFilter.java
similarity index 88%
rename from lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
rename to lucene/core/src/java/org/apache/lucene/search/TermFilter.java
index c075984..c51915d 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermFilter.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.queries;
+package org.apache.lucene.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -66,6 +66,14 @@
     if (!termsEnum.seekExact(term.bytes())) {
       return null;
     }
+
+    int maxDoc = context.reader().maxDoc();
+    if (termsEnum.docFreq() == maxDoc) {
+      // Term matches all docs
+      // TODO: is there a better way?  Do we have a BitsDocIdSet that I can just wrap acceptDocs with if it's non-null?
+      return BitsFilteredDocIdSet.wrap(DocIdSet.full(maxDoc), acceptDocs);
+    }
+
     return new DocIdSet() {
       @Override
       public DocIdSetIterator iterator() throws IOException {
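
The added fast path returns a full DocIdSet when the term's docFreq equals maxDoc, skipping postings iteration for that segment entirely; usage is unchanged, as in this minimal sketch (assuming the single-Term constructor and a hypothetical term):

    Filter f = new TermFilter(new Term("status", "active"));
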
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
index 9789a32..4ba0169 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermRangeFilter.java
@@ -1,5 +1,6 @@
 package org.apache.lucene.search;
 
+import org.apache.lucene.document.Document;
 import org.apache.lucene.util.BytesRef;
 
 /*
@@ -25,15 +26,15 @@
  *
  * <p>This filter matches the documents looking for terms that fall into the
  * supplied range according to {@link
- * Byte#compareTo(Byte)},  It is not intended
- * for numerical ranges; use {@link NumericRangeFilter} instead.
+ * BytesRef#compareTo(BytesRef)}.
  *
  * <p>If you construct a large number of range filters with different ranges but on the 
  * same field, {@link DocValuesRangeFilter} may have significantly better performance. 
  * @since 2.9
  */
 public class TermRangeFilter extends MultiTermQueryWrapperFilter<TermRangeQuery> {
-    
+  private final String desc;
+
   /**
    * @param fieldName The field this range applies to
    * @param lowerTerm The lower bound on this range
@@ -45,8 +46,15 @@
    *  and includeUpper)
    */
   public TermRangeFilter(String fieldName, BytesRef lowerTerm, BytesRef upperTerm,
-                     boolean includeLower, boolean includeUpper) {
-      super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper));
+                         boolean includeLower, boolean includeUpper) {
+    super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper));
+    this.desc = null;
+  }
+
+  public TermRangeFilter(String fieldName, BytesRef lowerTerm, BytesRef upperTerm,
+                         boolean includeLower, boolean includeUpper, String desc) {
+    super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper));
+    this.desc = desc;
   }
 
   /**
@@ -63,7 +71,7 @@
    * less than or equal to <code>upperTerm</code>.
    */
   public static TermRangeFilter Less(String fieldName, BytesRef upperTerm) {
-      return new TermRangeFilter(fieldName, null, upperTerm, false, true);
+    return new TermRangeFilter(fieldName, null, upperTerm, false, true);
   }
 
   /**
@@ -71,7 +79,7 @@
    * greater than or equal to <code>lowerTerm</code>.
    */
   public static TermRangeFilter More(String fieldName, BytesRef lowerTerm) {
-      return new TermRangeFilter(fieldName, lowerTerm, null, true, false);
+    return new TermRangeFilter(fieldName, lowerTerm, null, true, false);
   }
   
   /** Returns the lower value of this range filter */
@@ -85,4 +93,13 @@
   
   /** Returns <code>true</code> if the upper endpoint is inclusive */
   public boolean includesUpper() { return query.includesUpper(); }
+
+  @Override
+  public String toString() {
+    if (desc == null) {
+      return super.toString();
+    } else {
+      return desc;
+    }
+  }
 }
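
A minimal sketch of the new description-carrying constructor (values are hypothetical):

    TermRangeFilter f = new TermRangeFilter("id", new BytesRef("a"), new BytesRef("m"),
                                            true, false, "ids a-m");
    System.out.println(f);  // prints "ids a-m" instead of the default filter representation
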
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermRangeQuery.java
index 794d5cd..4a5725c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermRangeQuery.java
@@ -25,14 +25,13 @@
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 
 /**
  * A Query that matches documents within an range of terms.
  *
  * <p>This query matches the documents looking for terms that fall into the
- * supplied range according to {@link
- * Byte#compareTo(Byte)}. It is not intended
- * for numerical ranges; use {@link NumericRangeQuery} instead.
+ * supplied range according to {@link BytesRef#compareTo(BytesRef)}.
  *
  * <p>This query uses the {@link
  * MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
@@ -41,11 +40,11 @@
  */
 
 public class TermRangeQuery extends MultiTermQuery {
-  private BytesRef lowerTerm;
-  private BytesRef upperTerm;
-  private boolean includeLower;
-  private boolean includeUpper;
-
+  private final BytesRef lowerTerm;
+  private final BytesRef upperTerm;
+  private final boolean includeLower;
+  private final boolean includeUpper;
+  private final CompiledAutomaton compiled;
 
   /**
    * Constructs a query selecting all terms greater/equal than <code>lowerTerm</code>
@@ -75,6 +74,7 @@
     this.upperTerm = upperTerm;
     this.includeLower = includeLower;
     this.includeUpper = includeUpper;
+    this.compiled = new CompiledAutomaton(lowerTerm, lowerTerm == null || includeLower, upperTerm, upperTerm == null || includeUpper);
   }
 
   /**
@@ -101,16 +101,37 @@
   @Override
   protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
     if (lowerTerm != null && upperTerm != null && lowerTerm.compareTo(upperTerm) > 0) {
+      // Matches no terms:
       return TermsEnum.EMPTY;
     }
-    
+
+    if (terms.size() == 0) {
+      // No terms
+      return TermsEnum.EMPTY;
+    }
+
+    BytesRef minTerm = terms.getMin();
+    BytesRef maxTerm = terms.getMax();
+
+    // Optimization: if our range is outside of the range indexed in this segment, skip it:
+    if (upperTerm != null && minTerm.compareTo(upperTerm) > 0) {
+      return TermsEnum.EMPTY;
+    }
+
+    if (lowerTerm != null && maxTerm.compareTo(lowerTerm) < 0) {
+      return TermsEnum.EMPTY;
+    }      
+     
     TermsEnum tenum = terms.iterator(null);
     
     if ((lowerTerm == null || (includeLower && lowerTerm.length == 0)) && upperTerm == null) {
-      return tenum;
+      // Matches all terms:
+      return terms.iterator(null);
     }
-    return new TermRangeTermsEnum(tenum,
-        lowerTerm, upperTerm, includeLower, includeUpper);
+
+    // TODO: we can detect when range matches all terms here, but then how to optimize?  It's best to just let auto-prefix take it?
+
+    return compiled.getTermsEnum(terms);
   }
 
   /** Prints a user-readable version of this query. */
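
A minimal usage sketch; with the checks above, a segment whose [minTerm, maxTerm] lies entirely outside the requested range now returns TermsEnum.EMPTY without touching the terms dictionary (values are hypothetical):

    Query q = new TermRangeQuery("title", new BytesRef("apple"), new BytesRef("banana"),
                                 true, true);  // apple <= term <= banana
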
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermStatistics.java b/lucene/core/src/java/org/apache/lucene/search/TermStatistics.java
index 13480ab..255778a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermStatistics.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermStatistics.java
@@ -30,7 +30,7 @@
   
   public TermStatistics(BytesRef term, long docFreq, long totalTermFreq) {
     assert docFreq >= 0;
-    assert totalTermFreq == -1 || totalTermFreq >= docFreq; // #positions must be >= #postings
+    assert totalTermFreq == -1 || totalTermFreq >= docFreq: "totalTermFreq=" + totalTermFreq + " docFreq=" + docFreq; // #positions must be >= #postings
     this.term = term;
     this.docFreq = docFreq;
     this.totalTermFreq = totalTermFreq;
diff --git a/lucene/core/src/java/org/apache/lucene/util/BitSet.java b/lucene/core/src/java/org/apache/lucene/util/BitSet.java
index 2ddb615..c33d3e0 100644
--- a/lucene/core/src/java/org/apache/lucene/util/BitSet.java
+++ b/lucene/core/src/java/org/apache/lucene/util/BitSet.java
@@ -66,7 +66,7 @@
   /** Assert that the current doc is -1. */
   protected final void assertUnpositioned(DocIdSetIterator iter) {
     if (iter.docID() != -1) {
-      throw new IllegalStateException("This operation only works with an unpositioned iterator, got current position = " + iter.docID());
+      throw new IllegalStateException("This operation only works with an unpositioned iterator, got current position=" + iter.docID() + " iter=" + iter);
     }
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/Bits.java b/lucene/core/src/java/org/apache/lucene/util/Bits.java
index a5cef3b..22074d3 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Bits.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Bits.java
@@ -43,7 +43,7 @@
   public static class MatchAllBits implements Bits {
     final int len;
     
-    public MatchAllBits( int len ) {
+    public MatchAllBits(int len) {
       this.len = len;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/HalfFloat.java b/lucene/core/src/java/org/apache/lucene/util/HalfFloat.java
new file mode 100644
index 0000000..1ca87af
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/util/HalfFloat.java
@@ -0,0 +1,80 @@
+package org.apache.lucene.util;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Utility class to convert half-precision 16-bit floating-point numbers according
+ *  to IEEE 754-2008. */
+
+public class HalfFloat {
+
+  // From https://github.com/ata4/ioutils/blob/master/src/info/ata4/io/util/HalfFloat.java, in turn from http://stackoverflow.com/a/6162687
+  private HalfFloat() {
+  }
+
+  public static float shortToFloat(short hbits) {
+    int mant = hbits & 0x03ff;          // 10 bits mantissa
+    int exp = hbits & 0x7c00;           // 5 bits exponent
+    if (exp == 0x7c00) {                // NaN/Inf
+      exp = 0x3fc00;                  // -> NaN/Inf
+    } else if (exp != 0) {              // normalized value
+      exp += 0x1c000;                 // exp - 15 + 127
+      if (mant == 0 && exp > 0x1c400) {  // smooth transition
+        return Float.intBitsToFloat((hbits & 0x8000) << 16
+                                    | exp << 13 | 0x3ff);
+      }
+    } else if (mant != 0) {             // && exp==0 -> subnormal
+      exp = 0x1c400;                  // make it normal
+      do {
+        mant <<= 1;                 // mantissa * 2
+        exp -= 0x400;               // decrease exp by 1
+      } while ((mant & 0x400) == 0);  // while not normal
+      mant &= 0x3ff;                  // discard subnormal bit
+    }                                   // else +/-0 -> +/-0
+    return Float.intBitsToFloat(        // combine all parts
+                                (hbits & 0x8000) << 16      // sign  << ( 31 - 15 )
+                                | (exp | mant) << 13);      // value << ( 23 - 10 )
+  }
+
+  public static short floatToShort(float fval) {
+    int fbits = Float.floatToIntBits(fval);
+    int sign = fbits >>> 16 & 0x8000;           // sign only
+    int val = (fbits & 0x7fffffff) + 0x1000;    // rounded value
+    if (val >= 0x47800000) {                    // might be or become NaN/Inf
+      // avoid Inf due to rounding
+      if ((fbits & 0x7fffffff) >= 0x47800000) { // is or must become NaN/Inf
+        if (val < 0x7f800000) {             // was value but too large
+          return (short) (sign | 0x7c00);           // make it +/-Inf
+        }
+        return (short) (sign | 0x7c00 |              // remains +/-Inf or NaN
+                        (fbits & 0x007fffff) >>> 13); // keep NaN (and Inf) bits
+      }
+      return (short) (sign | 0x7bff);                   // unrounded not quite Inf
+    }
+    if (val >= 0x38800000) {                    // remains normalized value
+      return (short) (sign | val - 0x38000000 >>> 13);  // exp - 127 + 15
+    }
+    if (val < 0x33000000) {                     // too small for subnormal
+      return (short) sign;                            // becomes +/-0
+    }
+    val = (fbits & 0x7fffffff) >>> 23;          // tmp exp for subnormal calc
+    return (short) (sign | ((fbits & 0x7fffff | 0x800000) // add subnormal bit
+                            + (0x800000 >>> val - 102)          // round depending on cut off
+                            >>> 126 - val)); // div by 2^(1-(exp-127+15)) and >> 13 | exp=0
+  }
+}
+
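
A minimal round-trip sketch; values exactly representable in half precision survive unchanged, everything else is rounded to the nearest 16-bit value:

    short h = HalfFloat.floatToShort(1.5f);   // 1.5 is exactly representable (1.1b x 2^0)
    float back = HalfFloat.shortToFloat(h);   // back == 1.5f
    short r = HalfFloat.floatToShort(0.1f);   // 0.1f is not representable in half precision; this rounds
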
diff --git a/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java b/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
index d2f244d..2eb90f5 100644
--- a/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
+++ b/lucene/core/src/java/org/apache/lucene/util/NumericUtils.java
@@ -18,269 +18,35 @@
  */
 
 import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Arrays;
 
-import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.document.DoubleField; // javadocs
-import org.apache.lucene.document.FloatField; // javadocs
-import org.apache.lucene.document.IntField; // javadocs
-import org.apache.lucene.document.LongField; // javadocs
 import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.FilteredTermsEnum;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.NumericRangeFilter;
-import org.apache.lucene.search.NumericRangeQuery; // for javadocs
 
 /**
  * This is a helper class that generates prefix-encoded representations for numerical values
  * and supplies converters to represent float/double values as sortable integers/longs.
  *
- * <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
- * into multiple intervals for searching: The center of the range is searched only with
- * the lowest possible precision in the trie, while the boundaries are matched
- * more exactly. This reduces the number of terms dramatically.
- *
- * <p>This class generates terms to achieve this: First the numerical integer values need to
- * be converted to bytes. For that, integer values (32-bit or 64-bit) are made unsigned
- * and the bits are converted to ASCII chars, 7 bits per char. The resulting byte[] is
- * sortable like the original integer value (even using UTF-8 sort order). Each value is also
- * prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
- * during encoding.
- *
  * <p>To also index floating point numbers, this class supplies two methods to convert them
- * to integer values by changing their bit layout: {@link #doubleToSortableLong},
- * {@link #floatToSortableInt}. You will have no precision loss by
+ * to integer values by changing their bit layout: {@link #doubleToLong},
+ * {@link #floatToInt}. You will have no precision loss by
  * converting floating point numbers to integers and back (only that the integer form
  * is not usable). Other data types like dates can easily be converted to longs or ints (e.g.
  * date to long: {@link java.util.Date#getTime}).
  *
- * <p>For easy usage, the trie algorithm is implemented for indexing inside
- * {@link NumericTokenStream} that can index <code>int</code>, <code>long</code>,
- * <code>float</code>, and <code>double</code>. For querying,
- * {@link NumericRangeQuery} and {@link NumericRangeFilter} implement the query part
- * for the same data types.
- *
- * <p>This class can also be used, to generate lexicographically sortable (according to
- * {@link BytesRef#getUTF8SortedAsUTF16Comparator()}) representations of numeric data
- * types for other usages (e.g. sorting).
- *
  * @lucene.internal
  * @since 2.9, API changed in a non-backwards-compatible way in 4.0
  */
 public final class NumericUtils {
 
   private NumericUtils() {} // no instance!
-  
-  /**
-   * The default precision step used by {@link LongField},
-   * {@link DoubleField}, {@link NumericTokenStream}, {@link
-   * NumericRangeQuery}, and {@link NumericRangeFilter}.
-   */
-  public static final int PRECISION_STEP_DEFAULT = 16;
-  
-  /**
-   * The default precision step used by {@link IntField} and
-   * {@link FloatField}.
-   */
-  public static final int PRECISION_STEP_DEFAULT_32 = 8;
-  
-  /**
-   * Longs are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_LONG = 0x20;
 
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>long</code> values.
-   * @see #longToPrefixCodedBytes
-   */
-  public static final int BUF_SIZE_LONG = 63/7 + 2;
-
-  /**
-   * Integers are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_INT+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_INT  = 0x60;
-
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>int</code> values.
-   * @see #intToPrefixCodedBytes
-   */
-  public static final int BUF_SIZE_INT = 31/7 + 2;
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This method is used by {@link NumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0. 
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
-    longToPrefixCodedBytes(val, shift, bytes);
-  }
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This method is used by {@link NumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0.
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
-    intToPrefixCodedBytes(val, shift, bytes);
-  }
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This is method is used by {@link NumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0.
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void longToPrefixCodedBytes(final long val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..63
-    if ((shift & ~0x3f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
-    }
-    int nChars = (((63-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(BUF_SIZE_LONG);
-    bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
-    long sortableBits = val ^ 0x8000000000000000L;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This method is used by {@link NumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0. 
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void intToPrefixCodedBytes(final int val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..31
-    if ((shift & ~0x1f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
-    }
-    int nChars = (((31-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(NumericUtils.BUF_SIZE_LONG);  // use the max
-    bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
-    int sortableBits = val ^ 0x80000000;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-
-  /**
-   * Returns the shift value from a prefix encoded {@code long}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedLongShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
-    if (shift > 63 || shift < 0)
-      throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
-    return shift;
-  }
-
-  /**
-   * Returns the shift value from a prefix encoded {@code int}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedIntShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_INT;
-    if (shift > 31 || shift < 0)
-      throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
-    return shift;
-  }
-
-  /**
-   * Returns a long from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #longToPrefixCodedBytes
-   */
-  public static long prefixCodedToLong(final BytesRef val) {
-    long sortableBits = 0L;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
-  }
-
-  /**
-   * Returns an int from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #intToPrefixCodedBytes
-   */
-  public static int prefixCodedToInt(final BytesRef val) {
-    int sortableBits = 0;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
-  }
-
-  /**
-   * Converts a <code>double</code> value to a sortable signed <code>long</code>.
-   * The value is converted by getting its IEEE 754 floating-point &quot;double format&quot;
-   * bit layout and then swapping some bits, so the result can be compared as a long.
-   * By this the precision is not reduced, but the value can easily be used as a long.
-   * The sort order (including {@link Double#NaN}) is defined by
-   * {@link Double#compareTo}; {@code NaN} is greater than positive infinity.
-   * @see #sortableLongToDouble
-   */
-  public static long doubleToSortableLong(double val) {
-    return sortableDoubleBits(Double.doubleToLongBits(val));
-  }
-
-  /**
-   * Converts a sortable <code>long</code> back to a <code>double</code>.
-   * @see #doubleToSortableLong
-   */
-  public static double sortableLongToDouble(long val) {
-    return Double.longBitsToDouble(sortableDoubleBits(val));
+  public static short halfFloatToShort(float value) {
+    return sortableHalfFloatBits((short) HalfFloat.floatToShort(value));
   }
 
   /**
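
A minimal sketch of the renamed sortable conversions; the encoded integers order the same way the original floating-point values do:

    long a = NumericUtils.doubleToLong(-1.5);
    long b = NumericUtils.doubleToLong(2.5);
    assert a < b;                                     // sortable order is preserved
    assert NumericUtils.longToDouble(a) == -1.5;      // lossless round trip
    short hf = NumericUtils.halfFloatToShort(0.5f);   // new half-float variant
    assert NumericUtils.shortToHalfFloat(hf) == 0.5f;
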
@@ -290,297 +56,186 @@
    * By this the precision is not reduced, but the value can easily be used as an int.
    * The sort order (including {@link Float#NaN}) is defined by
    * {@link Float#compareTo}; {@code NaN} is greater than positive infinity.
-   * @see #sortableIntToFloat
+   * @see #intToFloat
    */
-  public static int floatToSortableInt(float val) {
+  public static int floatToInt(float val) {
     return sortableFloatBits(Float.floatToIntBits(val));
   }
 
   /**
-   * Converts a sortable <code>int</code> back to a <code>float</code>.
-   * @see #floatToSortableInt
+   * Converts a <code>double</code> value to a sortable signed <code>long</code>.
+   * The value is converted by getting its IEEE 754 floating-point &quot;double format&quot;
+   * bit layout and then swapping some bits, so the result can be compared as a long.
+   * By this the precision is not reduced, but the value can easily be used as a long.
+   * The sort order (including {@link Double#NaN}) is defined by
+   * {@link Double#compareTo}; {@code NaN} is greater than positive infinity.
+   * @see #longToDouble
    */
-  public static float sortableIntToFloat(int val) {
+  public static long doubleToLong(double val) {
+    return sortableDoubleBits(Double.doubleToLongBits(val));
+  }
+
+  public static float shortToHalfFloat(short v) {
+    return HalfFloat.shortToFloat(sortableHalfFloatBits(v));
+  }
+
+  /**
+   * Converts a sortable <code>int</code> back to a <code>float</code>.
+   * @see #floatToInt
+   */
+  public static float intToFloat(int val) {
     return Float.intBitsToFloat(sortableFloatBits(val));
   }
   
-  /** Converts IEEE 754 representation of a double to sortable order (or back to the original) */
-  public static long sortableDoubleBits(long bits) {
-    return bits ^ (bits >> 63) & 0x7fffffffffffffffL;
+  /**
+   * Converts a sortable <code>long</code> back to a <code>double</code>.
+   * @see #doubleToLong
+   */
+  public static double longToDouble(long val) {
+    return Double.longBitsToDouble(sortableDoubleBits(val));
   }
-  
+
+  /** Converts IEEE 754 representation of a half float to sortable order (or back to the original) */
+  public static short sortableHalfFloatBits(short bits) {
+    return (short) (bits ^ (bits >> 15) & 0x7fff);
+  }
+
   /** Converts IEEE 754 representation of a float to sortable order (or back to the original) */
   public static int sortableFloatBits(int bits) {
     return bits ^ (bits >> 31) & 0x7fffffff;
   }
 
-  /**
-   * Splits a long range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link NumericRangeQuery}.
-   */
-  public static void splitLongRange(final LongRangeBuilder builder,
-    final int precisionStep,  final long minBound, final long maxBound
-  ) {
-    splitRange(builder, 64, precisionStep, minBound, maxBound);
+  /** Converts IEEE 754 representation of a double to sortable order (or back to the original) */
+  public static long sortableDoubleBits(long bits) {
+    return bits ^ (bits >> 63) & 0x7fffffffffffffffL;
   }
   
-  /**
-   * Splits an int range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link NumericRangeQuery}.
-   */
-  public static void splitIntRange(final IntRangeBuilder builder,
-    final int precisionStep,  final int minBound, final int maxBound
-  ) {
-    splitRange(builder, 32, precisionStep, minBound, maxBound);
-  }
-  
-  /** This helper does the splitting for both 32 and 64 bit. */
-  private static void splitRange(
-    final Object builder, final int valSize,
-    final int precisionStep, long minBound, long maxBound
-  ) {
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    if (minBound > maxBound) return;
-    for (int shift=0; ; shift += precisionStep) {
-      // calculate new bounds for inner precision
-      final long diff = 1L << (shift+precisionStep),
-        mask = ((1L<<precisionStep) - 1L) << shift;
-      final boolean
-        hasLower = (minBound & mask) != 0L,
-        hasUpper = (maxBound & mask) != mask;
-      final long
-        nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
-        nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
-      final boolean
-        lowerWrapped = nextMinBound < minBound,
-        upperWrapped = nextMaxBound > maxBound;
-      
-      if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
-        // We are in the lowest precision or the next precision is not available.
-        addRange(builder, valSize, minBound, maxBound, shift);
-        // exit the split recursion loop
-        break;
-      }
-      
-      if (hasLower)
-        addRange(builder, valSize, minBound, minBound | mask, shift);
-      if (hasUpper)
-        addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
-      
-      // recurse to next precision
-      minBound = nextMinBound;
-      maxBound = nextMaxBound;
+  public static short bytesToShort(BytesRef bytes) {
+    if (bytes.length != 2) {
+      throw new IllegalArgumentException("incoming bytes should be length=2; got length=" + bytes.length);
     }
-  }
-  
-  /** Helper that delegates to correct range builder */
-  private static void addRange(
-    final Object builder, final int valSize,
-    long minBound, long maxBound,
-    final int shift
-  ) {
-    // for the max bound set all lower bits (that were shifted away):
-    // this is important for testing or other uses of the split range
-    // (e.g. to reconstruct the full range). The prefixEncoding will remove
-    // the bits anyway, so they do not hurt!
-    maxBound |= (1L << shift) - 1L;
-    // delegate to correct range builder
-    switch(valSize) {
-      case 64:
-        ((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
-        break;
-      case 32:
-        ((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
-        break;
-      default:
-        // Should not happen!
-        throw new IllegalArgumentException("valSize must be 32 or 64.");
-    }
-  }
-
-  /**
-   * Callback for {@link #splitLongRange}.
-   * You need to override only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed in a non-backwards-compatible way in 4.0
-   */
-  public static abstract class LongRangeBuilder {
-    
-    /**
-     * Overwrite this method, if you like to receive the already prefix encoded range bounds.
-     * You can directly build classical (inclusive) range queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
-     * Overwrite this method, if you like to receive the raw long range bounds.
-     * You can use this for e.g. debugging purposes (print out range bounds).
-     */
-    public void addRange(final long min, final long max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      longToPrefixCodedBytes(min, shift, minBytes);
-      longToPrefixCodedBytes(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Callback for {@link #splitIntRange}.
-   * You need to overwrite only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed non backwards-compliant in 4.0
-   */
-  public static abstract class IntRangeBuilder {
-    
-    /**
-     * Overwrite this method, if you like to receive the already prefix encoded range bounds.
-     * You can directly build classical range (inclusive) queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
-     * Overwrite this method, if you like to receive the raw int range bounds.
-     * You can use this for e.g. debugging purposes (print out range bounds).
-     */
-    public void addRange(final int min, final int max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      intToPrefixCodedBytes(min, shift, minBytes);
-      intToPrefixCodedBytes(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-      
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /** Just like FilteredTermsEnum, except it adds a limited
-   *  seekCeil implementation that only works with {@link
-   *  #filterPrefixCodedInts} and {@link
-   *  #filterPrefixCodedLongs}. */
-  private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
-    public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
-      super(tenum, false);
+    short sortableBits = 0;
+    for(int i=0;i<2;i++) {
+      sortableBits = (short) ((sortableBits << 8) | bytes.bytes[bytes.offset + i] & 0xff);
     }
 
-    @Override
-    @SuppressWarnings("fallthrough")
-    public SeekStatus seekCeil(BytesRef term) throws IOException {
+    return (short) (sortableBits ^ 0x8000);
+  }
 
-      // NOTE: This is not general!!  It only handles YES
-      // and END, because that's all we need for the numeric
-      // case here
+  public static int bytesToInt(BytesRef bytes) {
+    if (bytes.length != 4) {
+      throw new IllegalArgumentException("incoming bytes should be length=4; got length=" + bytes.length);
+    }
+    int sortableBits = 0;
+    for(int i=0;i<4;i++) {
+      sortableBits = (sortableBits << 8) | bytes.bytes[bytes.offset + i] & 0xff;
+    }
 
-      SeekStatus status = tenum.seekCeil(term);
-      if (status == SeekStatus.END) {
-        return SeekStatus.END;
-      }
+    return sortableBits ^ 0x80000000;
+  }
 
-      actualTerm = tenum.term();
+  public static long bytesToLong(BytesRef bytes) {
+    if (bytes.length != 8) {
+      throw new IllegalArgumentException("incoming bytes should be length=8; got length=" + bytes.length);
+    }
+    long sortableBits = 0;
+    for(int i=0;i<8;i++) {
+      sortableBits = (sortableBits << 8) | bytes.bytes[bytes.offset + i] & 0xff;
+    }
 
-      if (accept(actualTerm) == AcceptStatus.YES) {
-        return status;
-      } else {
-        return SeekStatus.END;
-      }
+    return sortableBits ^ 0x8000000000000000L;
+  }
+
+  public static BytesRef shortToBytes(short v) {
+    int sortableBits = v ^ 0x8000;
+    BytesRef token = new BytesRef(2);
+    token.length = 2;
+    int index = 1;
+    while (index >= 0) {
+      token.bytes[index] = (byte) (sortableBits & 0xff);
+      index--;
+      sortableBits >>>= 8;
+    }
+    return token;
+  }
+
+  public static BytesRef intToBytes(int v) {
+    int sortableBits = v ^ 0x80000000;
+    BytesRef token = new BytesRef(4);
+    token.length = 4;
+    int index = 3;
+    while (index >= 0) {
+      token.bytes[index] = (byte) (sortableBits & 0xff);
+      index--;
+      sortableBits >>>= 8;
+    }
+    return token;
+  }
+
+  public static BytesRef longToBytes(long v) {
+    long sortableBits = v ^ 0x8000000000000000L;
+    BytesRef token = new BytesRef(8);
+    token.length = 8;
+    int index = 7;
+    while (index >= 0) {
+      token.bytes[index] = (byte) (sortableBits & 0xff);
+      index--;
+      sortableBits >>>= 8;
+    }
+    return token;
+  }
+
+  public static BytesRef halfFloatToBytes(float value) {
+    return shortToBytes(halfFloatToShort(value));
+  }
+
+  public static BytesRef floatToBytes(float value) {
+    return intToBytes(floatToInt(value));
+  }
+
+  public static BytesRef doubleToBytes(double value) {
+    return longToBytes(doubleToLong(value));
+  }
+
+  public static float bytesToHalfFloat(BytesRef bytes) {
+    return HalfFloat.shortToFloat(sortableHalfFloatBits(bytesToShort(bytes)));
+  }
+
+  public static float bytesToFloat(BytesRef bytes) {
+    return intToFloat(bytesToInt(bytes));
+  }
+
+  public static double bytesToDouble(BytesRef bytes) {
+    return longToDouble(bytesToLong(bytes));
+  }
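
As a hedged illustration of the fixed-width encoders and decoders above (a sketch, not part of the patch): the sign-bit flip makes unsigned byte comparison agree with numeric order, and decoding undoes it exactly:

    BytesRef neg = NumericUtils.intToBytes(-7);
    BytesRef pos = NumericUtils.intToBytes(42);
    assert neg.compareTo(pos) < 0;              // byte order matches numeric order
    assert NumericUtils.bytesToInt(pos) == 42;  // lossless round trip
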
+
+  public static BytesRef bigIntToBytes(BigInteger value, int maxBytes) {
+    byte[] bytes = value.toByteArray();
+    if (bytes.length > maxBytes) {
+      throw new IllegalArgumentException("BigInteger " + value + " exceeds allowed byte width " + maxBytes);
+    }
+    byte[] bytes2 = new byte[maxBytes];
+    System.arraycopy(bytes, 0, bytes2, maxBytes-bytes.length, bytes.length);
+    if (bytes.length < maxBytes && (bytes[0] & 0x80) != 0) {
+      Arrays.fill(bytes2, 0, maxBytes-bytes.length, (byte) 0xff);
+    }
+    sortableBigIntBytes(bytes2);
+    BytesRef br = new BytesRef(bytes2);
+    //System.out.println("BI " + value + " -> " + br);
+    return br;
+  }
+
+  public static BigInteger bytesToBigInt(BytesRef bytes) {
+    byte[] copy = new byte[bytes.length];
+    System.arraycopy(bytes.bytes, bytes.offset, copy, 0, bytes.length);
+    sortableBigIntBytes(copy);
+    return new BigInteger(copy);
+  }
+
+  private static void sortableBigIntBytes(byte[] bytes) {
+    // Flip the sign bit so that unsigned byte comparison matches signed numeric order:
+    bytes[0] ^= 0x80;
+    for(int i=1;i<bytes.length;i++)  {
+      bytes[i] ^= 0; // intentional no-op: the remaining bytes are already in unsigned order
     }
   }
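
A sketch of the BigInteger round trip under the same ordering convention, assuming the two helpers above; the width argument pads (and sign-extends) to a fixed size so terms stay comparable:

    BytesRef neg = NumericUtils.bigIntToBytes(BigInteger.valueOf(-123), 16);
    BytesRef pos = NumericUtils.bigIntToBytes(BigInteger.valueOf(123), 16);
    assert neg.compareTo(pos) < 0;                              // sign-byte flip keeps order
    assert NumericUtils.bytesToBigInt(neg).intValue() == -123;  // exact round trip
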
-
-  private static Terms intTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator(TermsEnum reuse) throws IOException {
-          return filterPrefixCodedInts(in.iterator(reuse));
-        }
-      };
-  }
-
-  private static Terms longTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator(TermsEnum reuse) throws IOException {
-          return filterPrefixCodedLongs(in.iterator(reuse));
-        }
-      };
-  }
-    
-  /** Returns the minimum int value indexed into this
-   *  numeric field. */
-  public static int getMinInt(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min: 
-    return NumericUtils.prefixCodedToInt(terms.getMin());
-  }
-
-  /** Returns the maximum int value indexed into this
-   *  numeric field. */
-  public static int getMaxInt(Terms terms) throws IOException {
-    return NumericUtils.prefixCodedToInt(intTerms(terms).getMax());
-  }
-
-  /** Returns the minimum long value indexed into this
-   *  numeric field. */
-  public static long getMinLong(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min: 
-    return NumericUtils.prefixCodedToLong(terms.getMin());
-  }
-
-  /** Returns the maximum long value indexed into this
-   *  numeric field. */
-  public static long getMaxLong(Terms terms) throws IOException {
-    return NumericUtils.prefixCodedToLong(longTerms(terms).getMax());
-  }
-  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/Automata.java b/lucene/core/src/java/org/apache/lucene/util/automaton/Automata.java
index 76523e8..f3b6071 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/Automata.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/Automata.java
@@ -72,6 +72,18 @@
     a.finishState();
     return a;
   }
+
+  /**
+   * Returns a new (deterministic) automaton that accepts all binary terms.
+   */
+  public static Automaton makeAnyBinary() {
+    Automaton a = new Automaton();
+    int s = a.createState();
+    a.setAccept(s, true);
+    a.addTransition(s, s, 0, 255);
+    a.finishState();
+    return a;
+  }
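
A quick sketch of makeAnyBinary compiled into a byte-level matcher; passing isBinary=true to ByteRunAutomaton skips the UTF-32 to UTF-8 conversion, so the single looping state accepts every byte sequence:

    ByteRunAutomaton r = new ByteRunAutomaton(Automata.makeAnyBinary(), true, Integer.MAX_VALUE);
    byte[] anyBytes = new byte[] {0, (byte) 0xff, 42};
    assert r.run(anyBytes, 0, anyBytes.length);  // accepts arbitrary binary terms
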
   
   /**
    * Returns a new (deterministic) automaton that accepts any single codepoint.
@@ -204,8 +216,166 @@
     return s;
   }
 
+  /** Creates a new deterministic, minimal automaton accepting
+   *  all binary terms in the specified interval.  Note that unlike
+   *  {@link #makeDecimalInterval}, the returned automaton is infinite,
+   *  because terms behave like floating point numbers leading with
+   *  a decimal point.  However, in the special case where min == max,
+   *  and both are inclusive, the automaton will be finite and accept
+   *  exactly one term. */
+  public static Automaton makeBinaryInterval(BytesRef min, boolean minInclusive, BytesRef max, boolean maxInclusive) {
+    if (min == null && minInclusive == false) {
+      throw new IllegalArgumentException("minInclusive must be true when min is null (open ended)");
+    }
+
+    if (max == null && maxInclusive == false) {
+      throw new IllegalArgumentException("maxInclusive must be true when max is null (open ended)");
+    }
+
+    if (min == null) {
+      if (max == null) {
+        // Accepts all terms:
+        return makeAnyBinary();
+      }
+      min = new BytesRef();
+      minInclusive = true;
+    }
+    int cmp;
+    if (max != null) {
+      cmp = min.compareTo(max);
+    } else {
+      cmp = -1;
+    }
+    if (cmp == 0) {
+      if (minInclusive == false || maxInclusive == false) {
+        return makeEmpty();
+      } else {
+        return makeBinary(min);
+      }
+    } else if (cmp > 0) {
+      // min > max, so the interval is empty
+      return makeEmpty();
+    }
+
+    Automaton a = new Automaton();
+    int startState = a.createState();
+    int sinkState = a.createState();
+    a.setAccept(sinkState, true);
+
+    // This state accepts all suffixes:
+    a.addTransition(sinkState, sinkState, 0, 255);
+
+    boolean equalPrefix = true;
+    int lastState = startState;
+    int firstMaxState = -1;
+    int sharedPrefixLength = 0;
+    for(int i=0;i<min.length;i++) {
+      int minLabel = min.bytes[min.offset+i] & 0xff;
+
+      int maxLabel;
+      if (max != null && equalPrefix && i < max.length) {
+        maxLabel = max.bytes[max.offset+i] & 0xff;
+      } else {
+        maxLabel = -1;
+      }
+
+      int nextState;
+      if (minInclusive && i == min.length-1 && (equalPrefix == false || minLabel != maxLabel)) {
+        nextState = sinkState;
+      } else {
+        nextState = a.createState();
+      }
+
+      if (equalPrefix) {
+
+        if (minLabel == maxLabel) {
+          // Still in shared prefix
+          a.addTransition(lastState, nextState, minLabel);
+        } else if (max == null) {
+          equalPrefix = false;
+          sharedPrefixLength = 0;
+          a.addTransition(lastState, sinkState, minLabel+1, 0xff);
+          a.addTransition(lastState, nextState, minLabel);
+        } else {
+          // This is the first point where min & max diverge:
+          assert maxLabel > minLabel;
+
+          a.addTransition(lastState, nextState, minLabel);
+
+          if (maxLabel > minLabel + 1) {
+            a.addTransition(lastState, sinkState, minLabel+1, maxLabel-1);
+          }
+
+          // Now fork off path for max:
+          if (maxInclusive || i < max.length-1) {
+            firstMaxState = a.createState();
+            if (i < max.length-1) {
+              a.setAccept(firstMaxState, true);
+            }
+            a.addTransition(lastState, firstMaxState, maxLabel);
+          }
+          equalPrefix = false;
+          sharedPrefixLength = i;
+        }
+      } else {
+        // OK, already diverged:
+        a.addTransition(lastState, nextState, minLabel);
+        if (minLabel < 255) {
+          a.addTransition(lastState, sinkState, minLabel+1, 255);
+        }
+      }
+      lastState = nextState;
+    }
+
+    // Accept any suffix appended to the min term:
+    if (equalPrefix == false && lastState != sinkState && lastState != startState) {
+      a.addTransition(lastState, sinkState, 0, 255);
+    }
+
+    if (minInclusive) {
+      // Accept exactly the min term:
+      a.setAccept(lastState, true);
+    }
+
+    if (max != null) {
+
+      // Now do max:
+      if (firstMaxState == -1) {
+        // Min was a full prefix of max
+        sharedPrefixLength = min.length;
+      } else {
+        lastState = firstMaxState;
+        sharedPrefixLength++;
+      }
+      for(int i=sharedPrefixLength;i<max.length;i++) {
+        int maxLabel = max.bytes[max.offset+i]&0xff;
+        if (maxLabel > 0) {
+          a.addTransition(lastState, sinkState, 0, maxLabel-1);
+        }
+        if (maxInclusive || i < max.length-1) {
+          int nextState = a.createState();
+          if (i < max.length-1) {
+            a.setAccept(nextState, true);
+          }
+          a.addTransition(lastState, nextState, maxLabel);
+          lastState = nextState;
+        }
+      }
+
+      if (maxInclusive) {
+        a.setAccept(lastState, true);
+      }
+    }
+
+    a.finishState();
+
+    assert a.isDeterministic(): a.toDot();
+
+    return a;
+  }
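
A hedged usage sketch (not part of this patch): because the returned automaton is infinite, every term extending the inclusive min bound is accepted, while the exclusive max bound itself is rejected:

    Automaton a = Automata.makeBinaryInterval(new BytesRef("aa"), true, new BytesRef("ab"), false);
    ByteRunAutomaton r = new ByteRunAutomaton(a, true, Integer.MAX_VALUE);
    BytesRef in = new BytesRef("aazzz");
    BytesRef out = new BytesRef("ab");
    assert r.run(in.bytes, in.offset, in.length);             // inside ["aa", "ab")
    assert r.run(out.bytes, out.offset, out.length) == false; // max is excluded
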
+
   /**
-   * Returns a new automaton that accepts strings representing decimal
+   * Returns a new automaton that accepts strings representing decimal (base 10)
    * non-negative integers in the given interval.
    * 
    * @param min minimal value of interval
@@ -218,7 +388,7 @@
    *              interval cannot be expressed with the given fixed number of
    *              digits
    */
-  public static Automaton makeInterval(int min, int max, int digits)
+  public static Automaton makeDecimalInterval(int min, int max, int digits)
       throws IllegalArgumentException {
     String x = Integer.toString(min);
     String y = Integer.toString(max);
@@ -275,7 +445,30 @@
     for (int i = 0, cp = 0; i < s.length(); i += Character.charCount(cp)) {
       int state = a.createState();
       cp = s.codePointAt(i);
-      a.addTransition(lastState, state, cp, cp);
+      a.addTransition(lastState, state, cp);
+      lastState = state;
+    }
+
+    a.setAccept(lastState, true);
+    a.finishState();
+
+    assert a.isDeterministic();
+    assert Operations.hasDeadStates(a) == false;
+
+    return a;
+  }
+
+  /**
+   * Returns a new (deterministic) automaton that accepts the single given
+   * binary term.
+   */
+  public static Automaton makeBinary(BytesRef term) {
+    Automaton a = new Automaton();
+    int lastState = a.createState();
+    for (int i=0;i<term.length;i++) {
+      int state = a.createState();
+      int label = term.bytes[term.offset+i] & 0xff;
+      a.addTransition(lastState, state, label);
       lastState = state;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java
index aff332f..3c6a8c5 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java
@@ -490,11 +490,50 @@
   public void getNextTransition(Transition t) {
     // Make sure there is still a transition left:
     assert (t.transitionUpto+3 - states[2*t.source]) <= 3*states[2*t.source+1];
+
+    // Make sure transitions are in fact sorted:
+    assert transitionSorted(t);
+
     t.dest = transitions[t.transitionUpto++];
     t.min = transitions[t.transitionUpto++];
     t.max = transitions[t.transitionUpto++];
   }
 
+  private boolean transitionSorted(Transition t) {
+
+    int upto = t.transitionUpto;
+    if (upto == states[2*t.source]) {
+      // Transition isn't initialized yet (this is the first transition); don't check:
+      return true;
+    }
+
+    int nextDest = transitions[upto];
+    int nextMin = transitions[upto+1];
+    int nextMax = transitions[upto+2];
+    if (nextMin > t.min) {
+      return true;
+    } else if (nextMin < t.min) {
+      return false;
+    }
+
+    // Min is equal, now test max:
+    if (nextMax > t.max) {
+      return true;
+    } else if (nextMax < t.max) {
+      return false;
+    }
+
+    // Max is also equal, now test dest:
+    if (nextDest > t.dest) {
+      return true;
+    } else if (nextDest < t.dest) {
+      return false;
+    }
+
+    // We should never see fully equal transitions here:
+    return false;
+  }
+
   /** Fill the provided {@link Transition} with the index'th
    *  transition leaving the specified state. */
   public void getTransition(int state, int index, Transition t) {
@@ -564,7 +603,7 @@
       //System.out.println("toDot: state " + state + " has " + numTransitions + " transitions; t.nextTrans=" + t.transitionUpto);
       for(int i=0;i<numTransitions;i++) {
         getNextTransition(t);
-        //System.out.println("  t.nextTrans=" + t.transitionUpto);
+        //System.out.println("  t.nextTrans=" + t.transitionUpto + " t=" + t);
         assert t.max >= t.min;
         b.append("  ");
         b.append(state);
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/ByteRunAutomaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/ByteRunAutomaton.java
index 46af6d8..f420809 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/ByteRunAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/ByteRunAutomaton.java
@@ -26,10 +26,10 @@
   public ByteRunAutomaton(Automaton a) {
     this(a, false, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
   }
-  
-  /** expert: if utf8 is true, the input is already byte-based */
-  public ByteRunAutomaton(Automaton a, boolean utf8, int maxDeterminizedStates) {
-    super(utf8 ? a : new UTF32ToUTF8().convert(a), 256, true, maxDeterminizedStates);
+
+  /** expert: if isBinary is true, the input is already byte-based, else it's UTF-32 and we'll convert to UTF-8 */
+  public ByteRunAutomaton(Automaton a, boolean isBinary, int maxDeterminizedStates) {
+    super(isBinary ? a : new UTF32ToUTF8().convert(a), 256, true, maxDeterminizedStates);
   }
 
   /**
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
index 0fd5907..a6e5c88 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.index.SingleTermsEnum;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.PrefixTermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 
@@ -49,6 +48,8 @@
     SINGLE, 
     /** Automaton that matches all Strings with a constant prefix. */
     PREFIX, 
+    /** Automaton that matches all binary terms (BytesRef) in a range from minTerm (inclusive or not) to maxTerm (inclusive or not). */
+    RANGE, 
     /** Catch-all for any other automata. */
     NORMAL
   };
@@ -59,10 +60,26 @@
   /** 
    * For {@link AUTOMATON_TYPE#PREFIX}, this is the prefix term; 
    * for {@link AUTOMATON_TYPE#SINGLE} this is the singleton term.
+   * For {@link AUTOMATON_TYPE#RANGE}, this is the min term.
    */
   public final BytesRef term;
 
   /** 
+   * Only used for {@link AUTOMATON_TYPE#RANGE}: the max term of the range. 
+   */
+  public final BytesRef maxTerm;
+
+  /** 
+   * Only used for {@link AUTOMATON_TYPE#RANGE}: true if the min term is included in the range. 
+   */
+  public final boolean minInclusive;
+
+  /** 
+   * Only used for {@link AUTOMATON_TYPE#RANGE}: true if the max term is included in the range. 
+   */
+  public final boolean maxInclusive;
+
+  /** 
    * Matcher for quickly determining if a byte[] is accepted.
    * only valid for {@link AUTOMATON_TYPE#NORMAL}.
    */
@@ -90,12 +107,86 @@
    */
   public final Boolean finite;
 
+  /** Which state accepts all suffixes; only set for RANGE and PREFIX, else -1. */
+  public final int sinkState;
+
   /** Create this, passing simplify=true and finite=null, so that we try
    *  to simplify the automaton and determine if it is finite. */
   public CompiledAutomaton(Automaton automaton) {
     this(automaton, null, true);
   }
 
+  // TODO: we could also allow direct binary automaton here: BlockTree can optimize that case more generally too, but we start with this
+  // more restricted (single term range) API:
+
+  /** Matches a range of terms.  Some terms dict implementations (e.g. BlockTree) can optimize this case by using
+   *  pre-computed auto-prefix terms stored in the index. */
+  public CompiledAutomaton(BytesRef minTerm, boolean minInclusive, BytesRef maxTerm, boolean maxInclusive) {
+    if (minTerm == null) {
+      this.term = new BytesRef();
+      this.minInclusive = true;
+    } else {
+      this.term = minTerm;
+      this.minInclusive = minInclusive;
+    }
+    this.maxTerm = maxTerm;
+    this.maxInclusive = maxInclusive;
+    commonSuffixRef = null;
+    finite = false;
+    automaton = Automata.makeBinaryInterval(term, minInclusive, maxTerm, maxInclusive);
+
+    // Compute sinkState for this automaton, if any (it won't exist if maxTerm == minTerm):
+    int numStates = automaton.getNumStates();
+    Transition t = new Transition();
+    int foundState = -1;
+    for(int s=0;s<numStates;s++) {
+      if (automaton.isAccept(s)) {
+        int count = automaton.initTransition(s, t);
+        boolean isSinkState = false;
+        for(int i=0;i<count;i++) {
+          automaton.getNextTransition(t);
+          if (t.dest == s && t.min == 0 && t.max == 0xff) {
+            isSinkState = true;
+            break;
+          }
+        }
+        if (isSinkState) {
+          foundState = s;
+          break;
+        }
+      }
+    }
+    sinkState = foundState;
+    // It's safe to allow an unlimited number of determinized states here:
+    runAutomaton = new ByteRunAutomaton(automaton, true, Integer.MAX_VALUE);
+    type = AUTOMATON_TYPE.RANGE;
+  }
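
A sketch of the new RANGE constructor in use; the compiled runAutomaton answers byte-level membership directly, and type tells callers which optimized path applies:

    CompiledAutomaton ca = new CompiledAutomaton(new BytesRef("b"), true, new BytesRef("d"), false);
    assert ca.type == CompiledAutomaton.AUTOMATON_TYPE.RANGE;
    byte[] c = new byte[] {'c'};
    assert ca.runAutomaton.run(c, 0, 1);  // "c" falls inside ["b", "d")
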
+
+  /** Matches the specified prefix term.  Some terms dict implementations (e.g. BlockTree) can optimize this case by using
+   *  pre-computed auto-prefix terms stored in the index. */
+  public CompiledAutomaton(BytesRef prefixTerm) {
+    this.term = prefixTerm;
+    type = AUTOMATON_TYPE.PREFIX;
+    automaton = new Automaton();
+    int lastState = automaton.createState();
+    for(int i=0;i<prefixTerm.length;i++) {
+      int state = automaton.createState();
+      automaton.addTransition(lastState, state, prefixTerm.bytes[prefixTerm.offset+i]&0xff);
+      lastState = state;
+    }
+    automaton.setAccept(lastState, true);
+    automaton.addTransition(lastState, lastState, 0, 255);
+    sinkState = lastState;
+    automaton.finishState();
+    // It's safe to allow an unlimited number of determinized states here:
+    runAutomaton = new ByteRunAutomaton(automaton, true, Integer.MAX_VALUE);
+    commonSuffixRef = null;
+    finite = false;
+    minInclusive = false;
+    maxInclusive = false;
+    maxTerm = null;
+  }
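
And a companion sketch for the PREFIX constructor: the self-loop added on the final state acts as the sink, so every term that starts with the prefix is accepted:

    CompiledAutomaton pa = new CompiledAutomaton(new BytesRef("ab"));
    assert pa.type == CompiledAutomaton.AUTOMATON_TYPE.PREFIX;
    BytesRef t = new BytesRef("abcde");
    assert pa.runAutomaton.run(t.bytes, t.offset, t.length);  // any "ab"-prefixed term
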
+
   /** Create this.  If finite is null, we use {@link Operations#isFinite}
    *  to determine whether it is finite.  If simplify is true, we run
    *  possibly expensive operations to determine if the automaton is one
@@ -134,6 +225,10 @@
         runAutomaton = null;
         this.automaton = null;
         this.finite = null;
+        maxTerm = null;
+        minInclusive = false;
+        maxInclusive = false;
+        sinkState = -1;
         return;
       // NOTE: only approximate, because automaton may not be minimal:
       } else if (Operations.isTotal(automaton)) {
@@ -144,6 +239,10 @@
         runAutomaton = null;
         this.automaton = null;
         this.finite = null;
+        maxTerm = null;
+        minInclusive = false;
+        maxInclusive = false;
+        sinkState = -1;
         return;
       } else {
 
@@ -166,6 +265,10 @@
           runAutomaton = null;
           this.automaton = null;
           this.finite = null;
+          maxTerm = null;
+          minInclusive = false;
+          maxInclusive = false;
+          sinkState = -1;
           return;
         } else if (commonPrefix.length() > 0) {
           Automaton other = Operations.concatenate(Automata.makeString(commonPrefix), Automata.makeAnyString());
@@ -176,17 +279,37 @@
             type = AUTOMATON_TYPE.PREFIX;
             term = new BytesRef(commonPrefix);
             commonSuffixRef = null;
-            runAutomaton = null;
-            this.automaton = null;
-            this.finite = null;
+            automaton = new Automaton();
+            int lastState = automaton.createState();
+            for(int i=0;i<term.length;i++) {
+              int state = automaton.createState();
+              automaton.addTransition(lastState, state, term.bytes[term.offset+i]&0xff);
+              lastState = state;
+            }
+            automaton.setAccept(lastState, true);
+            automaton.addTransition(lastState, lastState, 0, 255);
+            sinkState = lastState;
+            automaton.finishState();
+            this.automaton = automaton;
+            // It's safe to allow an unlimited number of determinized states here:
+            runAutomaton = new ByteRunAutomaton(automaton, true, Integer.MAX_VALUE);
+            this.finite = false;
+            maxTerm = null;
+            minInclusive = false;
+            maxInclusive = false;
             return;
           }
         }
       }
     }
 
+    sinkState = -1;
+
     type = AUTOMATON_TYPE.NORMAL;
     term = null;
+    maxTerm = null;
+    minInclusive = false;
+    maxInclusive = false;
 
     if (finite == null) {
       this.finite = Operations.isFinite(automaton);
@@ -194,14 +317,14 @@
       this.finite = finite;
     }
 
-    Automaton utf8 = new UTF32ToUTF8().convert(automaton);
+    Automaton binary = new UTF32ToUTF8().convert(automaton);
     if (this.finite) {
       commonSuffixRef = null;
     } else {
       // NOTE: this is a very costly operation!  We should test if it's really warranted in practice...
-      commonSuffixRef = Operations.getCommonSuffixBytesRef(utf8, maxDeterminizedStates);
+      commonSuffixRef = Operations.getCommonSuffixBytesRef(binary, maxDeterminizedStates);
     }
-    runAutomaton = new ByteRunAutomaton(utf8, true, maxDeterminizedStates);
+    runAutomaton = new ByteRunAutomaton(binary, true, maxDeterminizedStates);
 
     this.automaton = runAutomaton.automaton;
   }
@@ -286,10 +409,8 @@
     case SINGLE:
       return new SingleTermsEnum(terms.iterator(null), term);
     case PREFIX:
-      // TODO: this is very likely faster than .intersect,
-      // but we should test and maybe cutover
-      return new PrefixTermsEnum(terms.iterator(null), term);
     case NORMAL:
+    case RANGE:
       return terms.intersect(this, null);
     default:
       // unreachable
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java b/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java
index ebea907..6757bdb 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java
@@ -600,7 +600,7 @@
         a = aa;
         break;
       case REGEXP_INTERVAL:
-        a = Automata.makeInterval(min, max, digits);
+        a = Automata.makeDecimalInterval(min, max, digits);
         break;
     }
     return a;
diff --git a/lucene/core/src/java/overview.html b/lucene/core/src/java/overview.html
index 433e79f..3beea54 100644
--- a/lucene/core/src/java/overview.html
+++ b/lucene/core/src/java/overview.html
@@ -38,9 +38,8 @@
     //Directory directory = FSDirectory.open("/tmp/testindex");
     IndexWriterConfig config = new IndexWriterConfig(analyzer);
     IndexWriter iwriter = new IndexWriter(directory, config);
-    Document doc = new Document();
-    String text = "This is the text to be indexed.";
-    doc.add(new Field("fieldname", text, TextField.TYPE_STORED));
+    Document doc = iwriter.newDocument();
+    doc.addLargeText("fieldname", "This is the text to be indexed.");
     iwriter.addDocument(doc);
     iwriter.close();
     
diff --git a/lucene/core/src/test/org/apache/lucene/TestDemo.java b/lucene/core/src/test/org/apache/lucene/TestDemo.java
index b512b5f..a17d5ff 100644
--- a/lucene/core/src/test/org/apache/lucene/TestDemo.java
+++ b/lucene/core/src/test/org/apache/lucene/TestDemo.java
@@ -22,12 +22,10 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -48,10 +46,10 @@
     // To store an index on disk, use this instead:
     // Directory directory = FSDirectory.open(new File("/tmp/testindex"));
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, analyzer);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
+    doc.addLargeText("fieldname", text);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -65,8 +63,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
     }
 
     // Test simple phrase query
diff --git a/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
index 2371699..24c38f9 100644
--- a/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
@@ -18,10 +18,8 @@
  */
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -38,24 +36,6 @@
 
 public class TestExternalCodecs extends LuceneTestCase {
 
-  private static final class CustomPerFieldCodec extends AssertingCodec {
-    
-    private final PostingsFormat ramFormat = PostingsFormat.forName("RAMOnly");
-    private final PostingsFormat defaultFormat = TestUtil.getDefaultPostingsFormat();
-    private final PostingsFormat memoryFormat = PostingsFormat.forName("Memory");
-
-    @Override
-    public PostingsFormat getPostingsFormatForField(String field) {
-      if (field.equals("field2") || field.equals("id")) {
-        return memoryFormat;
-      } else if (field.equals("field1")) {
-        return defaultFormat;
-      } else {
-        return ramFormat;
-      }
-    }
-  }
-
   // tests storing "id" and "field2" fields as pulsing codec,
   // whose term sort is backwards unicode code point, and
   // storing "field1" as a custom entirely-in-RAM codec
@@ -70,22 +50,22 @@
     dir.setCheckIndexOnClose(false); // we use a custom codec provider
     IndexWriter w = new IndexWriter(
         dir,
-        newIndexWriterConfig(new MockAnalyzer(random())).
-        setCodec(new CustomPerFieldCodec()).
-            setMergePolicy(newLogMergePolicy(3))
+        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(3))
     );
-    Document doc = new Document();
-    // uses default codec:
-    doc.add(newTextField("field1", "this field uses the standard codec as the test", Field.Store.NO));
-    // uses memory codec:
-    Field field2 = newTextField("field2", "this field uses the memory codec as the test", Field.Store.NO);
-    doc.add(field2);
-    
-    Field idField = newStringField("id", "", Field.Store.NO);
 
-    doc.add(idField);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setPostingsFormat("id", "RAMOnly");
+    fieldTypes.disableFastRanges("id");
+    fieldTypes.setPostingsFormat("field1", TestUtil.getDefaultPostingsFormat().getName());
+    fieldTypes.setPostingsFormat("field2", "Memory");
+    
     for(int i=0;i<NUM_DOCS;i++) {
-      idField.setStringValue(""+i);
+      Document doc = w.newDocument();
+      // uses default codec:
+      doc.addLargeText("field1", "this field uses the standard codec as the test");
+      // uses memory codec:
+      doc.addLargeText("field2", "this field uses the memory codec as the test");
+      doc.addUniqueInt("id", i);
       w.addDocument(doc);
       if ((i+1)%10 == 0) {
         w.commit();
@@ -94,7 +74,7 @@
     if (VERBOSE) {
       System.out.println("TEST: now delete id=77");
     }
-    w.deleteDocuments(new Term("id", "77"));
+    w.deleteDocuments(fieldTypes.newIntTerm("id", 77));
 
     IndexReader r = DirectoryReader.open(w, true);
     
@@ -107,7 +87,7 @@
     if (VERBOSE) {
       System.out.println("\nTEST: now delete 2nd doc");
     }
-    w.deleteDocuments(new Term("id", "44"));
+    w.deleteDocuments(fieldTypes.newIntTerm("id", 44));
 
     if (VERBOSE) {
       System.out.println("\nTEST: now force merge");
@@ -122,9 +102,9 @@
     s = newSearcher(r);
     assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
     assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field2", "memory")), 1).totalHits);
-    assertEquals(1, s.search(new TermQuery(new Term("id", "76")), 1).totalHits);
-    assertEquals(0, s.search(new TermQuery(new Term("id", "77")), 1).totalHits);
-    assertEquals(0, s.search(new TermQuery(new Term("id", "44")), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactIntQuery("id", 76), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactIntQuery("id", 77), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactIntQuery("id", 44), 1).totalHits);
 
     if (VERBOSE) {
       System.out.println("\nTEST: now close NRT reader");
diff --git a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
index 0c3e15d..02e7c0d 100644
--- a/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
+++ b/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -17,15 +17,16 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergePolicy.OneMerge;
+import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergeScheduler;
 import org.apache.lucene.index.MergeTrigger;
 import org.apache.lucene.store.Directory;
@@ -33,8 +34,6 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-
 /**
 * Holds test cases to verify external APIs are accessible
  * while not being in org.apache.lucene.index package.
@@ -89,18 +88,19 @@
     MockDirectoryWrapper dir = newMockDirectory();
     dir.failOn(new FailOnlyOnMerge());
 
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    doc.add(idField);
-    
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
         .setMergeScheduler(new MyMergeScheduler())
         .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergePolicy(newLogMergePolicy()));
+
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "");
+    
     LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
     logMP.setMergeFactor(10);
-    for(int i=0;i<20;i++)
+    for(int i=0;i<20;i++) {
       writer.addDocument(doc);
+    }
 
     ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
     writer.close();
@@ -137,9 +137,9 @@
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setMergeScheduler(new ReportingMergeScheduler());
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit(); // trigger flush
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit(); // trigger flush
     writer.forceMerge(1);
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearch.java b/lucene/core/src/test/org/apache/lucene/TestSearch.java
index d5002ca..443cce0 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearch.java
@@ -46,8 +46,8 @@
       
       IndexWriter writer = new IndexWriter(directory, conf);
       try {
-        Document d = new Document();
-        d.add(newTextField("foo", "bar", Field.Store.YES));
+        Document d = writer.newDocument();
+        d.addLargeText("foo", "bar");
         writer.addDocument(d);
       } finally {
         writer.close();
@@ -125,10 +125,9 @@
         "a c e a b c"
       };
       for (int j = 0; j < docs.length; j++) {
-        Document d = new Document();
-        d.add(newTextField("contents", docs[j], Field.Store.YES));
-        d.add(new IntField("id", j, Field.Store.NO));
-        d.add(new NumericDocValuesField("id", j));
+        Document d = writer.newDocument();
+        d.addLargeText("contents", docs[j]);
+        d.addInt("id", j);
         writer.addDocument(d);
       }
       writer.close();
@@ -151,8 +150,8 @@
 
         out.println(hits.length + " total results");
         for (int i = 0 ; i < hits.length && i < 10; i++) {
-          StoredDocument d = searcher.doc(hits[i].doc);
-          out.println(i + " " + hits[i].score + " " + d.get("contents"));
+          Document d = searcher.doc(hits[i].doc);
+          out.println(i + " " + hits[i].score + " " + d.getString("contents"));
         }
       }
       reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 4747557..1789187 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -47,78 +47,77 @@
    *        validate this output and make any changes to the checkHits method.
    */
   public void testRun() throws Exception {
-      StringWriter sw = new StringWriter();
-      PrintWriter pw = new PrintWriter(sw, true);
-      final int MAX_DOCS = atLeast(225);
-      doTest(random(), pw, false, MAX_DOCS);
-      pw.close();
-      sw.close();
-      String multiFileOutput = sw.toString();
-      //System.out.println(multiFileOutput);
+    StringWriter sw = new StringWriter();
+    PrintWriter pw = new PrintWriter(sw, true);
+    final int MAX_DOCS = atLeast(225);
+    doTest(random(), pw, false, MAX_DOCS);
+    pw.close();
+    sw.close();
+    String multiFileOutput = sw.toString();
+    //System.out.println(multiFileOutput);
 
-      sw = new StringWriter();
-      pw = new PrintWriter(sw, true);
-      doTest(random(), pw, true, MAX_DOCS);
-      pw.close();
-      sw.close();
-      String singleFileOutput = sw.toString();
+    sw = new StringWriter();
+    pw = new PrintWriter(sw, true);
+    doTest(random(), pw, true, MAX_DOCS);
+    pw.close();
+    sw.close();
+    String singleFileOutput = sw.toString();
 
-      assertEquals(multiFileOutput, singleFileOutput);
+    assertEquals(multiFileOutput, singleFileOutput);
   }
 
 
   private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception {
-      Directory directory = newDirectory();
-      Analyzer analyzer = new MockAnalyzer(random);
-      IndexWriterConfig conf = newIndexWriterConfig(analyzer);
-      final MergePolicy mp = conf.getMergePolicy();
-      mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
-      IndexWriter writer = new IndexWriter(directory, conf);
-      if (VERBOSE) {
-        System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
-      }
+    Directory directory = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random);
+    IndexWriterConfig conf = newIndexWriterConfig(analyzer);
+    final MergePolicy mp = conf.getMergePolicy();
+    mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
+    IndexWriter writer = new IndexWriter(directory, conf);
+    if (VERBOSE) {
+      System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
+    }
 
-      for (int j = 0; j < MAX_DOCS; j++) {
-        Document d = new Document();
-        d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
-        d.add(new IntField(ID_FIELD, j, Field.Store.YES));
-        d.add(new NumericDocValuesField(ID_FIELD, j));
-        writer.addDocument(d);
-      }
-      writer.close();
+    for (int j = 0; j < MAX_DOCS; j++) {
+      Document d = writer.newDocument();
+      d.addLargeText(PRIORITY_FIELD, HIGH_PRIORITY);
+      d.addInt(ID_FIELD, j);
+      writer.addDocument(d);
+    }
+    writer.close();
 
-      // try a search without OR
-      IndexReader reader = DirectoryReader.open(directory);
-      IndexSearcher searcher = newSearcher(reader);
+    // try a search without OR
+    IndexReader reader = DirectoryReader.open(directory);
+    IndexSearcher searcher = newSearcher(reader);
 
-      Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
-      out.println("Query: " + query.toString(PRIORITY_FIELD));
-      if (VERBOSE) {
-        System.out.println("TEST: search query=" + query);
-      }
+    Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
+    out.println("Query: " + query.toString(PRIORITY_FIELD));
+    if (VERBOSE) {
+      System.out.println("TEST: search query=" + query);
+    }
 
-      final Sort sort = new Sort(SortField.FIELD_SCORE,
-                                 new SortField(ID_FIELD, SortField.Type.INT));
+    final Sort sort = new Sort(SortField.FIELD_SCORE,
+                               new SortField(ID_FIELD, SortField.Type.INT));
 
-      ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
-      printHits(out, hits, searcher);
-      checkHits(hits, MAX_DOCS, searcher);
+    ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
+    printHits(out, hits, searcher);
+    checkHits(hits, MAX_DOCS, searcher);
 
-      // try a new search with OR
-      searcher = newSearcher(reader);
-      hits = null;
+    // try a new search with OR
+    searcher = newSearcher(reader);
+    hits = null;
 
-      BooleanQuery booleanQuery = new BooleanQuery();
-      booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD);
-      booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD);
-      out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD));
+    BooleanQuery booleanQuery = new BooleanQuery();
+    booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD);
+    booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD);
+    out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD));
 
-      hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs;
-      printHits(out, hits, searcher);
-      checkHits(hits, MAX_DOCS, searcher);
+    hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs;
+    printHits(out, hits, searcher);
+    checkHits(hits, MAX_DOCS, searcher);
 
-      reader.close();
-      directory.close();
+    reader.close();
+    directory.close();
   }
 
 
@@ -126,8 +125,8 @@
     out.println(hits.length + " total results\n");
     for (int i = 0 ; i < hits.length; i++) {
       if ( i < 10 || (i > 94 && i < 105) ) {
-        StoredDocument d = searcher.doc(hits[i].doc);
-        out.println(i + " " + d.get(ID_FIELD));
+        Document d = searcher.doc(hits[i].doc);
+        out.println(i + " " + d.getInt(ID_FIELD));
       }
     }
   }
@@ -136,10 +135,9 @@
     assertEquals("total results", expectedCount, hits.length);
     for (int i = 0 ; i < hits.length; i++) {
       if (i < 10 || (i > 94 && i < 105) ) {
-        StoredDocument d = searcher.doc(hits[i].doc);
-        assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
+        Document d = searcher.doc(hits[i].doc);
+        assertEquals("check " + i, i, d.getInt(ID_FIELD).intValue());
       }
     }
   }
-
 }
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index a4310c4..589992f 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -24,7 +24,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
@@ -39,7 +39,7 @@
   public void testCaching() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     AtomicInteger resetCount = new AtomicInteger(0);
     TokenStream stream = new TokenStream() {
       private int index = 0;
@@ -68,7 +68,7 @@
 
     stream = new CachingTokenFilter(stream);
 
-    doc.add(new TextField("preanalyzed", stream));
+    doc.addLargeText("preanalyzed", stream);
 
     // 1) we consume all tokens twice before we add the doc to the index
     assertFalse(((CachingTokenFilter)stream).isCached());
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 0a67974..afa3eb0 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -23,8 +23,7 @@
 import java.util.Random;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexOptions;
@@ -306,16 +305,20 @@
     };
 
     final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a);
-    final Document doc = new Document();
-    final FieldType ft = new FieldType();
-    ft.setIndexOptions(IndexOptions.DOCS);
-    ft.setTokenized(true);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(true);
-    ft.setStoreTermVectorOffsets(true);
-    doc.add(new Field("f", "a", ft));
-    doc.add(new Field("f", "a", ft));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    final Document doc = writer.newDocument();
+
+    fieldTypes.enableTermVectors("f");
+    fieldTypes.enableTermVectorPositions("f");
+    fieldTypes.enableTermVectorOffsets("f");
+    fieldTypes.setIndexOptions("f", IndexOptions.DOCS);
+    fieldTypes.setMultiValued("f");
+
+    doc.addAtom("f", "a");
+    doc.addAtom("f", "a");
     writer.addDocument(doc);
+
     final LeafReader reader = getOnlySegmentReader(writer.getReader());
     final Fields fields = reader.getTermVectors(0);
     final Terms terms = fields.terms("f");
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java b/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
deleted file mode 100644
index 5424eb3..0000000
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
+++ /dev/null
@@ -1,132 +0,0 @@
-package org.apache.lucene.analysis;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttributeImpl;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TestCharTermAttributeImpl;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
-
-public class TestNumericTokenStream extends BaseTokenStreamTestCase {
-
-  static final long lvalue = 4573245871874382L;
-  static final int ivalue = 123456;
-
-  public void testLongStream() throws Exception {
-    @SuppressWarnings("resource")
-    final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    final BytesRef bytes = bytesAtt.getBytesRef();
-    stream.reset();
-    assertEquals(64, numericAtt.getValueSize());
-    for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      bytesAtt.fillBytesRef();
-      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytes));
-      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-
-  public void testIntStream() throws Exception {
-    @SuppressWarnings("resource")
-    final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    final BytesRef bytes = bytesAtt.getBytesRef();
-    stream.reset();
-    assertEquals(32, numericAtt.getValueSize());
-    for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      bytesAtt.fillBytesRef();
-      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytes));
-      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-  
-  public void testNotInitialized() throws Exception {
-    final NumericTokenStream stream=new NumericTokenStream();
-    
-    try {
-      stream.reset();
-      fail("reset() should not succeed.");
-    } catch (IllegalStateException e) {
-      // pass
-    }
-
-    try {
-      stream.incrementToken();
-      fail("incrementToken() should not succeed.");
-    } catch (IllegalStateException e) {
-      // pass
-    }
-    
-    stream.close();
-  }
-  
-  public static interface TestAttribute extends CharTermAttribute {}
-  public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
-  
-  public void testCTA() throws Exception {
-    final NumericTokenStream stream=new NumericTokenStream();
-    try {
-      stream.addAttribute(CharTermAttribute.class);
-      fail("Succeeded to add CharTermAttribute.");
-    } catch (IllegalArgumentException iae) {
-      assertTrue(iae.getMessage().startsWith("NumericTokenStream does not support"));
-    }
-    try {
-      stream.addAttribute(TestAttribute.class);
-      fail("Succeeded to add TestAttribute.");
-    } catch (IllegalArgumentException iae) {
-      assertTrue(iae.getMessage().startsWith("NumericTokenStream does not support"));
-    }
-    stream.close();
-  }
-  
-  public void testAttributeClone() throws Exception {
-    NumericTermAttributeImpl att = new NumericTermAttributeImpl();
-    NumericTermAttributeImpl copy = TestCharTermAttributeImpl.assertCloneIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy.getBytesRef());
-    NumericTermAttributeImpl copy2 = TestCharTermAttributeImpl.assertCopyIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
-  }
-  
-}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
index 75f7367..c626421 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
@@ -23,14 +23,11 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
 import org.apache.lucene.index.CodecReader;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
@@ -40,7 +37,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.junit.Test;
-
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 
 public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTestCase {
@@ -71,31 +67,26 @@
     // Cannot use RIW because this test wants CFS to stay off:
     IndexWriter iw = new IndexWriter(dir, iwConf);
 
-    final Document validDoc = new Document();
-    validDoc.add(new IntField("id", 0, Store.YES));
+    Document validDoc = iw.newDocument();
+    validDoc.addInt("id", 0);
     iw.addDocument(validDoc);
     iw.commit();
     
     // make sure that #writeField will fail to trigger an abort
-    final Document invalidDoc = new Document();
-    FieldType fieldType = new FieldType();
-    fieldType.setStored(true);
-    invalidDoc.add(new Field("invalid", fieldType) {
-      
-      @Override
-      public String stringValue() {
-        // TODO: really bad & scary that this causes IW to
-        // abort the segment!!  We should fix this.
-        return null;
-      }
-      
-    });
-    
+    Document invalidDoc = iw.newDocument();
+    invalidDoc.add(new LowSchemaField(null, "invalid", "", IndexOptions.NONE, false) {
+        @Override
+        public String stringValue() {
+          // TODO: really bad & scary that this causes IW to
+          // abort the segment!!  We should fix this.
+          return null;
+        }
+      });
+
     try {
       iw.addDocument(invalidDoc);
       iw.commit();
-    }
-    finally {
+    } finally {
       // Abort should have closed the deleter:
       dir.close();
     }
@@ -288,8 +279,8 @@
     IndexWriter iw = new IndexWriter(dir, iwConf);
     DirectoryReader ir = DirectoryReader.open(iw, true);
     for (int i = 0; i < 5; i++) {
-      Document doc = new Document();
-      doc.add(new StoredField("text", "not very long at all"));
+      Document doc = iw.newDocument();
+      doc.addStoredString("text", "not very long at all");
       iw.addDocument(doc);
       // force flush
       DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
index 18a120d..3532fb0 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
@@ -5,22 +5,19 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
 import org.apache.lucene.index.CodecReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 
@@ -52,10 +49,9 @@
   public void testNoOrds() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    doc.add(new Field("foo", "this is a test", ft));
+    Document doc = iw.newDocument();
+    iw.getFieldTypes().enableTermVectors("foo");
+    doc.addLargeText("foo", "this is a test");
     iw.addDocument(doc);
     LeafReader ir = getOnlySegmentReader(iw.getReader());
     Terms terms = ir.getTermVector(0, "foo");
@@ -93,12 +89,12 @@
     // by this test.
     iwConf.setCodec(CompressingCodec.randomInstance(random(), 4*1024, 100, false, 8));
     IndexWriter iw = new IndexWriter(dir, iwConf);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("text");
     DirectoryReader ir = DirectoryReader.open(iw, true);
     for (int i = 0; i < 5; i++) {
-      Document doc = new Document();
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setStoreTermVectors(true);
-      doc.add(new Field("text", "not very long at all", ft));
+      Document doc = iw.newDocument();
+      doc.addShortText("text", "not very long at all");
       iw.addDocument(doc);
       // force flush
       DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestAutoPrefixTerms.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestAutoPrefixTerms.java
new file mode 100644
index 0000000..1f200d8
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestAutoPrefixTerms.java
@@ -0,0 +1,738 @@
+package org.apache.lucene.codecs.lucene50;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+
+public class TestAutoPrefixTerms extends LuceneTestCase {
+
+  private int minItemsPerBlock = TestUtil.nextInt(random(), 2, 100);
+  private int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random().nextInt(100);
+  private int minTermsAutoPrefix = TestUtil.nextInt(random(), 2, 100);
+  private int maxTermsAutoPrefix = random().nextBoolean() ? Math.max(2, (minTermsAutoPrefix-1)*2 + random().nextInt(100)) : Integer.MAX_VALUE;
+
+  private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene50PostingsFormat(minItemsPerBlock, maxItemsPerBlock,
+                                                                                       minTermsAutoPrefix, maxTermsAutoPrefix));
+
+  // Numbers in a restricted range, encoded in decimal, left-0-padded:
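+  // e.g. with digits=5 the value 42 is indexed as the term "00042", so lexicographic term order matches numeric order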
+  public void testBasicNumericRanges() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(codec);
+    IndexWriter w = new IndexWriter(dir, iwc);
+    int numTerms = TestUtil.nextInt(random(), 3000, 50000);
+    Set<String> terms = new HashSet<>();
+    int digits = TestUtil.nextInt(random(), 5, 10);
+    int maxValue = 1;
+    for(int i=0;i<digits;i++) {
+      maxValue *= 10;
+    }
+    String format = "%0" + digits + "d";
+    while (terms.size() < numTerms) {
+      terms.add(String.format(Locale.ROOT, format, random().nextInt(maxValue)));
+    }
+
+    for(String term : terms) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", term);
+      doc.addLong("long", Long.parseLong(term));
+      w.addDocument(doc);
+    }
+
+    if (VERBOSE) System.out.println("\nTEST: now optimize");
+    if (random().nextBoolean()) {
+      w.forceMerge(1);
+    }
+
+    if (VERBOSE) System.out.println("\nTEST: now done");
+    IndexReader r = DirectoryReader.open(w, true);
+
+    List<String> sortedTerms = new ArrayList<>(terms);
+    Collections.sort(sortedTerms);
+
+    if (VERBOSE) {
+      System.out.println("TEST: sorted terms:");
+      int idx = 0;
+      for(String term : sortedTerms) {
+        System.out.println(idx + ": " + term);
+        idx++;
+      }
+    }
+
+    int iters = atLeast(100);
+    for(int iter=0;iter<iters;iter++) {
+      int min, max;
+      while (true) {
+        min = random().nextInt(maxValue);
+        max = random().nextInt(maxValue);
+        if (min == max) {
+          continue;
+        } else if (min > max) {
+          int x = min;
+          min = max;
+          max = x;
+        }
+        break;
+      }
+      
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " min=" + min + " max=" + max);
+      }
+
+      boolean minInclusive = random().nextBoolean();
+      boolean maxInclusive = random().nextBoolean();
+      BytesRef minTerm = new BytesRef(String.format(Locale.ROOT, format, min));
+      BytesRef maxTerm = new BytesRef(String.format(Locale.ROOT, format, max));
+      CompiledAutomaton ca = new CompiledAutomaton(minTerm, minInclusive,
+                                                   maxTerm, maxInclusive);
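+      // The automaton accepts exactly the terms in the [minTerm, maxTerm] range (honoring the inclusive flags); intersecting it with the terms dictionary lets auto-prefix terms stand in for whole sub-ranges: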
+
+      TermsEnum te = ca.getTermsEnum(MultiFields.getTerms(r, "field"));
+      NumericDocValues docValues = MultiDocValues.getNumericValues(r, "long");
+      DocsEnum docsEnum = null;
+
+      VerifyAutoPrefixTerms verifier = new VerifyAutoPrefixTerms(r.maxDoc(), minTerm, maxTerm);
+
+      while (te.next() != null) {
+        if (VERBOSE) {
+          System.out.println("  got term=" + te.term().utf8ToString());
+        }
+        verifier.sawTerm(te.term());
+        docsEnum = te.docs(null, docsEnum);
+        int docID;
+        while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+          long v = docValues.get(docID);
+          assert v >= min && v <= max: "docID=" + docID + " v=" + v;
+          // The auto-prefix terms should never "overlap" one another, so we should only ever see a given docID one time:
+          if (VERBOSE) {
+            System.out.println("    got docID=" + docID + " v=" + v);
+          }
+          verifier.sawDoc(docID);
+        }
+      }
+      
+      int startLoc = Collections.binarySearch(sortedTerms, String.format(Locale.ROOT, format, min));
+      if (startLoc < 0) {
+        startLoc = -startLoc-1;
+      } else if (minInclusive == false) {
+        startLoc++;
+      }
+      int endLoc = Collections.binarySearch(sortedTerms, String.format(Locale.ROOT, format, max));
+      if (endLoc < 0) {
+        endLoc = -endLoc-2;
+      } else if (maxInclusive == false) {
+        endLoc--;
+      }
+      verifier.finish(endLoc-startLoc+1, maxTermsAutoPrefix);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  private static BytesRef intToBytes(int v) {
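+    // Flip the sign bit so unsigned byte order matches signed int order (e.g. -1 -> 0x7fffffff sorts before 0 -> 0x80000000):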
+    int sortableBits = v ^ 0x80000000;
+    BytesRef token = new BytesRef(4);
+    token.length = 4;
+    int index = 3;
+    while (index >= 0) {
+      token.bytes[index] = (byte) (sortableBits & 0xff);
+      index--;
+      sortableBits >>>= 8;
+    }
+    return token;
+  }
+
+  // Numbers are encoded in full binary (4 byte ints):
+  public void testBinaryNumericRanges() throws Exception {
+
+    if (VERBOSE) {
+      System.out.println("TEST: minItemsPerBlock=" + minItemsPerBlock);
+      System.out.println("TEST: maxItemsPerBlock=" + maxItemsPerBlock);
+      System.out.println("TEST: minTermsAutoPrefix=" + minTermsAutoPrefix);
+      System.out.println("TEST: maxTermsAutoPrefix=" + maxTermsAutoPrefix);
+    }
+
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(codec);
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    int numTerms = TestUtil.nextInt(random(), 3000, 50000);
+    Set<Integer> terms = new HashSet<>();
+    while (terms.size() < numTerms) {
+      terms.add(random().nextInt());
+    }
+
+    for(Integer term : terms) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", intToBytes(term));
+      doc.addInt("int", term.intValue());
+      w.addDocument(doc);
+    }
+
+    if (random().nextBoolean()) {
+      if (VERBOSE) System.out.println("TEST: now force merge");
+      w.forceMerge(1);
+    }
+
+    IndexReader r = DirectoryReader.open(w, true);
+
+    List<Integer> sortedTerms = new ArrayList<>(terms);
+    Collections.sort(sortedTerms);
+
+    if (VERBOSE) {
+      System.out.println("TEST: sorted terms:");
+      int idx = 0;
+      for(Integer term : sortedTerms) {
+        System.out.println(idx + ": " + term);
+        idx++;
+      }
+    }
+
+    int iters = atLeast(100);
+    for(int iter=0;iter<iters;iter++) {
+
+      int min, max;
+      while (true) {
+        min = random().nextInt();
+        max = random().nextInt();
+        if (min == max) {
+          continue;
+        } else if (min > max) {
+          int x = min;
+          min = max;
+          max = x;
+        }
+        break;
+      }
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " min=" + min + " (" + intToBytes(min) + ") max=" + max + " (" + intToBytes(max) + ")");
+      }
+      
+      boolean minInclusive = random().nextBoolean();
+      BytesRef minTerm = intToBytes(min);
+      boolean maxInclusive = random().nextBoolean();
+      BytesRef maxTerm = intToBytes(max);
+      CompiledAutomaton ca = new CompiledAutomaton(minTerm, minInclusive,
+                                                   maxTerm, maxInclusive);
+
+      TermsEnum te = ca.getTermsEnum(MultiFields.getTerms(r, "field"));
+      NumericDocValues docValues = MultiDocValues.getNumericValues(r, "int");
+      DocsEnum docsEnum = null;
+      VerifyAutoPrefixTerms verifier = new VerifyAutoPrefixTerms(r.maxDoc(), minTerm, maxTerm);
+      while (te.next() != null) {
+        if (VERBOSE) {
+          System.out.println("  got term=" + te.term() + " docFreq=" + te.docFreq());
+        }
+        verifier.sawTerm(te.term());        
+        docsEnum = te.docs(null, docsEnum);
+        int docID;
+        while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+          long v = docValues.get(docID);
+          assert v >= min && v <= max: "docID=" + docID + " v=" + v;
+          verifier.sawDoc(docID);
+        }
+      }
+      
+      int startLoc = Collections.binarySearch(sortedTerms, min);
+      if (startLoc < 0) {
+        startLoc = -startLoc-1;
+      } else if (minInclusive == false) {
+        startLoc++;
+      }
+      int endLoc = Collections.binarySearch(sortedTerms, max);
+      if (endLoc < 0) {
+        endLoc = -endLoc-2;
+      } else if (maxInclusive == false) {
+        endLoc--;
+      }
+      int expectedHits = endLoc-startLoc+1;
+      try {
+        verifier.finish(expectedHits, maxTermsAutoPrefix);
+      } catch (AssertionError ae) {
+        for(int i=0;i<numTerms;i++) {
+          if (verifier.allHits.get(i) == false) {
+            int v = (int) docValues.get(i);
+            boolean accept = (v > min || (v == min && minInclusive)) &&
+              (v < max || (v == max && maxInclusive));
+            if (accept) {
+              System.out.println("MISSING: docID=" + i + " v=" + v + " term=" + intToBytes(v));
+            }
+          }
+        }
+
+        throw ae;
+      }
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  // Non-numeric, simple prefix query
+  public void testBasicPrefixTerms() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(codec);
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    int numTerms = TestUtil.nextInt(random(), 3000, 50000);
+    Set<String> terms = new HashSet<>();
+    while (terms.size() < numTerms) {
+      terms.add(TestUtil.randomSimpleString(random()));
+    }
+    w.getFieldTypes().setDocValuesType("binary", DocValuesType.BINARY);
+
+    for(String term : terms) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", term);
+      doc.addBinary("binary", new BytesRef(term));
+      w.addDocument(doc);
+    }
+
+    if (random().nextBoolean()) {
+      w.forceMerge(1);
+    }
+
+    IndexReader r = DirectoryReader.open(w, true);
+
+    List<String> sortedTerms = new ArrayList<>(terms);
+    Collections.sort(sortedTerms);
+
+    if (VERBOSE) {
+      System.out.println("TEST: sorted terms:");
+      int idx = 0;
+      for(String term : sortedTerms) {
+        System.out.println(idx + ": " + term);
+        idx++;
+      }
+    }
+
+    if (VERBOSE) {
+      System.out.println("TEST: r=" + r);
+    }
+
+    int iters = atLeast(100);
+    for(int iter=0;iter<iters;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      String prefix;
+      if (random().nextInt(100) == 42) {
+        prefix = "";
+      } else {
+        prefix = TestUtil.randomSimpleString(random(), 1, 4);
+      }
+      BytesRef prefixBR = new BytesRef(prefix);
+      if (VERBOSE) {
+        System.out.println("  prefix=" + prefix);
+      }
+
+      CompiledAutomaton ca = new CompiledAutomaton(prefixBR);
+      TermsEnum te = ca.getTermsEnum(MultiFields.getTerms(r, "field"));
+      BinaryDocValues docValues = MultiDocValues.getBinaryValues(r, "binary");
+      DocsEnum docsEnum = null;
+
+      VerifyAutoPrefixTerms verifier = new VerifyAutoPrefixTerms(r.maxDoc(), prefixBR);
+
+      while (te.next() != null) {
+        if (VERBOSE) {
+          System.out.println("TEST: got term=" + te.term().utf8ToString() + " docFreq=" + te.docFreq());
+        }
+        verifier.sawTerm(te.term());        
+        docsEnum = te.docs(null, docsEnum);
+        int docID;
+        while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+          assertTrue("prefixBR=" + prefixBR + " docBR=" + docValues.get(docID), StringHelper.startsWith(docValues.get(docID), prefixBR));
+          // The auto-prefix terms should never "overlap" one another, so we should only ever see a given docID one time:
+          verifier.sawDoc(docID);
+        }
+      }
+      
+      int startLoc = Collections.binarySearch(sortedTerms, prefix);
+      if (startLoc < 0) {
+        startLoc = -startLoc-1;
+      }
+      int endLoc = Collections.binarySearch(sortedTerms, prefix + (char) ('z'+1));
+      if (endLoc < 0) {
+        endLoc = -endLoc-2;
+      }
+      int expectedHits = endLoc-startLoc+1;
+      try {
+        verifier.finish(expectedHits, maxTermsAutoPrefix);
+      } catch (AssertionError ae) {
+        for(int i=0;i<numTerms;i++) {
+          if (verifier.allHits.get(i) == false) {
+            String s = docValues.get(i).utf8ToString();
+            if (s.startsWith(prefix)) {
+              System.out.println("MISSING: docID=" + i + " term=" + s);
+            }
+          }
+        }
+
+        throw ae;
+      }
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDemoPrefixTerms() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(codec);
+    IndexWriter w = new IndexWriter(dir, iwc);
+    int numDocs = 30;
+
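+    // Index single-char terms 'a'..'~' plus two-char terms "aa".."a~"; prefix "a" then matches 31 docs: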
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", "" + (char) (97+i));
+      w.addDocument(doc);
+      doc = w.newDocument();
+      doc.addAtom("field", "a" + (char) (97+i));
+      w.addDocument(doc);
+    }
+
+    if (random().nextBoolean()) {
+      w.forceMerge(1);
+    }
+
+    IndexReader r = DirectoryReader.open(w, true);
+    Terms terms = MultiFields.getTerms(r, "field");
+    if (VERBOSE) {
+      System.out.println("\nTEST: now intersect");
+    }
+    CompiledAutomaton ca = new CompiledAutomaton(new BytesRef("a"));
+    TermsEnum te = ca.getTermsEnum(terms);
+    DocsEnum docsEnum = null;
+
+    VerifyAutoPrefixTerms verifier = new VerifyAutoPrefixTerms(r.maxDoc(), new BytesRef("a"));
+    //TermsEnum te = terms.intersect(new CompiledAutomaton(a, true, false), null);
+    while (te.next() != null) {
+      verifier.sawTerm(te.term());
+      docsEnum = te.docs(null, docsEnum);
+      int docID;
+      while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+        // The auto-prefix terms should never "overlap" one another, so we should only ever see a given docID one time:
+        verifier.sawDoc(docID);
+      }
+    }
+    // 1 document has exactly "a", and 30 documents have "a?":
+    verifier.finish(31, maxTermsAutoPrefix);
+    PrefixQuery q = new PrefixQuery(new Term("field", "a"));
+    q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+    assertEquals(31, newSearcher(r).search(q, 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
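+  /** Token stream that emits a single pre-built binary term. */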
+  static final class BinaryTokenStream extends TokenStream {
+    private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);
+    private boolean available = true;
+  
+    public BinaryTokenStream(BytesRef bytes) {
+      bytesAtt.setBytesRef(bytes);
+    }
+  
+    @Override
+    public boolean incrementToken() {
+      if (available) {
+        clearAttributes();
+        available = false;
+        return true;
+      }
+      return false;
+    }
+  
+    @Override
+    public void reset() {
+      available = true;
+    }
+  
+    public interface ByteTermAttribute extends TermToBytesRefAttribute {
+      public void setBytesRef(BytesRef bytes);
+    }
+  
+    public static class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {
+      private BytesRef bytes;
+    
+      @Override
+      public void fillBytesRef() {
+        // no-op: the bytes were already set in the stream's constructor via setBytesRef
+      }
+    
+      @Override
+      public BytesRef getBytesRef() {
+        return bytes;
+      }
+
+      @Override
+      public void setBytesRef(BytesRef bytes) {
+        this.bytes = bytes;
+      }
+    
+      @Override
+      public void clear() {}
+    
+      @Override
+      public void copyTo(AttributeImpl target) {
+        ByteTermAttributeImpl other = (ByteTermAttributeImpl) target;
+        other.bytes = bytes;
+      }
+    }
+  }
+
+  /** Helper class to ensure auto-prefix terms 1) never overlap one another, and 2) are used when they should be. */
+  private static class VerifyAutoPrefixTerms {
+    final FixedBitSet allHits;
+    private final Map<BytesRef,Integer> prefixCounts = new HashMap<>();
+    private int totPrefixCount;
+    private final BytesRef[] bounds;
+    private int totTermCount;
+    private BytesRef lastTerm;
+
+    public VerifyAutoPrefixTerms(int maxDoc, BytesRef... bounds) {
+      allHits = new FixedBitSet(maxDoc);
+      assert bounds.length > 0;
+      this.bounds = bounds;
+    }
+
+    public void sawTerm(BytesRef term) {
+      //System.out.println("saw term=" + term);
+      if (lastTerm != null) {
+        assertTrue(lastTerm.compareTo(term) < 0);
+      }
+      lastTerm = BytesRef.deepCopyOf(term);
+      totTermCount++;
+      totPrefixCount += term.length;
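+      // Record every prefix of this term so finish() can verify that no single prefix was seen more often than the auto-prefix limits allow: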
+      for(int i=1;i<=term.length;i++) {
+        BytesRef prefix = BytesRef.deepCopyOf(term);
+        prefix.length = i;
+        Integer count = prefixCounts.get(prefix);
+        if (count == null) {
+          count = 1;
+        } else {
+          count += 1;
+        }
+        prefixCounts.put(prefix, count);
+      }
+    }
+
+    public void sawDoc(int docID) {
+      // The auto-prefix terms should never "overlap" one another, so we should only ever see a given docID one time:
+      assertFalse(allHits.getAndSet(docID));
+    }
+
+    public void finish(int expectedNumHits, int maxPrefixCount) {
+
+      if (maxPrefixCount != -1) {
+        // Auto-terms were used in this test
+        long allowedMaxTerms;
+
+        if (bounds.length == 1) {
+          // Simple prefix query: we should never see more than maxPrefixCount terms:
+          allowedMaxTerms = maxPrefixCount;
+        } else {
+          // Trickier: we need to allow for maxPrefixTerms for each different leading byte in the min and max:
+          assert bounds.length == 2;
+          BytesRef minTerm = bounds[0];
+          BytesRef maxTerm = bounds[1];
+
+          int commonPrefix = 0;
+          for(int i=0;i<minTerm.length && i<maxTerm.length;i++) {
+            if (minTerm.bytes[minTerm.offset+i] != maxTerm.bytes[maxTerm.offset+i]) {
+              commonPrefix = i;
+              break;
+            }
+          }
+
+          allowedMaxTerms = maxPrefixCount * (long) ((minTerm.length-commonPrefix) + (maxTerm.length-commonPrefix));
+        }
+
+        assertTrue("totTermCount=" + totTermCount + " is > allowedMaxTerms=" + allowedMaxTerms, totTermCount <= allowedMaxTerms);
+      }
+
+      assertEquals(expectedNumHits, allHits.cardinality());
+      int sum = 0;
+      for(Map.Entry<BytesRef,Integer> ent : prefixCounts.entrySet()) {
+
+        BytesRef prefix = ent.getKey();
+        if (VERBOSE) {
+          System.out.println("  verify prefix=" + TestUtil.brToString(prefix) + " count=" + ent.getValue());
+        }
+
+        if (maxPrefixCount != -1) {
+          // Auto-terms were used in this test
+
+          int sumLeftoverSuffix = 0;
+          for(BytesRef bound : bounds) {
+
+            int minSharedLength = Math.min(bound.length, prefix.length);
+            int commonPrefix = minSharedLength;
+            for(int i=0;i<minSharedLength;i++) {
+              if (bound.bytes[bound.offset+i] != prefix.bytes[prefix.offset+i]) {
+                commonPrefix = i;
+                break;
+              }
+            }
+            sumLeftoverSuffix += bound.length - commonPrefix;
+          }
+
+          long limit = (1+sumLeftoverSuffix) * (long) maxPrefixCount;
+
+          assertTrue("maxPrefixCount=" + maxPrefixCount + " prefix=" + prefix + " sumLeftoverSuffix=" + sumLeftoverSuffix + " limit=" + limit + " vs actual=" +ent.getValue(),
+                     ent.getValue() <= limit);
+        }
+
+        sum += ent.getValue();
+      }
+
+      // Make sure no test bug:
+      assertEquals(totPrefixCount, sum);
+    }
+  }
+
+  public void testDisableAutoPrefix() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new Lucene50Codec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableFastRanges("num");
+    for(int i=0;i<10000;i++) {
+      Document doc = w.newDocument();
+      doc.addInt("num", i);
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    IndexReader r = DirectoryReader.open(w);
+    shouldFail(() -> fieldTypes.newIntRangeFilter("num", 0, true, 100, true),
+               "field \"num\": cannot create range filter: this field was not indexed for fast ranges");
+
+    AtomicInteger termCount = new AtomicInteger();
+    TermRangeQuery q = new TermRangeQuery("num",
+                                          NumericUtils.intToBytes(0),
+                                          NumericUtils.intToBytes(9999),
+                                          true,
+                                          false) {
+
+        @Override
+        protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
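+          // Exhaust one enum to count the terms this query visits, then return a fresh enum for the real search: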
+          TermsEnum te = super.getTermsEnum(terms, atts);
+          while (te.next() != null) {
+            termCount.incrementAndGet();
+          }
+          return super.getTermsEnum(terms, atts);
+        }
+      };
+    
+    IndexSearcher s = newSearcher(r);
+    s.search(q, 1);
+    assertEquals(9999, termCount.get());
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testEnableAutoPrefix() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new Lucene50Codec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableFastRanges("num");
+    for(int i=0;i<10000;i++) {
+      Document doc = w.newDocument();
+      doc.addInt("num", i);
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    IndexReader r = DirectoryReader.open(w);
+
+    AtomicInteger termCount = new AtomicInteger();
+    TermRangeQuery q = new TermRangeQuery("num",
+                                          NumericUtils.intToBytes(0),
+                                          NumericUtils.intToBytes(9999),
+                                          true,
+                                          true) {
+
+        @Override
+        protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
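+          // Same counting trick as in testDisableAutoPrefix: count visited terms, then hand back a fresh enum: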
+          TermsEnum te = super.getTermsEnum(terms, atts);
+          while (te.next() != null) {
+            termCount.incrementAndGet();
+          }
+          return super.getTermsEnum(terms, atts);
+        }
+      };
+    
+    IndexSearcher s = newSearcher(r);
+    s.search(q, 1);
+    assertTrue(termCount.get() < 9999);
+    r.close();
+    w.close();
+    dir.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
index 9bc9380..75eaadd 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.codecs.blocktree.FieldReader;
 import org.apache.lucene.codecs.blocktree.Stats;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BasePostingsFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
@@ -45,10 +45,13 @@
   public void testFinalBlock() throws Exception {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
     for(int i=0;i<25;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("field", Character.toString((char) (97+i)), Field.Store.NO));
-      doc.add(newStringField("field", "z" + Character.toString((char) (97+i)), Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("field", Character.toString((char) (97+i)));
+      doc.addAtom("field", "z" + Character.toString((char) (97+i)));
       w.addDocument(doc);
     }
     w.forceMerge(1);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat2.java
index 34a9caf..6bfa42c 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat2.java
@@ -19,13 +19,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -63,72 +61,58 @@
     super.tearDown();
   }
   
-  private Document newDocument() {
-    Document doc = new Document();
+  private Document newDocument(String contents) {
+    Document doc = iw.newDocument();
+    FieldTypes fieldTypes = iw.getFieldTypes();
     for (IndexOptions option : IndexOptions.values()) {
       if (option == IndexOptions.NONE) {
         continue;
       }
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
+      String fieldName = option.toString();
       // turn on tvs for a cross-check, since we rely upon checkindex in this test (for now)
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(true);
-      ft.setStoreTermVectorPositions(true);
-      ft.setStoreTermVectorPayloads(true);
-      ft.setIndexOptions(option);
-      doc.add(new Field(option.toString(), "", ft));
+      fieldTypes.disableHighlighting(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorPayloads(fieldName);
+      fieldTypes.setIndexOptions(fieldName, option);
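+      // "name" in the prototype text is replaced with this field's name, e.g. "name name_2" -> "DOCS DOCS_2":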
+      doc.addLargeText(fieldName, contents.replaceAll("name", fieldName));
     }
     return doc;
   }
 
   /** tests terms with df = blocksize */
   public void testDFBlockSize() throws Exception {
-    Document doc = newDocument();
     for (int i = 0; i < Lucene50PostingsFormat.BLOCK_SIZE; i++) {
-      for (Field f : doc.getFields()) {
-        f.setStringValue(f.name() + " " + f.name() + "_2");
-      }
-      iw.addDocument(doc);
+      iw.addDocument(newDocument("name name_2"));
     }
   }
 
   /** tests terms with df % blocksize = 0 */
   public void testDFBlockSizeMultiple() throws Exception {
-    Document doc = newDocument();
     for (int i = 0; i < Lucene50PostingsFormat.BLOCK_SIZE * 16; i++) {
-      for (Field f : doc.getFields()) {
-        f.setStringValue(f.name() + " " + f.name() + "_2");
-      }
-      iw.addDocument(doc);
+      iw.addDocument(newDocument("name name_2"));
     }
   }
   
   /** tests terms with ttf = blocksize */
   public void testTTFBlockSize() throws Exception {
-    Document doc = newDocument();
     for (int i = 0; i < Lucene50PostingsFormat.BLOCK_SIZE/2; i++) {
-      for (Field f : doc.getFields()) {
-        f.setStringValue(f.name() + " " + f.name() + " " + f.name() + "_2 " + f.name() + "_2");
-      }
-      iw.addDocument(doc);
+      iw.addDocument(newDocument("name name name_2 name_2"));
     }
   }
   
   /** tests terms with ttf % blocksize = 0 */
   public void testTTFBlockSizeMultiple() throws Exception {
-    Document doc = newDocument();
+    String proto = "name name name name name_2 name_2 name_2 name_2";
+    StringBuilder val = new StringBuilder();
+    for (int j = 0; j < 16; j++) {
+      val.append(proto);
+      val.append(" ");
+    }
+    String pattern = val.toString();
     for (int i = 0; i < Lucene50PostingsFormat.BLOCK_SIZE/2; i++) {
-      for (Field f : doc.getFields()) {
-        String proto = (f.name() + " " + f.name() + " " + f.name() + " " + f.name() + " " 
-                       + f.name() + "_2 " + f.name() + "_2 " + f.name() + "_2 " + f.name() + "_2");
-        StringBuilder val = new StringBuilder();
-        for (int j = 0; j < 16; j++) {
-          val.append(proto);
-          val.append(" ");
-        }
-        f.setStringValue(val.toString());
-      }
-      iw.addDocument(doc);
+      iw.addDocument(newDocument(pattern));
     }
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
index 195746c..1305a61 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
@@ -29,9 +29,7 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
@@ -87,51 +85,50 @@
     // TODO we could actually add more fields implemented with different PFs
     // or, just put this test into the usual rotation?
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
-    // turn this on for a cross-check
-    docsOnlyType.setStoreTermVectors(true);
-    docsOnlyType.setIndexOptions(IndexOptions.DOCS);
-    
-    FieldType docsAndFreqsType = new FieldType(TextField.TYPE_NOT_STORED);
-    // turn this on for a cross-check
-    docsAndFreqsType.setStoreTermVectors(true);
-    docsAndFreqsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    
-    FieldType positionsType = new FieldType(TextField.TYPE_NOT_STORED);
+
+    FieldTypes fieldTypes = iw.getFieldTypes();
+
     // turn these on for a cross-check
-    positionsType.setStoreTermVectors(true);
-    positionsType.setStoreTermVectorPositions(true);
-    positionsType.setStoreTermVectorOffsets(true);
-    positionsType.setStoreTermVectorPayloads(true);
-    FieldType offsetsType = new FieldType(positionsType);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field field1 = new Field("field1docs", "", docsOnlyType);
-    Field field2 = new Field("field2freqs", "", docsAndFreqsType);
-    Field field3 = new Field("field3positions", "", positionsType);
-    Field field4 = new Field("field4offsets", "", offsetsType);
-    Field field5 = new Field("field5payloadsFixed", "", positionsType);
-    Field field6 = new Field("field6payloadsVariable", "", positionsType);
-    Field field7 = new Field("field7payloadsFixedOffsets", "", offsetsType);
-    Field field8 = new Field("field8payloadsVariableOffsets", "", offsetsType);
-    doc.add(field1);
-    doc.add(field2);
-    doc.add(field3);
-    doc.add(field4);
-    doc.add(field5);
-    doc.add(field6);
-    doc.add(field7);
-    doc.add(field8);
+    fieldTypes.enableTermVectors("field1docs");
+    fieldTypes.disableHighlighting("field1docs");
+    fieldTypes.setIndexOptions("field1docs", IndexOptions.DOCS);
+
+    // turn these on for a cross-check
+    fieldTypes.enableTermVectors("field2freqs");
+    fieldTypes.disableHighlighting("field2freqs");
+    fieldTypes.setIndexOptions("field2freqs", IndexOptions.DOCS);
+
+    for(String fieldName : new String[] {"field3positions",
+                                         "field5payloadsFixed",
+                                         "field6payloadsVariable"}) {
+      // turn these on for a cross-check
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.disableHighlighting(fieldName);
+      fieldTypes.setIndexOptions(fieldName, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    }
+
+    for(String fieldName : new String[] {"field4offsets",
+                                         "field7payloadsFixedOffsets",
+                                         "field8payloadsVariableOffsets"}) {
+      // turn these on for a cross-check
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.setIndexOptions(fieldName, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    }
+
     for (int i = 0; i < MAXDOC; i++) {
       String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ') + " " + TestUtil.randomSimpleString(random());
-      field1.setStringValue(stringValue);
-      field2.setStringValue(stringValue);
-      field3.setStringValue(stringValue);
-      field4.setStringValue(stringValue);
-      field5.setStringValue(stringValue);
-      field6.setStringValue(stringValue);
-      field7.setStringValue(stringValue);
-      field8.setStringValue(stringValue);
+      Document doc = iw.newDocument();
+      for(String fieldName : new String[] {
+          "field1docs",
+          "field2freqs",
+          "field3positions",
+          "field4offsets",
+          "field5payloadsFixed",
+          "field6payloadsVariable",
+          "field7payloadsFixedOffsets",
+          "field8payloadsVariableOffsets"}) { 
+        doc.addLargeText(fieldName, stringValue);
+      }
       iw.addDocument(doc);
     }
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
index 60fbdec..0806679 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
@@ -27,21 +27,19 @@
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TestUtil;
@@ -138,12 +136,14 @@
       }
     });
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    fieldTypes.setMultiValued("indexed");
+
     // index some docs
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
-      doc.add(idField);
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("id", Integer.toString(i));
       final int length = TestUtil.nextInt(random(), minLength, maxLength);
       int numValues = random().nextInt(17);
       // create a random list of strings
@@ -156,14 +156,14 @@
       ArrayList<String> unordered = new ArrayList<>(values);
       Collections.shuffle(unordered, random());
       for (String v : values) {
-        doc.add(newStringField("indexed", v, Field.Store.NO));
+        doc.addAtom("indexed", v);
       }
 
       // add in any order to the dv field
       ArrayList<String> unordered2 = new ArrayList<>(values);
       Collections.shuffle(unordered2, random());
       for (String v : unordered2) {
-        doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
+        doc.addBinary("dv", new BytesRef(v));
       }
 
       writer.addDocument(doc);
@@ -208,6 +208,7 @@
       TermsEnum actual = ar.getSortedSetDocValues("dv").termsEnum();
       assertEquals(terms.size(), expected, actual);
     }
+
     ir.close();
     
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
index 371e309..84601d6 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
@@ -20,12 +20,10 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.store.Directory;
 
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
@@ -46,9 +44,9 @@
       IndexWriterConfig iwc = newIndexWriterConfig();
       iwc.setCodec(new Lucene50Codec(RandomPicks.randomFrom(random(), Mode.values())));
-      IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig());
+      IndexWriter iw = new IndexWriter(dir, iwc);
-      Document doc = new Document();
-      doc.add(new StoredField("field1", "value1"));
-      doc.add(new StoredField("field2", "value2"));
+      Document doc = iw.newDocument();
+      doc.addStoredString("field1", "value1");
+      doc.addStoredString("field2", "value2");
       iw.addDocument(doc);
       if (random().nextInt(4) == 0) {
         iw.forceMerge(1);
@@ -60,9 +58,9 @@
     DirectoryReader ir = DirectoryReader.open(dir);
     assertEquals(10, ir.numDocs());
     for (int i = 0; i < 10; i++) {
-      StoredDocument doc = ir.document(i);
-      assertEquals("value1", doc.get("field1"));
-      assertEquals("value2", doc.get("field2"));
+      Document doc = ir.document(i);
+      assertEquals("value1", doc.getString("field1"));
+      assertEquals("value2", doc.getString("field2"));
     }
     ir.close();
     // checkindex
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java
index 43ab7e2b..aef370a 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldDocValuesFormat.java
@@ -26,19 +26,17 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BaseDocValuesFormatTestCase;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomCodec;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -92,12 +90,14 @@
       }
     });
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setDocValuesType("dv2", DocValuesType.BINARY);
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 5));
-    doc.add(new BinaryDocValuesField("dv2", new BytesRef("hello world")));
+    doc.addLargeText("fieldname", text);
+    doc.addInt("dv1", 5);
+    doc.addBinary("dv2", new BytesRef("hello world"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -111,12 +111,13 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1");
       assertEquals(5, dv.get(hits.scoreDocs[i].doc));
       BinaryDocValues dv2 = ireader.leaves().get(0).reader().getBinaryDocValues("dv2");
+      assertNotNull(dv2);
       final BytesRef term = dv2.get(hits.scoreDocs[i].doc);
       assertEquals(new BytesRef("hello world"), term);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 3b19087..5afd51b 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -16,6 +16,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -26,9 +27,7 @@
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
 import org.apache.lucene.codecs.simpletext.SimpleTextPostingsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -67,25 +66,25 @@
 
   private void addDocs(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "aaa");
       writer.addDocument(doc);
     }
   }
 
   private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "bbb", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "bbb");
       writer.addDocument(doc);
     }
   }
 
   private void addDocs3(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "ccc", Field.Store.NO));
-      doc.add(newStringField("id", "" + i, Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "ccc");
+      doc.addUniqueInt("id", i);
       writer.addDocument(doc);
     }
   }
@@ -244,15 +243,14 @@
           new MockAnalyzer(random()));
       config.setOpenMode(OpenMode.CREATE_OR_APPEND);
       IndexWriter writer = newWriter(dir, config);
+      FieldTypes fieldTypes = writer.getFieldTypes();
       for (int j = 0; j < docsPerRound; j++) {
-        final Document doc = new Document();
+        final Document doc = writer.newDocument();
         for (int k = 0; k < num; k++) {
-          FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-          customType.setTokenized(random().nextBoolean());
-          customType.setOmitNorms(random().nextBoolean());
-          Field field = newField("" + k, TestUtil
-              .randomRealisticUnicodeString(random(), 128), customType);
-          doc.add(field);
+          if (random().nextBoolean()) {
+            fieldTypes.disableNorms("" + k);
+          }
+          doc.addLargeText("" + k, TestUtil.randomRealisticUnicodeString(random(), 128));
         }
         writer.addDocument(doc);
       }
@@ -303,19 +301,18 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(codec);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
+    FieldTypes fieldTypes = iw.getFieldTypes();
     // turn on vectors for the checkindex cross-check
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
-    Field idField = new Field("id", "", ft);
-    Field dateField = new Field("date", "", ft);
-    doc.add(idField);
-    doc.add(dateField);
+    fieldTypes.enableTermVectors("id");
+    fieldTypes.enableTermVectorOffsets("id");
+    fieldTypes.enableTermVectorPositions("id");
+    fieldTypes.enableTermVectors("date");
+    fieldTypes.enableTermVectorOffsets("date");
+    fieldTypes.enableTermVectorPositions("date");
     for (int i = 0; i < 100; i++) {
-      idField.setStringValue(Integer.toString(random().nextInt(50)));
-      dateField.setStringValue(Integer.toString(random().nextInt(100)));
+      Document doc = iw.newDocument();
+      doc.addLargeText("id", Integer.toString(random().nextInt(50)));
+      doc.addLargeText("date", Integer.toString(random().nextInt(100)));
       iw.addDocument(doc);
     }
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestAtomFields.java b/lucene/core/src/test/org/apache/lucene/document/TestAtomFields.java
new file mode 100644
index 0000000..3341fe3
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestAtomFields.java
@@ -0,0 +1,439 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.HalfFloat;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+
+public class TestAtomFields extends LuceneTestCase {
+
+  /** Make sure that if we index an ATOM field, the query-time analyzer uses KeywordAnalyzer for it. */
+  public void testFieldUsesKeywordAnalyzer() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    Document doc = w.newDocument();
+    doc.addAtom("id", "foo bar");
+    w.addDocument(doc);
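+    // An ATOM field is indexed as a single un-tokenized term, so the query-time
+    // analyzer must produce exactly one token, "foo bar", spanning offsets 0-7: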
+    BaseTokenStreamTestCase.assertTokenStreamContents(fieldTypes.getQueryAnalyzer().tokenStream("id", "foo bar"), new String[] {"foo bar"}, new int[1], new int[] {7});
+    w.close();
+    dir.close();
+  }
+
+  public void testBinaryAtomSort() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setSortMissingFirst("atom");
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", new BytesRef("z"));
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", new BytesRef("a"));
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
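+    // setSortMissingFirst: the doc with no "atom" value sorts before "a" and "z".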
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("2", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("1", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("0", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setSortMissingFirst("atom");
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("2", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("1", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("0", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirstReversed() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableSorting("atom", true);
+    fieldTypes.setSortMissingFirst("atom");
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
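+    // enableSorting("atom", true) makes descending the default order for this field,
+    // so newSort("atom") returns z before a, with the missing doc still first.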
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("2", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("0", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("1", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableSorting("atom");
+    fieldTypes.setSortMissingLast("atom");
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("1", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("0", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("2", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLastReversed() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableSorting("atom", true);
+    fieldTypes.setSortMissingLast("atom");
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
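+    // Descending default order again, but now the doc with no value is forced last.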
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("0", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("1", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("2", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingDefault() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
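+    // With no explicit missing-value policy, the doc without "atom" sorts after all present values.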
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("1", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("0", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("2", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingDefaultReversed() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "z");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("atom", "a");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+    
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("atom", true));
+    assertEquals(3, hits.totalHits);
+    assertEquals("0", s.doc(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("1", s.doc(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("2", s.doc(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMinMaxAtom() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMinMaxTokenLength("field", 2, 7);
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addAtom("field", "a");
+    doc.addAtom("field", "ab");
+    doc.addAtom("field", "goodbyeyou");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
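+    // Values outside the 2-7 length window ("a", "goodbyeyou") were never indexed.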
+    assertEquals(0, hitCount(s, fieldTypes.newExactStringQuery("field", "a")));
+    assertEquals(1, hitCount(s, fieldTypes.newExactStringQuery("field", "ab")));
+    assertEquals(0, hitCount(s, fieldTypes.newExactStringQuery("field", "goodbyeyou")));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMinMaxBinaryAtom() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMinMaxTokenLength("field", 2, 7);
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addAtom("field", new BytesRef(new byte[1]));
+    doc.addAtom("field", new BytesRef(new byte[2]));
+    doc.addAtom("field", new BytesRef(new byte[10]));
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
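+    // The same length limits apply to binary atoms: only the 2-byte value was indexed.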
+    assertEquals(0, hitCount(s, fieldTypes.newExactBinaryQuery("field", new byte[1])));
+    assertEquals(1, hitCount(s, fieldTypes.newExactBinaryQuery("field", new byte[2])));
+    assertEquals(0, hitCount(s, fieldTypes.newExactBinaryQuery("field", new byte[10])));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testReversedStringAtom() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setReversedTerms("field");
+    Document doc = w.newDocument();
+    doc.addAtom("field", "Foobar");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
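+    // setReversedTerms indexes each term in reversed form; searching for "rabooF" finds "Foobar".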
+    assertEquals(1, hitCount(s, fieldTypes.newExactStringQuery("field", "rabooF")));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testReversedBinaryAtom() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setReversedTerms("field");
+    Document doc = w.newDocument();
+    doc.addAtom("field", new BytesRef("Foobar"));
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, hitCount(s, fieldTypes.newExactBinaryQuery("field", new BytesRef("rabooF"))));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    assumeTrue("DV format not supported", Arrays.asList("Memory", "SimpleText").contains(TestUtil.getDocValuesFormat("field")) == false);
+
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addAtom("field", "zzz");
+    doc.addAtom("field", "a");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addAtom("field", "d");
+    doc.addAtom("field", "m");
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("field"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedStringSortSelector("field", SortedSetSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("field"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestBigDecimalFields.java b/lucene/core/src/test/org/apache/lucene/document/TestBigDecimalFields.java
new file mode 100644
index 0000000..f7cd63e
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestBigDecimalFields.java
@@ -0,0 +1,480 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+// We need DV random-access ords support for multi-valued sorting:
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
+public class TestBigDecimalFields extends LuceneTestCase {
+
+  private BigDecimal make(String token, int scale) {
+    return new BigDecimal(new BigInteger(token), scale);
+  }
+
+  public void testRange1() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 30, 2);
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
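+    // Width and scale are fixed up front so every value encodes to the same sortable
+    // byte length; values with a different scale are rejected (see testWrongScale).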
+
+    Document doc = w.newDocument();
+    doc.addBigDecimal("num", make("3000000000000000000", 2));
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigDecimal("num", make("2000000000000000000", 2));
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigDecimal("num", make("7000000000000000000", 2));
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure the range filter matches the right number of documents
+    assertEquals(2, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigDecimalRangeFilter("num",
+                                                                 make("0", 2), true,
+                                                                 make("3000000000000000000", 2), true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigDecimalRangeFilter("num",
+                                                                 make("0", 2), true,
+                                                                 make("10000000000000000000", 2), true), 1).totalHits);
+    TopDocs hits = s.search(new MatchAllDocsQuery(),
+                            fieldTypes.newBigDecimalRangeFilter("num",
+                                                                make("1000000000000000000", 2), true,
+                                                                make("2500000000000000000", 2), true), 1);
+    assertEquals(1, hits.totalHits);
+    assertEquals(make("2000000000000000000", 2), s.doc(hits.scoreDocs[0].doc).getBigDecimal("num"));
+
+    doc = w.newDocument();
+    doc.addBigDecimal("num", make("17", 2));
+    doc.addAtom("id", "four");
+    w.addDocument(doc);
+    w.forceMerge(1);
+    r.close();
+    r = DirectoryReader.open(w, true);
+    s = newSearcher(r);
+
+    assertEquals(4, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigDecimalRangeFilter("num",
+                                                                 make("0", 2), true,
+                                                                 make("10000000000000000000", 2), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRange2() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 3, 1);
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addBigDecimal("num", make("-100", 1));
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigDecimal("num", make("200", 1));
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure the range filter matches the right number of documents
+    assertEquals(1, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigDecimalRangeFilter("num",
+                                                                 make("-200", 1), true,
+                                                                 make("17", 1), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  private BigDecimal randomBigDecimal(int scale) {
+    BigInteger big = new BigInteger(TestUtil.nextInt(random(), 4, 100), random()); 
+    if (random().nextBoolean()) {
+      // nocommit why only positive?
+      big = big.negate();
+    }
+    return new BigDecimal(big, scale);
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", TestUtil.nextInt(random(), 20, 100), 4);
+    List<BigDecimal> values = new ArrayList<>();
+    Map<BigDecimal,Integer> valueCounts = new HashMap<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      BigDecimal big = randomBigDecimal(4);
+      values.add(big);
+      Integer cur = valueCounts.get(big);
+      if (cur == null) {
+        cur = 0;
+      }
+      cur++;
+      valueCounts.put(big, cur);
+      doc.addBigDecimal("num", big);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " big=" + big);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
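+    // Each iteration picks a random inclusive [min, max] range, computes the expected
+    // ids by brute force, then checks both the filter's hit set and the sort order.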
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      BigDecimal x = randomBigDecimal(4);
+      BigDecimal y = randomBigDecimal(4);
+
+      BigDecimal min, max;
+      if (x.compareTo(y) < 0) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        BigDecimal value = values.get(i);
+        if (value.compareTo(min) >= 0 && value.compareTo(max) <= 0) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter = fieldTypes.newBigDecimalRangeFilter("num", min, true, max, true);
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      BigDecimal last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        BigDecimal v = doc.getBigDecimal("num");
+        wrongValues |= v.equals(((FieldDoc) hit).fields[0]) == false;
+        if (last != null) {
+          int cmp = last.compareTo(v);
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    for (BigDecimal value : values) {
+      assertEquals(valueCounts.get(value).intValue(), s.search(fieldTypes.newBigDecimalTermQuery("num", value), 1).totalHits);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 10);
+    fieldTypes.setSortMissingFirst("num");
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigDecimal("num", make("45", 10));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigDecimal("num", make("-2", 10));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(1, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[2].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, -4);
+    fieldTypes.setSortMissingLast("num");
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigDecimal("num", make("45", -4));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigDecimal("num", make("-2", -4));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 0);
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigDecimal("num", make("45", 0));
+    doc.addBigDecimal("num", make("-22", 0));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigDecimal("num", make("-2", 0));
+    doc.addBigDecimal("num", make("14", 0));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
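+    // Big decimals sort by their fixed-width byte encoding (sorted-set ords), hence the string sort selector: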
+    fieldTypes.setMultiValuedStringSortSelector("num", SortedSetSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 4);
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigDecimal("num", make("45", 4));
+    doc.addBigDecimal("num", make("-22", 4));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigDecimal("num", make("-2", 4));
+    doc.addBigDecimal("num", make("14", 4));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newBigDecimalRangeFilter("num", make("-100", 4), true, make("100", 4), true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newBigDecimalRangeFilter("num", make("40", 4), true, make("45", 4), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 4);
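+    // Stored-only values still need width/scale configured (see testExcStoredNoScale).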
+    Document doc = w.newDocument();
+    doc.addStoredBigDecimal("num", make("100", 4));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(make("100", 4), doc.getBigDecimal("num"));
+    r.close();
+    w.close();
+  }
+
+  public void testExcStoredNoScale() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addStoredBigDecimal("num", make("100", 4)),
+               "field \"num\": cannot addStored: you must first record the byte width and scale for this BIG_DECIMAL field");
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 4);
+    doc.addBigDecimal("num", make("100", 4));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredBigDecimal("num", make("200", 4)),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 7);
+    Document doc = w.newDocument();
+    doc.addStoredBigDecimal("num", make("100", 7));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addBigDecimal("num", make("200", 7)),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+
+  public void testTooLarge() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 10, 4);
+    doc.addBigDecimal("num", make("1000000000000000000000000", 4));
+    shouldFail(() -> w.addDocument(doc),
+               "field \"num\": BigInteger 1000000000000000000000000 exceeds allowed byte width 10");
+    w.close();
+  }
+
+  public void testWrongScale() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 10, 4);
+    doc.addBigDecimal("num", make("1000000000", 2));
+    shouldFail(() -> w.addDocument(doc),
+               "field \"num\": BIG_DECIMAL was configured with scale=4, but indexed value has scale=2");
+    w.close();
+  }
+
+  public void testWrongScaleJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigDecimalByteWidthAndScale("num", 20, 4);
+    Document doc = w.newDocument();
+    doc.addStoredBigDecimal("num", make("100", 5));
+    shouldFail(() -> w.addDocument(doc),
+               "field \"num\": BIG_DECIMAL was configured with scale=4, but stored value has scale=5");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestBigIntegerFields.java b/lucene/core/src/test/org/apache/lucene/document/TestBigIntegerFields.java
new file mode 100644
index 0000000..30c55ef
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestBigIntegerFields.java
@@ -0,0 +1,435 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+// We need DV random-access ords support for multi-valued sorting:
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
+public class TestBigIntegerFields extends LuceneTestCase {
+  public void testRange1() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 30);
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
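+    // Values wider than the configured byte width are rejected at addDocument time (see testTooLarge).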
+
+    Document doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("3000000000000000000"));
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("2000000000000000000"));
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("7000000000000000000"));
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure the range filter matches the right number of documents
+    assertEquals(2, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigIntRangeFilter("num",
+                                                             new BigInteger("0"), true,
+                                                             new BigInteger("3000000000000000000"), true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigIntRangeFilter("num",
+                                                             new BigInteger("0"), true,
+                                                             new BigInteger("10000000000000000000"), true), 1).totalHits);
+    TopDocs hits = s.search(new MatchAllDocsQuery(),
+                            fieldTypes.newBigIntRangeFilter("num",
+                                                            new BigInteger("1000000000000000000"), true,
+                                                            new BigInteger("2500000000000000000"), true), 1);
+    assertEquals(1, hits.totalHits);
+    assertEquals(new BigInteger("2000000000000000000"), s.doc(hits.scoreDocs[0].doc).getBigInteger("num"));
+
+    doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("17"));
+    doc.addAtom("id", "four");
+    w.addDocument(doc);
+    w.forceMerge(1);
+    r.close();
+    r = DirectoryReader.open(w, true);
+    s = newSearcher(r);
+
+    assertEquals(4, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigIntRangeFilter("num",
+                                                             new BigInteger("0"), true,
+                                                             new BigInteger("10000000000000000000"), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRange2() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 3);
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("-100"));
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addBigInteger("num", new BigInteger("200"));
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure the range filter matches the right number of documents
+    assertEquals(1, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newBigIntRangeFilter("num",
+                                                             new BigInteger("-200"), true,
+                                                             new BigInteger("17"), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  private BigInteger randomBigInt() {
+    BigInteger big = new BigInteger(TestUtil.nextInt(random(), 4, 100), random()); 
+    if (random().nextBoolean()) {
+      // nocommit why only positive?
+      big = big.negate();
+    }
+    return big;
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", TestUtil.nextInt(random(), 20, 100));
+    List<BigInteger> values = new ArrayList<>();
+    Map<BigInteger,Integer> valueCounts = new HashMap<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      BigInteger big = randomBigInt();
+      values.add(big);
+      Integer cur = valueCounts.get(big);
+      if (cur == null) {
+        cur = 0;
+      }
+      cur++;
+      valueCounts.put(big, cur);
+      doc.addBigInteger("num", big);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " big=" + big);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      BigInteger x = randomBigInt();
+      BigInteger y = randomBigInt();
+
+      BigInteger min, max;
+      if (x.compareTo(y) < 0) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        BigInteger value = values.get(i);
+        if (value.compareTo(min) >= 0 && value.compareTo(max) <= 0) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter = fieldTypes.newBigIntRangeFilter("num", min, true, max, true);
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      BigInteger last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        BigInteger v = doc.getBigInteger("num");
+        wrongValues |= v.equals(((FieldDoc) hit).fields[0]) == false;
+        if (last != null) {
+          int cmp = last.compareTo(v);
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    for (BigInteger value : values) {
+      assertEquals(valueCounts.get(value).intValue(), s.search(fieldTypes.newBigIntTermQuery("num", value), 1).totalHits);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 20);
+    fieldTypes.setSortMissingFirst("num");
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigInteger("num", BigInteger.valueOf(45));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigInteger("num", BigInteger.valueOf(-2));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(1, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[2].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 20);
+    fieldTypes.setSortMissingLast("num");
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigInteger("num", BigInteger.valueOf(45));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigInteger("num", BigInteger.valueOf(-2));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+    fieldTypes.setBigIntByteWidth("num", 20);
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigInteger("num", BigInteger.valueOf(45));
+    doc.addBigInteger("num", BigInteger.valueOf(-22));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigInteger("num", BigInteger.valueOf(-2));
+    doc.addBigInteger("num", BigInteger.valueOf(14));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedStringSortSelector("num", SortedSetSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+    fieldTypes.setBigIntByteWidth("num", 20);
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addBigInteger("num", BigInteger.valueOf(45));
+    doc.addBigInteger("num", BigInteger.valueOf(-22));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addBigInteger("num", BigInteger.valueOf(-2));
+    doc.addBigInteger("num", BigInteger.valueOf(14));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newBigIntRangeFilter("num", BigInteger.valueOf(-100), true, BigInteger.valueOf(100), true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newBigIntRangeFilter("num", BigInteger.valueOf(40), true, BigInteger.valueOf(45), true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredBigInteger("num", BigInteger.valueOf(100));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(BigInteger.valueOf(100), doc.getBigInteger("num"));
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 20);
+    Document doc = w.newDocument();
+    doc.addBigInteger("num", BigInteger.valueOf(100));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredBigInteger("num", BigInteger.valueOf(200)),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredBigInteger("num", BigInteger.valueOf(100));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addBigInteger("num", BigInteger.valueOf(200)),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+
+  public void testTooLarge() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setBigIntByteWidth("num", 10);
+    doc.addBigInteger("num", new BigInteger("1000000000000000000000000"));
+    shouldFail(() -> w.addDocument(doc),
+               "field \"num\": BigInteger 1000000000000000000000000 exceeds allowed byte width 10");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
index 90a3189..02e5cf3 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
@@ -1,14 +1,5 @@
 package org.apache.lucene.document;
 
-import java.nio.charset.StandardCharsets;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -26,6 +17,14 @@
  * limitations under the License.
  */
 
+import java.nio.charset.StandardCharsets;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
 /**
  * Tests {@link Document} class.
  */
@@ -37,38 +36,33 @@
   public void testBinaryFieldInIndex()
     throws Exception
   {
-    FieldType ft = new FieldType();
-    ft.setStored(true);
-    StoredField binaryFldStored = new StoredField("binaryStored", binaryValStored.getBytes(StandardCharsets.UTF_8));
-    Field stringFldStored = new Field("stringStored", binaryValStored, ft);
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     
-    doc.add(binaryFldStored);
-    
-    doc.add(stringFldStored);
+    doc.addStoredBinary("binaryStored", binaryValStored.getBytes(StandardCharsets.UTF_8));
+    doc.addStoredString("stringStored", binaryValStored);
 
     /** test for field count */
     assertEquals(2, doc.getFields().size());
     
     /** add the doc to a ram index */
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     writer.addDocument(doc);
     
     /** open a reader and fetch the document */ 
     IndexReader reader = writer.getReader();
-    StoredDocument docFromReader = reader.document(0);
+    Document docFromReader = reader.document(0);
     assertTrue(docFromReader != null);
     
     /** fetch the binary stored field and compare its content with the original one */
-    BytesRef bytes = docFromReader.getBinaryValue("binaryStored");
+    BytesRef bytes = docFromReader.getBinary("binaryStored");
     assertNotNull(bytes);
     String binaryFldStoredTest = new String(bytes.bytes, bytes.offset, bytes.length, StandardCharsets.UTF_8);
     assertTrue(binaryFldStoredTest.equals(binaryValStored));
     
     /** fetch the string field and compare its content with the original one */
-    String stringFldStoredTest = docFromReader.get("stringStored");
+    String stringFldStoredTest = docFromReader.getString("stringStored");
     assertTrue(stringFldStoredTest.equals(binaryValStored));
     
     writer.close();
@@ -77,28 +71,26 @@
   }
   
   public void testCompressionTools() throws Exception {
-    StoredField binaryFldCompressed = new StoredField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes(StandardCharsets.UTF_8)));
-    StoredField stringFldCompressed = new StoredField("stringCompressed", CompressionTools.compressString(binaryValCompressed));
-    
-    Document doc = new Document();
-    
-    doc.add(binaryFldCompressed);
-    doc.add(stringFldCompressed);
-    
-    /** add the doc to a ram index */
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+
+    Document doc = writer.newDocument();
+    
+    doc.addStoredBinary("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes(StandardCharsets.UTF_8)));
+    doc.addStoredBinary("stringCompressed", CompressionTools.compressString(binaryValCompressed));
+    
+    /** add the doc to a ram index */
     writer.addDocument(doc);
     
     /** open a reader and fetch the document */ 
     IndexReader reader = writer.getReader();
-    StoredDocument docFromReader = reader.document(0);
+    Document docFromReader = reader.document(0);
     assertTrue(docFromReader != null);
     
     /** fetch the binary compressed field and compare its content with the original one */
-    String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed")), StandardCharsets.UTF_8);
+    String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinary("binaryCompressed")), StandardCharsets.UTF_8);
     assertTrue(binaryFldCompressedTest.equals(binaryValCompressed));
-    assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed));
+    assertTrue(CompressionTools.decompressString(docFromReader.getBinary("stringCompressed")).equals(binaryValCompressed));
 
     writer.close();
     reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDateFields.java b/lucene/core/src/test/org/apache/lucene/document/TestDateFields.java
new file mode 100644
index 0000000..33e1516
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestDateFields.java
@@ -0,0 +1,213 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
+import java.util.TimeZone;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestDateFields extends LuceneTestCase {
+  static SimpleDateFormat parser = new SimpleDateFormat("MM/dd/yyyy", Locale.ROOT);
+  static {
+    parser.setTimeZone(TimeZone.getTimeZone("GMT"));
+  }
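+  // All dates below are parsed as MM/dd/yyyy in GMT, keeping the tests timezone-independent.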
+
+  public void testDateSort() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    Date date0 = parser.parse("10/22/2014");
+    doc.addDate("date", date0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    Date date1 = parser.parse("10/21/2015");
+    doc.addDate("date", date1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
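+    // The second argument to enableSorting requests reverse order, so the
+    // most recent date sorts first: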
+    w.getFieldTypes().enableSorting("date", true);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("date"));
+    assertEquals(2, hits.totalHits);
+    Document hit = s.doc(hits.scoreDocs[0].doc);
+    assertEquals("1", hit.getString("id"));
+    assertEquals(date1, hit.getDate("date"));
+    hit = s.doc(hits.scoreDocs[1].doc);
+    assertEquals("0", hit.getString("id"));
+    assertEquals(date0, hit.getDate("date"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDateRangeFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    Date date0 = parser.parse("10/22/2014");
+    doc.addDate("date", date0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    Date date1 = parser.parse("10/21/2015");
+    doc.addDate("date", date1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
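+    // Exercise all four inclusive/exclusive combinations of the range bounds: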
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("date", date0, true, date1, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("date", date0, true, date1, false), 1).totalHits);
+    assertEquals(0, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("date", date0, false, date1, false), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("date", parser.parse("10/21/2014"), false, parser.parse("10/23/2014"), false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDateDocValuesRangeFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    Date date0 = parser.parse("10/22/2014");
+    doc.addDate("date", date0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    Date date1 = parser.parse("10/21/2015");
+    doc.addDate("date", date1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
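+    // Same bound combinations as above, but using the doc-values range filter: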
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newDocValuesRangeFilter("date", date0, true, date1, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newDocValuesRangeFilter("date", date0, true, date1, false), 1).totalHits);
+    assertEquals(0, s.search(new MatchAllDocsQuery(), fieldTypes.newDocValuesRangeFilter("date", date0, false, date1, false), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newDocValuesRangeFilter("date", parser.parse("10/21/2014"), false, parser.parse("10/23/2014"), false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addDate("num", parser.parse("10/25/2014"));
+    doc.addDate("num", parser.parse("10/2/2014"));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addDate("num", parser.parse("10/10/2014"));
+    doc.addDate("num", parser.parse("10/20/2014"));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
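+    // With the MAX selector each document sorts by its largest date, so doc 2
+    // (max 10/20/2014) now precedes doc 0 (max 10/25/2014):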
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+  }
+
+  public void testJustStored() throws Exception {
+    Date date = parser.parse("10/22/2014");
+
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredDate("num", date);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(date, doc.getDate("num"));
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addDate("num", parser.parse("10/22/2014"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredDate("num", parser.parse("10/27/2014")),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredDate("num", parser.parse("10/22/2014"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addDate("num", parser.parse("10/27/2014")),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDateTools.java b/lucene/core/src/test/org/apache/lucene/document/TestDateTools.java
index 779f58e..d43727b 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestDateTools.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestDateTools.java
@@ -27,6 +27,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 public class TestDateTools extends LuceneTestCase {
   @Rule
   public TestRule testRules = 
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java
index e28170c..84f0077 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestDocument.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestDocument.java
@@ -19,368 +19,992 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
 
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.blocktree.Stats;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.Version;
 
-
-/**
- * Tests {@link Document} class.
- */
 public class TestDocument extends LuceneTestCase {
-  
-  String binaryVal = "this text will be stored as a byte array in the index";
-  String binaryVal2 = "this text will be also stored as a byte array in the index";
-  
-  public void testBinaryField() throws Exception {
-    Document doc = new Document();
-    
-    FieldType ft = new FieldType();
-    ft.setStored(true);
-    Field stringFld = new Field("string", binaryVal, ft);
-    StoredField binaryFld = new StoredField("binary", binaryVal.getBytes(StandardCharsets.UTF_8));
-    StoredField binaryFld2 = new StoredField("binary", binaryVal2.getBytes(StandardCharsets.UTF_8));
-    
-    doc.add(stringFld);
-    doc.add(binaryFld);
-    
-    assertEquals(2, doc.getFields().size());
-    
-    assertTrue(binaryFld.binaryValue() != null);
-    assertTrue(binaryFld.fieldType().stored());
-    assertEquals(IndexOptions.NONE, binaryFld.fieldType().indexOptions());
-    
-    String binaryTest = doc.getBinaryValue("binary").utf8ToString();
-    assertTrue(binaryTest.equals(binaryVal));
-    
-    String stringTest = doc.get("string");
-    assertTrue(binaryTest.equals(stringTest));
-    
-    doc.add(binaryFld2);
-    
-    assertEquals(3, doc.getFields().size());
-    
-    BytesRef[] binaryTests = doc.getBinaryValues("binary");
-    
-    assertEquals(2, binaryTests.length);
-    
-    binaryTest = binaryTests[0].utf8ToString();
-    String binaryTest2 = binaryTests[1].utf8ToString();
-    
-    assertFalse(binaryTest.equals(binaryTest2));
-    
-    assertTrue(binaryTest.equals(binaryVal));
-    assertTrue(binaryTest2.equals(binaryVal2));
-    
-    doc.removeField("string");
-    assertEquals(2, doc.getFields().size());
-    
-    doc.removeFields("binary");
-    assertEquals(0, doc.getFields().size());
-  }
-  
-  /**
-   * Tests {@link Document#removeField(String)} method for a brand new Document
-   * that has not been indexed yet.
-   * 
-   * @throws Exception on error
-   */
-  public void testRemoveForNewDocument() throws Exception {
-    Document doc = makeDocumentWithFields();
-    assertEquals(10, doc.getFields().size());
-    doc.removeFields("keyword");
-    assertEquals(8, doc.getFields().size());
-    doc.removeFields("doesnotexists"); // removing non-existing fields is
-                                       // siltenlty ignored
-    doc.removeFields("keyword"); // removing a field more than once
-    assertEquals(8, doc.getFields().size());
-    doc.removeField("text");
-    assertEquals(7, doc.getFields().size());
-    doc.removeField("text");
-    assertEquals(6, doc.getFields().size());
-    doc.removeField("text");
-    assertEquals(6, doc.getFields().size());
-    doc.removeField("doesnotexists"); // removing non-existing fields is
-                                      // siltenlty ignored
-    assertEquals(6, doc.getFields().size());
-    doc.removeFields("unindexed");
-    assertEquals(4, doc.getFields().size());
-    doc.removeFields("unstored");
-    assertEquals(2, doc.getFields().size());
-    doc.removeFields("doesnotexists"); // removing non-existing fields is
-                                       // siltenlty ignored
-    assertEquals(2, doc.getFields().size());
-    
-    doc.removeFields("indexed_not_tokenized");
-    assertEquals(0, doc.getFields().size());
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    Codec.setDefault(new Lucene50Codec());
   }
 
-  public void testConstructorExceptions() throws Exception {
-    FieldType ft = new FieldType();
-    ft.setStored(true);
-    new Field("name", "value", ft); // okay
-    new StringField("name", "value", Field.Store.NO); // okay
-    try {
-      new Field("name", "value", new FieldType());
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected exception
-    }
-
+  public void testBasic() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    new Field("name", "value", ft); // okay
-    Document doc = new Document();
-    FieldType ft2 = new FieldType();
-    ft2.setStored(true);
-    ft2.setStoreTermVectors(true);
-    doc.add(new Field("name", "value", ft2));
-    try {
-      w.addDocument(doc);
-      fail();
-    } catch (IllegalArgumentException e) {
-      // expected exception
-    }
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    doc.addLargeText("body", "some text");
+    doc.addShortText("title", "a title");
+    doc.addAtom("id", "29jafnn");
+    doc.addStoredBinary("bytes", new BytesRef(new byte[7]));
+    doc.addInt("int", 17);
+    w.addDocument(doc);
     w.close();
     dir.close();
   }
 
-  public void testClearDocument() {
-    Document doc = makeDocumentWithFields();
-    assertEquals(10, doc.getFields().size());
-    doc.clear();
-    assertEquals(0, doc.getFields().size());
-  }
-
-  public void testGetFieldsImmutable() {
-    Document doc = makeDocumentWithFields();
-    assertEquals(10, doc.getFields().size());
-    List<Field> fields = doc.getFields();
-    try {
-      fields.add( new StringField("name", "value", Field.Store.NO) );
-      fail("Document.getFields() should return immutable List");
-    }
-    catch (UnsupportedOperationException e) {
-      // OK
-    }
-
-    try {
-      fields.clear();
-      fail("Document.getFields() should return immutable List");
-    }
-    catch (UnsupportedOperationException e) {
-      // OK
-    }
-  }
-  
-  /**
-   * Tests {@link Document#getValues(String)} method for a brand new Document
-   * that has not been indexed yet.
-   * 
-   * @throws Exception on error
-   */
-  public void testGetValuesForNewDocument() throws Exception {
-    doAssert(makeDocumentWithFields(), false);
-  }
-  
-  /**
-   * Tests {@link Document#getValues(String)} method for a Document retrieved
-   * from an index.
-   * 
-   * @throws Exception on error
-   */
-  public void testGetValuesForIndexedDocument() throws Exception {
+  public void testBinaryAtom() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    writer.addDocument(makeDocumentWithFields());
-    IndexReader reader = writer.getReader();
-    
-    IndexSearcher searcher = newSearcher(reader);
-    
-    // search for something that does exists
-    Query query = new TermQuery(new Term("keyword", "test1"));
-    
-    // ensure that queries return expected results without DateFilter first
-    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals(1, hits.length);
-    
-    doAssert(searcher.doc(hits[0].doc));
-    writer.close();
-    reader.close();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addAtom("binary", new BytesRef(new byte[5]));
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newExactBinaryQuery("binary", new byte[5]), 1).totalHits);
+    r.close();
+    w.close();
     dir.close();
   }
 
-  public void testGetValues() {
-    Document doc = makeDocumentWithFields();
-    assertEquals(new String[] {"test1", "test2"},
-                 doc.getValues("keyword"));
-    assertEquals(new String[] {"test1", "test2"},
-                 doc.getValues("text"));
-    assertEquals(new String[] {"test1", "test2"},
-                 doc.getValues("unindexed"));
-    assertEquals(new String[0],
-                 doc.getValues("nope"));
-  }
-  
-  public void testPositionIncrementMultiFields() throws Exception {
+  public void testBinaryAtomSort() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    writer.addDocument(makeDocumentWithFields());
-    IndexReader reader = writer.getReader();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("id");
+    // Sort reverse by default:
+    fieldTypes.enableSorting("binary", true);
+
+    Document doc = w.newDocument();
+    byte[] value = new byte[5];
+    value[0] = 1;
+    doc.addAtom("id", "0");
+    doc.addAtom("binary", new BytesRef(value));
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "1");
+    doc.addAtom("binary", new BytesRef(new byte[5]));
+    w.addDocument(doc);
     
-    IndexSearcher searcher = newSearcher(reader);
-    PhraseQuery query = new PhraseQuery();
-    query.add(new Term("indexed_not_tokenized", "test1"));
-    query.add(new Term("indexed_not_tokenized", "test2"));
-    
-    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals(1, hits.length);
-    
-    doAssert(searcher.doc(hits[0].doc));
-    writer.close();
-    reader.close();
-    dir.close();    
-  }
-  
-  private Document makeDocumentWithFields() {
-    Document doc = new Document();
-    FieldType stored = new FieldType();
-    stored.setStored(true);
-    FieldType indexedNotTokenized = new FieldType();
-    indexedNotTokenized.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    indexedNotTokenized.setTokenized(false);
-    doc.add(new StringField("keyword", "test1", Field.Store.YES));
-    doc.add(new StringField("keyword", "test2", Field.Store.YES));
-    doc.add(new TextField("text", "test1", Field.Store.YES));
-    doc.add(new TextField("text", "test2", Field.Store.YES));
-    doc.add(new Field("unindexed", "test1", stored));
-    doc.add(new Field("unindexed", "test2", stored));
-    doc.add(new TextField("unstored", "test1", Field.Store.NO));
-    doc.add(new TextField("unstored", "test2", Field.Store.NO));
-    doc.add(new Field("indexed_not_tokenized", "test1", indexedNotTokenized));
-    doc.add(new Field("indexed_not_tokenized", "test2", indexedNotTokenized));
-    return doc;
-  }
-  
-  private void doAssert(StoredDocument doc) {
-    doAssert(new Document(doc), true);
-  }
-  private void doAssert(Document doc, boolean fromIndex) {
-    StorableField[] keywordFieldValues = doc.getFields("keyword");
-    StorableField[] textFieldValues = doc.getFields("text");
-    StorableField[] unindexedFieldValues = doc.getFields("unindexed");
-    StorableField[] unstoredFieldValues = doc.getFields("unstored");
-    
-    assertTrue(keywordFieldValues.length == 2);
-    assertTrue(textFieldValues.length == 2);
-    assertTrue(unindexedFieldValues.length == 2);
-    // this test cannot work for documents retrieved from the index
-    // since unstored fields will obviously not be returned
-    if (!fromIndex) {
-      assertTrue(unstoredFieldValues.length == 2);
-    }
-    
-    assertTrue(keywordFieldValues[0].stringValue().equals("test1"));
-    assertTrue(keywordFieldValues[1].stringValue().equals("test2"));
-    assertTrue(textFieldValues[0].stringValue().equals("test1"));
-    assertTrue(textFieldValues[1].stringValue().equals("test2"));
-    assertTrue(unindexedFieldValues[0].stringValue().equals("test1"));
-    assertTrue(unindexedFieldValues[1].stringValue().equals("test2"));
-    // this test cannot work for documents retrieved from the index
-    // since unstored fields will obviously not be returned
-    if (!fromIndex) {
-      assertTrue(unstoredFieldValues[0].stringValue().equals("test1"));
-      assertTrue(unstoredFieldValues[1].stringValue().equals("test2"));
-    }
-  }
-  
-  public void testFieldSetValue() throws Exception {
-    
-    Field field = new StringField("id", "id1", Field.Store.YES);
-    Document doc = new Document();
-    doc.add(field);
-    doc.add(new StringField("keyword", "test", Field.Store.YES));
-    
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    writer.addDocument(doc);
-    field.setStringValue("id2");
-    writer.addDocument(doc);
-    field.setStringValue("id3");
-    writer.addDocument(doc);
-    
-    IndexReader reader = writer.getReader();
-    IndexSearcher searcher = newSearcher(reader);
-    
-    Query query = new TermQuery(new Term("keyword", "test"));
-    
-    // ensure that queries return expected results without DateFilter first
-    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals(3, hits.length);
-    int result = 0;
-    for (int i = 0; i < 3; i++) {
-      StoredDocument doc2 = searcher.doc(hits[i].doc);
-      Field f = (Field) doc2.getField("id");
-      if (f.stringValue().equals("id1")) result |= 1;
-      else if (f.stringValue().equals("id2")) result |= 2;
-      else if (f.stringValue().equals("id3")) result |= 4;
-      else fail("unexpected id field");
-    }
-    writer.close();
-    reader.close();
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("binary"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
     dir.close();
-    assertEquals("did not see all IDs", 7, result);
   }
-  
-  // LUCENE-3616
-  public void testInvalidFields() {
-    try {
-      Tokenizer tok = new MockTokenizer();
-      tok.setReader(new StringReader(""));
-      new Field("foo", tok, StringField.TYPE_STORED);
-      fail("did not hit expected exc");
-    } catch (IllegalArgumentException iae) {
-      // expected
-    } catch (IOException ioe) {
-      throw new RuntimeException(ioe);
-    }
-  }
-  
-  public void testNumericFieldAsString() throws Exception {
-    Document doc = new Document();
-    doc.add(new IntField("int", 5, Field.Store.YES));
-    assertEquals("5", doc.get("int"));
-    assertNull(doc.get("somethingElse"));
-    doc.add(new IntField("int", 4, Field.Store.YES));
-    assertArrayEquals(new String[] { "5", "4" }, doc.getValues("int"));
-    
+
+  public void testBinaryStored() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    iw.addDocument(doc);
-    DirectoryReader ir = iw.getReader();
-    StoredDocument sdoc = ir.document(0);
-    assertEquals("5", sdoc.get("int"));
-    assertNull(sdoc.get("somethingElse"));
-    assertArrayEquals(new String[] { "5", "4" }, sdoc.getValues("int"));
-    ir.close();
-    iw.close();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    doc.addStoredBinary("binary", new BytesRef(new byte[5]));
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    assertEquals(new BytesRef(new byte[5]), r.document(0).getBinary("binary"));
+    r.close();
+    w.close();
     dir.close();
   }
+
+  public void testSortedSetDocValues() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("sortedset");
+    fieldTypes.setDocValuesType("sortedset", DocValuesType.SORTED_SET);
+
+    Document doc = w.newDocument();
+    doc.addAtom("sortedset", "one");
+    doc.addAtom("sortedset", "two");
+    doc.addAtom("sortedset", "three");
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    SortedSetDocValues ssdv = MultiDocValues.getSortedSetValues(r, "sortedset");
+    ssdv.setDocument(0);
+
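+    // Ords come back in term (unicode) order: "one" < "three" < "two".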
+    long ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("one"), ssdv.lookupOrd(ord));
+
+    ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("three"), ssdv.lookupOrd(ord));
+
+    ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("two"), ssdv.lookupOrd(ord));
+
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, ssdv.nextOrd());
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  private TopDocs search(IndexSearcher s, Filter filter, int count) throws IOException {
+    return s.search(new ConstantScoreQuery(filter), count);
+  }
+
+  public void testExcAnalyzerForAtomField() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    Document doc = w.newDocument();
+    doc.addAtom("atom", "foo");
+    shouldFail(() -> fieldTypes.setAnalyzer("atom", new MockAnalyzer(random())),
+               "field \"atom\": type ATOM cannot have an indexAnalyzer");
+    w.close();
+    dir.close();
+  }
+
+  // Can't ask for SORTED dv but then add the field as a number
+  public void testExcInvalidDocValuesTypeFirst() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("string", DocValuesType.SORTED);
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addInt("string", 17),
+               "field \"string\": type INT must use NUMERIC or SORTED_NUMERIC docValuesType; got: SORTED");
+    doc.addAtom("string", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Can't ask for BINARY dv but then add the field as a number
+  public void testExcInvalidBinaryDocValuesTypeFirst() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("binary", DocValuesType.BINARY);
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addInt("binary", 17),
+               "field \"binary\": type INT must use NUMERIC or SORTED_NUMERIC docValuesType; got: BINARY");
+    doc.addAtom("binary", new BytesRef(new byte[7]));
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Cannot store Reader:
+  public void testExcStoreReaderFields() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("body");
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addLargeText("body", new StringReader("a small string")),
+               "field \"body\": can only store String large text fields");
+    doc.addLargeText("body", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Cannot store TokenStream:
+  public void testExcStorePreTokenizedFields() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("body");
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addLargeText("body", new CannedTokenStream()),
+               "field \"body\": can only store String large text fields");
+    doc.addLargeText("body", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testSortable() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    // Normally sorting is not enabled for atom fields:
+    fieldTypes.enableSorting("id", true);
+    fieldTypes.enableStored("id");
+
+    Document doc = w.newDocument();
+
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+    doc = w.newDocument();
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("id"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedString() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    fieldTypes.setMultiValued("strings");
+    fieldTypes.enableSorting("strings");
+    fieldTypes.enableStored("id");
+
+    Document doc = w.newDocument();
+    doc.addAtom("strings", "abc");
+    doc.addAtom("strings", "baz");
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("strings", "aaa");
+    doc.addAtom("strings", "bbb");
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("strings"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.BINARY
+  public void testExcMultiValuedDVBinary() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("binary", DocValuesType.BINARY);
+    shouldFail(() -> fieldTypes.setMultiValued("binary"),
+               "field \"binary\": DocValuesType=BINARY cannot be multi-valued");
+    assertFalse(fieldTypes.getMultiValued("binary"));
+    Document doc = w.newDocument();
+    doc.addStoredBinary("binary", new BytesRef(new byte[7]));
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.SORTED
+  public void testExcMultiValuedDVSorted() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("sorted", DocValuesType.SORTED);
+    shouldFail(() -> fieldTypes.setMultiValued("sorted"),
+               "field \"sorted\": DocValuesType=SORTED cannot be multi-valued");
+    assertFalse(fieldTypes.getMultiValued("sorted"));
+    Document doc = w.newDocument();
+    doc.addStoredBinary("binary", new BytesRef(new byte[7]));
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.NUMERIC
+  public void testExcMultiValuedDVNumeric() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("numeric", DocValuesType.NUMERIC);
+    shouldFail(() -> fieldTypes.setMultiValued("numeric"),
+               "field \"numeric\": DocValuesType=NUMERIC cannot be multi-valued");
+    assertFalse(fieldTypes.getMultiValued("numeric"));
+    Document doc = w.newDocument();
+    doc.addInt("numeric", 17);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testPostingsFormat() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+
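+    // Store the "id" terms with the Memory postings format; fast ranges are
+    // unnecessary since the field is only used for exact lookups: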
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setPostingsFormat("id", "Memory");
+    fieldTypes.enableStored("id");
+    fieldTypes.disableFastRanges("id");
+
+    Document doc = w.newDocument();
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(fieldTypes.newExactStringQuery("id", "0"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
+    hits = s.search(fieldTypes.newExactStringQuery("id", "1"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testBinaryTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+
+    Document doc = w.newDocument();
+    doc.addAtom("id", new BytesRef(new byte[1]));
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    TopDocs hits = s.search(fieldTypes.newExactBinaryQuery("id", new byte[1]), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDocValuesFormat() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    fieldTypes.setDocValuesFormat("id", "Memory");
+    fieldTypes.enableStored("id");
+    fieldTypes.enableSorting("id");
+
+    Document doc = w.newDocument();
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(fieldTypes.newExactStringQuery("id", "0"), 1, fieldTypes.newSort("id"));
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
+    hits = s.search(fieldTypes.newExactStringQuery("id", "1"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testTermsDictTermsPerBlock() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("id", IndexOptions.DOCS);
+
+    fieldTypes.setTermsDictBlockSize("id", 10);
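+    // With a block size of 10, the 20 terms indexed below should produce two
+    // leaf blocks under one root block (verified via CheckIndex at the end):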
+    for(int i=0;i<10;i++) {
+      Document doc = w.newDocument();
+      doc.addAtom("id", "0" + i);
+      w.addDocument(doc);
+    }
+    for(int i=0;i<10;i++) {
+      Document doc = w.newDocument();
+      doc.addAtom("id", "1" + i);
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    w.close();
+
+    // Use CheckIndex to verify we got 2 terms blocks:
+    CheckIndex.Status checked = TestUtil.checkIndex(dir);
+    assertEquals(1, checked.segmentInfos.size());
+    CheckIndex.Status.SegmentInfoStatus segment = checked.segmentInfos.get(0);
+    assertNotNull(segment.termIndexStatus.blockTreeStats);
+    Stats btStats = (Stats) segment.termIndexStatus.blockTreeStats.get("id");
+    assertNotNull(btStats);
+    assertEquals(2, btStats.termsOnlyBlockCount);
+    assertEquals(1, btStats.subBlocksOnlyBlockCount);
+    assertEquals(3, btStats.totalBlockCount);
+    dir.close();
+  }
+
+  public void testExcInvalidDocValuesFormat() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    shouldFail(() -> fieldTypes.setDocValuesFormat("id", "foobar"),
+               "field \"id\": An SPI class of type org.apache.lucene.codecs.DocValuesFormat with name 'foobar' does not exist");
+    fieldTypes.setDocValuesFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidDocValuesType() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("id", DocValuesType.BINARY);
+    Document doc = w.newDocument();
+    shouldFail(() -> doc.addInt("id", 17),
+               "field \"id\": type INT must use NUMERIC or SORTED_NUMERIC docValuesType; got: BINARY");
+    fieldTypes.setPostingsFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidPostingsFormat() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    shouldFail(() -> fieldTypes.setPostingsFormat("id", "foobar"),
+               "field \"id\": An SPI class of type org.apache.lucene.codecs.PostingsFormat with name 'foobar' does not exist");
+    fieldTypes.setPostingsFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  public void testHighlightOffsets() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("no_highlight");
+
+    Document doc = w.newDocument();
+    doc.addLargeText("highlight", "here is some content");
+    doc.addLargeText("no_highlight", "here is some content");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    assertTrue(MultiFields.getTerms(r, "highlight").hasOffsets());
+    assertFalse(MultiFields.getTerms(r, "no_highlight").hasOffsets());
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testAnalyzerPositionGap() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("nogap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.setMultiValued("nogap");
+    fieldTypes.disableHighlighting("nogap");
+    fieldTypes.setAnalyzerPositionGap("nogap", 0);
+
+    fieldTypes.setIndexOptions("onegap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.setMultiValued("onegap");
+    fieldTypes.disableHighlighting("onegap");
+    fieldTypes.setAnalyzerPositionGap("onegap", 1);
+
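+    // The phrase "word1 word2" can only match across field values when the
+    // analyzer position gap is zero: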
+    Document doc = w.newDocument();
+    doc.addLargeText("nogap", "word1");
+    doc.addLargeText("nogap", "word2");
+    doc.addLargeText("onegap", "word1");
+    doc.addLargeText("onegap", "word2");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    PhraseQuery q = new PhraseQuery();
+    q.add(new Term("nogap", "word1"));
+    q.add(new Term("nogap", "word2"));
+    assertEquals(1, s.search(q, 1).totalHits);
+
+    q = new PhraseQuery();
+    q.add(new Term("onegap", "word1"));
+    q.add(new Term("onegap", "word2"));
+    assertEquals(0, s.search(q, 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcFieldTypesAreSaved() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addAtom("id", new BytesRef(new byte[5]));
+    w.addDocument(doc);
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("id", 7),
+               "field \"id\": cannot change from value type ATOM to INT");
+    doc2.addAtom("id", new BytesRef(new byte[7]));
+    w.addDocument(doc2);
+    w.close();
+    dir.close();
+  }
+
+  public void testDisableIndexing() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("foo", IndexOptions.NONE);
+
+    Document doc = w.newDocument();
+    doc.addAtom("foo", "bar");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    shouldFail(() -> fieldTypes.newExactStringQuery("foo", "bar"),
+               "field \"foo\": cannot create term query: this field was not indexed");
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcDisableDocValues() throws Exception {
+
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("foo", DocValuesType.NONE);
+
+    Document doc = w.newDocument();
+    doc.addInt("foo", 17);
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    shouldFail(() -> fieldTypes.newSort("foo"),
+               "field \"foo\": this field was not indexed for sorting");
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcRangeQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableFastRanges("int");
+    Document doc = w.newDocument();
+    doc.addInt("int", 17);
+    w.addDocument(doc);
+    shouldFail(() -> fieldTypes.newIntRangeFilter("int", 0, true, 7, true),
+               "field \"int\": cannot create range filter: this field was not indexed for fast ranges");
+    w.close();
+    dir.close();
+  }
+
+  public void testIndexCreatedVersion() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    assertEquals(Version.LATEST, w.getFieldTypes().getIndexCreatedVersion());
+    w.close();
+    dir.close();
+  }
+
+  public void testBooleanType() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addBoolean("onsale", true);
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(fieldTypes.newExactBooleanQuery("onsale", true), 1);
+    assertEquals(1, hits.totalHits);
+    doc = s.doc(hits.scoreDocs[0].doc);
+    assertEquals(true, doc.getBoolean("onsale"));
+    assertEquals(0, s.search(fieldTypes.newExactBooleanQuery("onsale", false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testOnlyChangeFieldTypes() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.commit();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableSorting("sorted");
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig());
+    fieldTypes = w.getFieldTypes();
+    assertTrue(fieldTypes.getSorted("sorted"));
+    w.close();
+    dir.close();
+  }
+
+  public void testMinMaxTokenLength() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    w.getFieldTypes().setMinMaxTokenLength("field", 2, 7);
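+    // Tokens shorter than 2 or longer than 7 characters ("a", "toobigterm") are dropped at index time: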
+    w.commit();
+
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "hello a toobigterm");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+    assertEquals(2, fieldTypes.getMinTokenLength("field").intValue());
+    assertEquals(7, fieldTypes.getMaxTokenLength("field").intValue());
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "hello"), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactStringQuery("field", "a"), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactStringQuery("field", "toobigterm"),1 ).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMaxTokenCount() throws Exception {
+    Directory dir = newDirectory();
+    MockAnalyzer a = new MockAnalyzer(random());
+    // MockAnalyzer is angry that we don't consume all tokens:
+    a.setEnableChecks(false);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMaxTokenCount("field", 3);
+    w.commit();
+
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "hello a toobigterm goodbye");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    assertEquals(3, fieldTypes.getMaxTokenCount("field").intValue());
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "hello"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "a"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "toobigterm"), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactStringQuery("field", "goodbye"), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMaxTokenCountConsumeAll() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMaxTokenCount("field", 3, true);
+
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "hello a toobigterm goodbye");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    assertEquals(3, fieldTypes.getMaxTokenCount("field").intValue());
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "hello"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "a"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "toobigterm"), 1).totalHits);
+    assertEquals(0, s.search(fieldTypes.newExactStringQuery("field", "goodbye"), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcSuddenlyEnableDocValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("field", DocValuesType.NONE);
+
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+
+    shouldFail(() -> fieldTypes.setDocValuesType("field", DocValuesType.NUMERIC),
+               "field \"field\": cannot change docValuesType from NONE to NUMERIC");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcCannotStoreTokenStream() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableNorms("field");
+    fieldTypes.enableStored("field");
+
+    Document doc = w.newDocument();
+    shouldFail(() ->
+      doc.addLargeText("field", new TokenStream() {
+          @Override
+          public boolean incrementToken() {
+            return false;
+          }
+        }),
+               "field \"field\": can only store String large text fields");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addStoredString("field", "bar");
+    shouldFail(() -> doc.addLargeText("field", "bar"),
+               "field \"field\": this field is already disabled for indexing");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "bar");
+    shouldFail(() -> doc.addStoredString("field", "bar"),
+               "field \"field\": this field is already indexed with indexOptions=DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testStoredAfterLargeText() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "ABC");
+    shouldFail(() -> doc.addStoredString("field", "foo"),
+               "field \"field\": this field is already indexed with indexOptions=DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS");
+    w.close();
+    dir.close();
+  }
+
+  public void testSortKey() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+
+    Document doc = w.newDocument();
+    doc.addAtom("sev", "cosmetic");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("sev", "major");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("sev", "critical");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("sev", "minor");
+    w.addDocument(doc);
+
+    // missing
+    doc = w.newDocument();
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 5, fieldTypes.newSort("sev"));
+    assertEquals(5, hits.totalHits);
+    assertEquals("cosmetic", s.doc(hits.scoreDocs[0].doc).getString("sev"));
+    assertEquals("critical", s.doc(hits.scoreDocs[1].doc).getString("sev"));
+    assertEquals("major", s.doc(hits.scoreDocs[2].doc).getString("sev"));
+    assertEquals("minor", s.doc(hits.scoreDocs[3].doc).getString("sev"));
+    assertNull(s.doc(hits.scoreDocs[4].doc).getString("sev"));
+
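+    // Replace the default unicode order with a custom sort key ranking
+    // severities by importance: critical < major < minor < cosmetic.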
+    final Map<BytesRef,Integer> sortMap = new HashMap<>();
+    sortMap.put(new BytesRef("critical"), 0);
+    sortMap.put(new BytesRef("major"), 1);
+    sortMap.put(new BytesRef("minor"), 2);
+    sortMap.put(new BytesRef("cosmetic"), 3);
+    fieldTypes.setSortKey("sev", v -> sortMap.get(v));
+
+    hits = s.search(new MatchAllDocsQuery(), 5, fieldTypes.newSort("sev"));
+    assertEquals(5, hits.totalHits);
+    assertEquals("critical", s.doc(hits.scoreDocs[0].doc).getString("sev"));
+    assertEquals("major", s.doc(hits.scoreDocs[1].doc).getString("sev"));
+    assertEquals("minor", s.doc(hits.scoreDocs[2].doc).getString("sev"));
+    assertEquals("cosmetic", s.doc(hits.scoreDocs[3].doc).getString("sev"));
+    assertNull(s.doc(hits.scoreDocs[4].doc).getString("sev"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcMixedBinaryStringAtom() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    Document doc = w.newDocument();
+    doc.addAtom("field", "bar");
+
+    Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addAtom("field", new BytesRef("bar")),
+               "field \"field\": cannot change from string to binary ATOM");
+    w.close();
+  }
+
+  public void testPerFieldAnalyzers() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexAnalyzer("foo", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true));
+    fieldTypes.setIndexAnalyzer("bar", new MockAnalyzer(random(), MockTokenizer.KEYWORD, false));
+
+    Document doc = w.newDocument();
+    doc.addShortText("foo", "DOO dah");
+    doc.addShortText("bar", "doo DAH");
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w);
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("foo", "doo"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("bar", "doo DAH"), 1).totalHits);
+    r.close();
+    w.close();
+  }
+
+  public void testPreAnalyzed() throws Exception {
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addLargeText("body", new CannedTokenStream(new Token[] {new Token("foo", 0, 3),
+                                                                new Token("BAR", 4, 6)}));
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = r.getFieldTypes();
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("body", "BAR"), 1).totalHits);
+    r.close();
+    w.close();
+  }
+
+  public void testBasicRoundTrip() throws Exception {
+    IndexWriter w = newIndexWriter();
+    FieldTypes fieldTypes = w.getFieldTypes();
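+    // Big int/decimal fields are fixed-width: configure the byte width (and scale) up front: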
+    fieldTypes.setBigIntByteWidth("bigInt", 17);
+    fieldTypes.setBigDecimalByteWidthAndScale("bigDec", 17, 4);
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addAtom("binaryAtom", new BytesRef(new byte[7]));
+    doc.addAtom("stringAtom", "foo");
+    doc.addInt("int", 17);
+    doc.addLong("long", 17017L);
+    doc.addShortText("title", "this is a title");
+    doc.addDouble("double", 7.0);
+    doc.addHalfFloat("halfFloat", 7.0f);
+    doc.addFloat("float", 7.0f);
+    doc.addInetAddress("inet", InetAddress.getByName("10.17.4.11"));
+    BigInteger bigInt = BigInteger.valueOf(1708);
+    doc.addBigInteger("bigInt", bigInt);
+    BigDecimal bigDec = new BigDecimal(BigInteger.valueOf(1708), 4);
+    doc.addBigDecimal("bigDec", bigDec);
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    Document doc2 = w.newDocument();
+    // nocommit can we relax IW's check so that this reader's FieldTypes is accepted?
+    doc2.addAll(doc);
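+    // Replace the previously indexed document, matching on the unique "id" field: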
+    w.updateDocument(fieldTypes.newIntTerm("id", 0),
+                     doc2);
+
+    r.close();
+    r = DirectoryReader.open(w, true);
+    assertEquals(1, r.numDocs());
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(fieldTypes.newExactIntQuery("id", 0), 1);
+    assertEquals(1, hits.totalHits);
+    Document doc3 = r.document(hits.scoreDocs[0].doc);
+    assertEquals(bigDec, doc3.getBigDecimal("bigDec"));
+    assertEquals(bigInt, doc3.getBigInteger("bigInt"));
+    assertEquals(17, doc3.getInt("int").intValue());
+    assertEquals(17017L, doc3.getLong("long").longValue());
+    assertEquals(new BytesRef(new byte[7]), doc3.getBinary("binaryAtom"));
+    assertEquals("foo", doc3.getString("stringAtom"));
+    r.close();
+    w.close();
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestDoubleFields.java b/lucene/core/src/test/org/apache/lucene/document/TestDoubleFields.java
new file mode 100644
index 0000000..b258035
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestDoubleFields.java
@@ -0,0 +1,436 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestDoubleFields extends LuceneTestCase {
+
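+  // The double <-> long sortable-bits conversion round-trips exactly, so no tolerance is needed: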
+  static final double IOTA = 0.0;
+
+  public void testBasicRange() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addDouble("num", 3d);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 2d);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 7d);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleRangeFilter("num", 0d, true, 3d, true), 1).totalHits);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleDocValuesRangeFilter("num", 0d, true, 3d, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleRangeFilter("num", 0d, true, 10d, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleDocValuesRangeFilter("num", 0d, true, 10d, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleRangeFilter("num", 1d, true, 2.5d, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleDocValuesRangeFilter("num", 1d, true, 2.5d, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandom() throws Exception {
+    int iters = atLeast(10000);
+    for(int iter=0;iter<iters;iter++) {
+      double v = random().nextDouble();
+      long x = NumericUtils.doubleToLong(v);
+      double v2 = NumericUtils.longToDouble(x);
+      assertEquals(v, v2, IOTA);
+    }
+  }
+
+  public void testNaN() throws Exception {
+    assertEquals(Double.NaN, NumericUtils.longToDouble(NumericUtils.doubleToLong(Double.NaN)), 0.0d);
+  }
+
+  public void testPositiveInfinity() throws Exception {
+    assertEquals(Double.POSITIVE_INFINITY, NumericUtils.longToDouble(NumericUtils.doubleToLong(Double.POSITIVE_INFINITY)), 0.0d);
+  }
+
+  public void testNegativeInfinity() throws Exception {
+    assertEquals(Double.NEGATIVE_INFINITY, NumericUtils.longToDouble(NumericUtils.doubleToLong(Double.NEGATIVE_INFINITY)), 0.0d);
+  }
+
+  public void testBasicSort() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addDouble("num", 3d);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 2d);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 7d);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addDouble("num", 3d);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 7d);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingFirst("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addDouble("num", 3d);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addDouble("num", 7d);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingLast("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("two", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    List<Double> values = new ArrayList<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      Double num = random().nextDouble();
+      values.add(num);
+      doc.addDouble("num", num);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " num=" + num);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      double x = random().nextDouble();
+      double y = random().nextDouble();
+
+      double min, max;
+      if (x < y) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        double value = values.get(i).doubleValue();
+        if (value >= min && value <= max) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter;
+      if (random().nextBoolean()) {
+        filter = fieldTypes.newDoubleRangeFilter("num", min, true, max, true);
+      } else {
+        filter = fieldTypes.newDoubleDocValuesRangeFilter("num", min, true, max, true);
+      }
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      Double last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        Double v = doc.getDouble("num");
+        if (isClose(v, (Double) ((FieldDoc) hit).fields[0]) == false) {
+          System.out.println("  wrong: " + v + " vs " + ((FieldDoc) hit).fields[0]);
+          wrongValues = true;
+        }
+        if (VERBOSE) {
+          System.out.println("   hit doc=" + doc);
+        }
+        if (last != null) {
+          int cmp;
+          if (isClose(last, v)) {
+            cmp = 0;
+          } else {
+            cmp = last.compareTo(v);
+          }
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  static boolean isClose(double v1, double v2) {
+    return Math.abs(v1 - v2) <= IOTA;
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addDouble("num", 45d);
+    doc.addDouble("num", -22d);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addDouble("num", -2d);
+    doc.addDouble("num", 14d);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addDouble("num", 45d);
+    doc.addDouble("num", -22d);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addDouble("num", -2d);
+    doc.addDouble("num", 14d);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleRangeFilter("num", -100d, true, 100d, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newDoubleRangeFilter("num", 40d, true, 45d, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addDouble("num", 180d);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, s.search(fieldTypes.newExactDoubleQuery("num", 180d), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredDouble("num", 180d);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(180d, doc.getDouble("num"), 0.0d);
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addDouble("num", 100.);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredDouble("num", 200d),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredDouble("num", 100d);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addDouble("num", 200d),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestField.java b/lucene/core/src/test/org/apache/lucene/document/TestField.java
deleted file mode 100644
index 62be66a..0000000
--- a/lucene/core/src/test/org/apache/lucene/document/TestField.java
+++ /dev/null
@@ -1,520 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.StringReader;
-import java.nio.charset.StandardCharsets;
-
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-
-// sanity check some basics of fields
-public class TestField extends LuceneTestCase {
-  
-  public void testDoubleField() throws Exception {
-    Field fields[] = new Field[] {
-        new DoubleField("foo", 5d, Field.Store.NO),
-        new DoubleField("foo", 5d, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      field.setDoubleValue(6d); // ok
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-    
-      assertEquals(6d, field.numericValue().doubleValue(), 0.0d);
-    }
-  }
-  
-  public void testDoubleDocValuesField() throws Exception {
-    DoubleDocValuesField field = new DoubleDocValuesField("foo", 5d);
-
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    field.setDoubleValue(6d); // ok
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(6d, Double.longBitsToDouble(field.numericValue().longValue()), 0.0d);
-  }
-  
-  public void testFloatDocValuesField() throws Exception {
-    FloatDocValuesField field = new FloatDocValuesField("foo", 5f);
-
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    field.setFloatValue(6f); // ok
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(6f, Float.intBitsToFloat(field.numericValue().intValue()), 0.0f);
-  }
-  
-  public void testFloatField() throws Exception {
-    Field fields[] = new Field[] {
-        new FloatField("foo", 5f, Field.Store.NO),
-        new FloatField("foo", 5f, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      field.setFloatValue(6f); // ok
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6f, field.numericValue().floatValue(), 0.0f);
-    }
-  }
-  
-  public void testIntField() throws Exception {
-    Field fields[] = new Field[] {
-        new IntField("foo", 5, Field.Store.NO),
-        new IntField("foo", 5, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      field.setIntValue(6); // ok
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6, field.numericValue().intValue());
-    }
-  }
-  
-  public void testNumericDocValuesField() throws Exception {
-    NumericDocValuesField field = new NumericDocValuesField("foo", 5L);
-
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    field.setLongValue(6); // ok
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(6L, field.numericValue().longValue());
-  }
-  
-  public void testLongField() throws Exception {
-    Field fields[] = new Field[] {
-        new LongField("foo", 5L, Field.Store.NO),
-        new LongField("foo", 5L, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      field.setLongValue(6); // ok
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6L, field.numericValue().longValue());
-    }
-  }
-  
-  public void testSortedBytesDocValuesField() throws Exception {
-    SortedDocValuesField field = new SortedDocValuesField("foo", new BytesRef("bar"));
-
-    trySetBoost(field);
-    trySetByteValue(field);
-    field.setBytesValue("fubar".getBytes(StandardCharsets.UTF_8));
-    field.setBytesValue(new BytesRef("baz"));
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(new BytesRef("baz"), field.binaryValue());
-  }
-  
-  public void testBinaryDocValuesField() throws Exception {
-    BinaryDocValuesField field = new BinaryDocValuesField("foo", new BytesRef("bar"));
-
-    trySetBoost(field);
-    trySetByteValue(field);
-    field.setBytesValue("fubar".getBytes(StandardCharsets.UTF_8));
-    field.setBytesValue(new BytesRef("baz"));
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(new BytesRef("baz"), field.binaryValue());
-  }
-  
-  public void testStringField() throws Exception {
-    Field fields[] = new Field[] {
-        new StringField("foo", "bar", Field.Store.NO),
-        new StringField("foo", "bar", Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      field.setStringValue("baz");
-      trySetTokenStreamValue(field);
-      
-      assertEquals("baz", field.stringValue());
-    }
-  }
-  
-  public void testTextFieldString() throws Exception {
-    Field fields[] = new Field[] {
-        new TextField("foo", "bar", Field.Store.NO),
-        new TextField("foo", "bar", Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      field.setBoost(5f);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      field.setStringValue("baz");
-      field.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
-      
-      assertEquals("baz", field.stringValue());
-      assertEquals(5f, field.boost(), 0f);
-    }
-  }
-  
-  public void testTextFieldReader() throws Exception {
-    Field field = new TextField("foo", new StringReader("bar"));
-
-    field.setBoost(5f);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    field.setReaderValue(new StringReader("foobar"));
-    trySetShortValue(field);
-    trySetStringValue(field);
-    field.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
-      
-    assertNotNull(field.readerValue());
-    assertEquals(5f, field.boost(), 0f);
-  }
-  
-  /* TODO: this is pretty expert and crazy
-   * see if we can fix it up later
-  public void testTextFieldTokenStream() throws Exception {
-  }
-  */
-  
-  public void testStoredFieldBytes() throws Exception {
-    Field fields[] = new Field[] {
-        new StoredField("foo", "bar".getBytes(StandardCharsets.UTF_8)),
-        new StoredField("foo", "bar".getBytes(StandardCharsets.UTF_8), 0, 3),
-        new StoredField("foo", new BytesRef("bar")),
-    };
-    
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      field.setBytesValue("baz".getBytes(StandardCharsets.UTF_8));
-      field.setBytesValue(new BytesRef("baz"));
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(new BytesRef("baz"), field.binaryValue());
-    }
-  }
-  
-  public void testStoredFieldString() throws Exception {
-    Field field = new StoredField("foo", "bar");
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    field.setStringValue("baz");
-    trySetTokenStreamValue(field);
-    
-    assertEquals("baz", field.stringValue());
-  }
-  
-  public void testStoredFieldInt() throws Exception {
-    Field field = new StoredField("foo", 1);
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    field.setIntValue(5);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(5, field.numericValue().intValue());
-  }
-  
-  public void testStoredFieldDouble() throws Exception {
-    Field field = new StoredField("foo", 1D);
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    field.setDoubleValue(5D);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(5D, field.numericValue().doubleValue(), 0.0D);
-  }
-  
-  public void testStoredFieldFloat() throws Exception {
-    Field field = new StoredField("foo", 1F);
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    field.setFloatValue(5f);
-    trySetLongValue(field);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(5f, field.numericValue().floatValue(), 0.0f);
-  }
-  
-  public void testStoredFieldLong() throws Exception {
-    Field field = new StoredField("foo", 1L);
-    trySetBoost(field);
-    trySetByteValue(field);
-    trySetBytesValue(field);
-    trySetBytesRefValue(field);
-    trySetDoubleValue(field);
-    trySetIntValue(field);
-    trySetFloatValue(field);
-    field.setLongValue(5);
-    trySetReaderValue(field);
-    trySetShortValue(field);
-    trySetStringValue(field);
-    trySetTokenStreamValue(field);
-    
-    assertEquals(5L, field.numericValue().longValue());
-  }
-  
-  private void trySetByteValue(Field f) {
-    try {
-      f.setByteValue((byte) 10);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-
-  private void trySetBytesValue(Field f) {
-    try {
-      f.setBytesValue(new byte[] { 5, 5 });
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetBytesRefValue(Field f) {
-    try {
-      f.setBytesValue(new BytesRef("bogus"));
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetDoubleValue(Field f) {
-    try {
-      f.setDoubleValue(Double.MAX_VALUE);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetIntValue(Field f) {
-    try {
-      f.setIntValue(Integer.MAX_VALUE);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetLongValue(Field f) {
-    try {
-      f.setLongValue(Long.MAX_VALUE);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetFloatValue(Field f) {
-    try {
-      f.setFloatValue(Float.MAX_VALUE);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetReaderValue(Field f) {
-    try {
-      f.setReaderValue(new StringReader("BOO!"));
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetShortValue(Field f) {
-    try {
-      f.setShortValue(Short.MAX_VALUE);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetStringValue(Field f) {
-    try {
-      f.setStringValue("BOO!");
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetTokenStreamValue(Field f) {
-    try {
-      f.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-  private void trySetBoost(Field f) {
-    try {
-      f.setBoost(5.0f);
-      fail();
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-  }
-  
-}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldExists.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldExists.java
new file mode 100644
index 0000000..95755d2
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFieldExists.java
@@ -0,0 +1,98 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestFieldExists extends LuceneTestCase {
+  public void testFieldExistsFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addAtom("field1", "field");
+    doc.addAtom("field2", "field");
+    doc.addAtom("field3", "field");
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("field1", "field");
+    doc.addAtom("field3", "field");
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("field2", "field");
+    doc.addAtom("field3", "field");
+    doc.addAtom("id", "2");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
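+    // The field-exists filter should match exactly the documents that have a value for the field: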
+    TopDocs hits = s.search(new MatchAllDocsQuery(),
+                            fieldTypes.newFieldExistsFilter("field1"), 2);
+    assertEquals(2, hits.totalHits);
+    Set<String> ids = getIDs(r, hits);
+    assertTrue(ids.contains("0"));
+    assertTrue(ids.contains("1"));
+
+    hits = s.search(new MatchAllDocsQuery(),
+                    fieldTypes.newFieldExistsFilter("field2"), 2);
+    assertEquals(2, hits.totalHits);
+    ids = getIDs(r, hits);
+    assertTrue(ids.contains("0"));
+    assertTrue(ids.contains("2"));
+
+    hits = s.search(new MatchAllDocsQuery(),
+                    fieldTypes.newFieldExistsFilter("field3"), 3);
+    assertEquals(3, hits.totalHits);
+    ids = getIDs(r, hits);
+    assertTrue(ids.contains("0"));
+    assertTrue(ids.contains("1"));
+    assertTrue(ids.contains("2"));
+
+    assertEquals(0, s.search(new MatchAllDocsQuery(),
+                             fieldTypes.newFieldExistsFilter("field4"), 1).totalHits);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  private Set<String> getIDs(IndexReader r, TopDocs hits) throws IOException {
+    Set<String> ids = new HashSet<>();
+    for(ScoreDoc scoreDoc : hits.scoreDocs) {
+      ids.add(r.document(scoreDoc.doc).getString("id"));
+    }
+    return ids;
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
deleted file mode 100644
index b26a10d..0000000
--- a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package org.apache.lucene.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.util.LuceneTestCase;
-
-/** simple testcases for concrete impl of IndexableFieldType */
-public class TestFieldType extends LuceneTestCase {
-  
-  public void testEquals() throws Exception {
-    FieldType ft = new FieldType();
-    assertEquals(ft, ft);
-    assertFalse(ft.equals(null));
-    
-    FieldType ft2 = new FieldType();
-    assertEquals(ft, ft2);
-    assertEquals(ft.hashCode(), ft2.hashCode());
-    
-    FieldType ft3 = new FieldType();
-    ft3.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    assertFalse(ft3.equals(ft));
-    
-    FieldType ft4 = new FieldType();
-    ft4.setDocValuesType(DocValuesType.BINARY);
-    assertFalse(ft4.equals(ft));
-    
-    FieldType ft5 = new FieldType();
-    ft5.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    assertFalse(ft5.equals(ft));
-    
-    FieldType ft6 = new FieldType();
-    ft6.setStored(true);
-    assertFalse(ft6.equals(ft));
-    
-    FieldType ft7 = new FieldType();
-    ft7.setOmitNorms(true);
-    assertFalse(ft7.equals(ft));
-    
-    FieldType ft8 = new FieldType();
-    ft8.setNumericType(NumericType.DOUBLE);
-    assertFalse(ft8.equals(ft));
-    
-    FieldType ft9 = new FieldType();
-    ft9.setNumericPrecisionStep(3);
-    assertFalse(ft9.equals(ft));
-    
-    FieldType ft10 = new FieldType();
-    ft10.setStoreTermVectors(true);
-    assertFalse(ft10.equals(ft));
-
-  }
-}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldTypes.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldTypes.java
new file mode 100644
index 0000000..fe10ab0
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFieldTypes.java
@@ -0,0 +1,124 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestFieldTypes extends LuceneTestCase {
+
+  public void testExcAddAll() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = newDirectory();
+    w = newIndexWriter(dir2);
+    doc = w.newDocument();
+    doc.addAtom("field", "foo");
+    doc.addAtom("a", "foo");
+    doc.addAtom("z", "foo");
+    w.addDocument(doc);
+    w.close();
+
+    FieldTypes ft1 = FieldTypes.getFieldTypes(dir, null);
+    FieldTypes ft2 = FieldTypes.getFieldTypes(dir2, null);
+    try {
+      ft1.addAll(ft2);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // expected
+      assertEquals("field \"field\": cannot change value type from INT to ATOM",
+                   ise.getMessage());
+    }
+
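+    // The failed addAll must not have been partially applied: fields known only to ft2 stay unknown to ft1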
+    ft1.getFieldType("field");
+    try {
+      ft1.getFieldType("a");
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    try {
+      ft1.getFieldType("z");
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    dir2.close();
+  }
+
+  public void testInconsistentMultiReaderFieldTypes() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addInt("field", 5);
+    w.addDocument(doc);
+    w.close();
+    IndexReader sub1 = DirectoryReader.open(dir);
+
+    w = newIndexWriter(newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+    doc = w.newDocument();
+    doc.addShortText("field", "hello");
+    w.addDocument(doc);
+    w.close();
+    IndexReader sub2 = DirectoryReader.open(dir);
+
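+    // Combining readers whose schemas disagree on "field" should fail up front: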
+    try {
+      new MultiReader(sub1, sub2);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      assertEquals("field \"field\": cannot change value type from INT to SHORT_TEXT", ise.getMessage());
+    }
+
+    sub1.close();
+    sub2.close();
+  }
+
+  public void testInconsistentAddIndexesFieldTypes() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addInt("field", 5);
+    w.addDocument(doc);
+    w.close();
+    DirectoryReader sub = DirectoryReader.open(dir);
+
+    w = newIndexWriter(newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+    doc = w.newDocument();
+    doc.addShortText("field", "hello");
+    w.addDocument(doc);
+
+    try {
+      TestUtil.addIndexesSlowly(w, sub);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      assertEquals("field \"field\": cannot change value type from SHORT_TEXT to INT", ise.getMessage());
+    }
+
+    w.close();
+    sub.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFloatFields.java b/lucene/core/src/test/org/apache/lucene/document/TestFloatFields.java
new file mode 100644
index 0000000..41e9b0d
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFloatFields.java
@@ -0,0 +1,425 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestFloatFields extends LuceneTestCase {
+
+  public void testBasicRange() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 2f);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatRangeFilter("num", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatDocValuesRangeFilter("num", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatRangeFilter("num", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatDocValuesRangeFilter("num", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatRangeFilter("num", 1f, true, 2.5f, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatDocValuesRangeFilter("num", 1f, true, 2.5f, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandom() throws Exception {
+    int iters = atLeast(10000);
+    for(int iter=0;iter<iters;iter++) {
+      float v = random().nextFloat();
+      int x = NumericUtils.floatToInt(v);
+      float v2 = NumericUtils.intToFloat(x);
+      assertEquals(v, v2, 0.0f);
+    }
+  }
+
+  public void testNaN() throws Exception {
+    assertEquals(Float.NaN, NumericUtils.intToFloat(NumericUtils.floatToInt(Float.NaN)), 0.0f);
+  }
+
+  public void testPositiveInfinity() throws Exception {
+    assertEquals(Float.POSITIVE_INFINITY, NumericUtils.intToFloat(NumericUtils.floatToInt(Float.POSITIVE_INFINITY)), 0.0f);
+  }
+
+  public void testNegativeInfinity() throws Exception {
+    assertEquals(Float.NEGATIVE_INFINITY, NumericUtils.intToFloat(NumericUtils.floatToInt(Float.NEGATIVE_INFINITY)), 0.0f);
+  }
+
+  public void testBasicSort() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 2f);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingFirst("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingLast("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("two", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    List<Float> values = new ArrayList<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      Float num = random().nextFloat();
+      values.add(num);
+      doc.addFloat("num", num);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " num=" + num);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      float x = random().nextFloat();
+      float y = random().nextFloat();
+
+      float min, max;
+      if (x < y) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        float value = values.get(i).floatValue();
+        if (value >= min && value <= max) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter;
+      if (random().nextBoolean()) {
+        filter = fieldTypes.newFloatRangeFilter("num", min, true, max, true);
+      } else {
+        filter = fieldTypes.newFloatDocValuesRangeFilter("num", min, true, max, true);
+      }
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      Float last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        Float v = doc.getFloat("num");
+        if (v.equals(((FieldDoc) hit).fields[0]) == false) {
+          System.out.println("  wrong: " + v + " vs " + ((FieldDoc) hit).fields[0]);
+          wrongValues = true;
+        }
+        if (VERBOSE) {
+          System.out.println("   hit doc=" + doc);
+        }
+        if (last != null) {
+          int cmp = last.compareTo(v);
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addFloat("num", 45F);
+    doc.addFloat("num", -22F);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addFloat("num", -2F);
+    doc.addFloat("num", 14F);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addFloat("num", 45F);
+    doc.addFloat("num", -22F);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addFloat("num", -2F);
+    doc.addFloat("num", 14F);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatRangeFilter("num", -100F, true, 100F, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newFloatRangeFilter("num", 40F, true, 45F, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addFloat("num", 180f);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, s.search(fieldTypes.newExactFloatQuery("num", 180f), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredFloat("num", 180f);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(180f, doc.getFloat("num"), 0.0f);
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addFloat("num", 100f);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredFloat("num", 200f),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredFloat("num", 100f);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addFloat("num", 200f),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestHalfFloatFields.java b/lucene/core/src/test/org/apache/lucene/document/TestHalfFloatFields.java
new file mode 100644
index 0000000..af90148
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestHalfFloatFields.java
@@ -0,0 +1,418 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestHalfFloatFields extends LuceneTestCase {
+
+  // We can use a constant IOTA because all our random floats are in [0.0, 1.0):
+  static final float IOTA = 0.0005F;
+
+  public void testBasicRange() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addHalfFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 2f);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatRangeFilter("num", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatDocValuesRangeFilter("num", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatRangeFilter("num", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatDocValuesRangeFilter("num", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatRangeFilter("num", 1f, true, 2.5f, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatDocValuesRangeFilter("num", 1f, true, 2.5f, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandom() throws Exception {
+    int iters = atLeast(10000);
+    for(int iter=0;iter<iters;iter++) {
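+      // Round-trip through the 16-bit half-float encoding; the precision loss must stay within IOTA: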
+      float v = random().nextFloat();
+      short x = NumericUtils.halfFloatToShort(v);
+      float v2 = NumericUtils.shortToHalfFloat(x);
+      assertEquals(v, v2, IOTA);
+    }
+  }
+
+  public void testNaN() throws Exception {
+    assertEquals(Float.NaN, NumericUtils.shortToHalfFloat(NumericUtils.halfFloatToShort(Float.NaN)), 0.0f);
+  }
+
+  public void testPositiveInfinity() throws Exception {
+    assertEquals(Float.POSITIVE_INFINITY, NumericUtils.shortToHalfFloat(NumericUtils.halfFloatToShort(Float.POSITIVE_INFINITY)), 0.0f);
+  }
+
+  public void testNegativeInfinity() throws Exception {
+    assertEquals(Float.NEGATIVE_INFINITY, NumericUtils.shortToHalfFloat(NumericUtils.halfFloatToShort(Float.NEGATIVE_INFINITY)), 0.0f);
+  }
+
+  public void testBasicSort() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addHalfFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 2f);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addHalfFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
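+    // Documents with no value for "num" should sort before every document that has one: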
+    fieldTypes.setSortMissingFirst("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addHalfFloat("num", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addHalfFloat("num", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingLast("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("two", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    List<Float> values = new ArrayList<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      Float num = random().nextFloat();
+      values.add(num);
+      doc.addHalfFloat("num", num);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " num=" + num);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      float x = random().nextFloat();
+      float y = random().nextFloat();
+
+      float min, max;
+      if (x < y) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        float value = values.get(i).floatValue();
+        if (value >= min && value <= max) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter = fieldTypes.newHalfFloatRangeFilter("num", min, true, max, true);
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      Float last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        Float v = doc.getHalfFloat("num");
+        if (isClose(v, (Float) ((FieldDoc) hit).fields[0]) == false) {
+          System.out.println("  wrong: " + v + " vs " + ((FieldDoc) hit).fields[0]);
+          wrongValues = true;
+        }
+        if (VERBOSE) {
+          System.out.println("   hit doc=" + doc);
+        }
+        if (last != null) {
+          int cmp;
+          if (isClose(last, v)) {
+            cmp = 0;
+          } else {
+            cmp = last.compareTo(v);
+          }
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+
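+      // Half-float quantization can move a value across the min/max boundary, so expected
+      // and actual may disagree only for values within IOTA of an endpoint: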
+      for (Integer id : expected) {
+        if (actual.contains(id) == false) {
+          float value = values.get(id);
+          assertTrue("extra expected: value=" + value + " vs min=" + min + " max=" + max, isClose(value, min) || isClose(value, max));
+        }
+      }
+
+      for (Integer id : actual) {
+        if (expected.contains(id) == false) {
+          float value = values.get(id);
+          assertTrue("extra actual: value=" + value + " vs min=" + min + " max=" + max, isClose(value, min) || isClose(value, max));
+        }
+      }
+
+      assertFalse(wrongValues);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
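+  /** Returns true if the two values are within the half-float tolerance IOTA of each other. */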
+  static boolean isClose(float v1, float v2) {
+    return Math.abs(v1-v2) < IOTA;
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addHalfFloat("num", 45F);
+    doc.addHalfFloat("num", -22F);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addHalfFloat("num", -2F);
+    doc.addHalfFloat("num", 14F);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addHalfFloat("num", 45F);
+    doc.addHalfFloat("num", -22F);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addHalfFloat("num", -2F);
+    doc.addHalfFloat("num", 14F);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatRangeFilter("num", -100F, true, 100F, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newHalfFloatRangeFilter("num", 40F, true, 45F, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addHalfFloat("num", 100f);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredHalfFloat("num", 200f),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredHalfFloat("num", 100f);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addHalfFloat("num", 200f),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestInetAddressFields.java b/lucene/core/src/test/org/apache/lucene/document/TestInetAddressFields.java
new file mode 100644
index 0000000..3a39d4c
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestInetAddressFields.java
@@ -0,0 +1,174 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.net.InetAddress;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestInetAddressFields extends LuceneTestCase {
+
+  // nocommit: are we sorting in a "sane" order?
+  public void testInetAddressSort() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    InetAddress inet0 = InetAddress.getByName("10.17.4.10");
+    doc.addInetAddress("inet", inet0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    InetAddress inet1 = InetAddress.getByName("10.17.4.22");
+    doc.addInetAddress("inet", inet1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("inet"));
+    assertEquals(2, hits.totalHits);
+    Document hit = s.doc(hits.scoreDocs[0].doc);
+    assertEquals("0", hit.getString("id"));
+    assertEquals(inet0, hit.getInetAddress("inet"));
+    hit = s.doc(hits.scoreDocs[1].doc);
+    assertEquals("1", hit.getString("id"));
+    assertEquals(inet1, hit.getInetAddress("inet"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testInetAddressRangeFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    InetAddress inet0 = InetAddress.getByName("10.17.4.10");
+    doc.addInetAddress("inet", inet0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    InetAddress inet1 = InetAddress.getByName("10.17.4.22");
+    doc.addInetAddress("inet", inet1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, false), 1).totalHits);
+    assertEquals(0, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, false, inet1, false), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", InetAddress.getByName("10.17.0.0"), true, InetAddress.getByName("10.17.4.20"), false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testInetV6AddressRangeFilter() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    InetAddress inet0 = InetAddress.getByName("1080:0:0:0:8:700:200C:417A");
+    doc.addInetAddress("inet", inet0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    InetAddress inet1 = InetAddress.getByName("1080:0:0:0:8:800:200C:417A");
+    doc.addInetAddress("inet", inet1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, false), 1).totalHits);
+    assertEquals(0, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, false, inet1, false), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", InetAddress.getByName("1080:0:0:0:0:0:0:0"), true, InetAddress.getByName("1080:0:0:0:8:750:0:0"), false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMixed() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+
+    Document doc = w.newDocument();
+    InetAddress inet0 = InetAddress.getByName("10.17.5.22");
+    doc.addInetAddress("inet", inet0);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    InetAddress inet1 = InetAddress.getByName("1080:0:0:0:8:800:200C:417A");
+    doc.addInetAddress("inet", inet1);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
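+    // A single field can hold both IPv4 and IPv6 addresses, and a range can span the two: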
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, true, inet1, false), 1).totalHits);
+    assertEquals(0, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", inet0, false, inet1, false), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newRangeFilter("inet", InetAddress.getByName("10.17.5.0"), true, InetAddress.getByName("1080:0:0:0:8:750:0:0"), false), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addInetAddress("num", InetAddress.getByName("10.17.5.22"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredInetAddress("num", InetAddress.getByName("10.17.5.22")),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredInetAddress("num", InetAddress.getByName("10.17.5.22"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInetAddress("num", InetAddress.getByName("10.17.5.22")),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestIntFields.java b/lucene/core/src/test/org/apache/lucene/document/TestIntFields.java
new file mode 100644
index 0000000..afc3e7c
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestIntFields.java
@@ -0,0 +1,407 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIntFields extends LuceneTestCase {
+
+  public void testBasicRange() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addInt("num", 3);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 2);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 7);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newIntRangeFilter("num", 0, true, 3, true), 1).totalHits);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newIntDocValuesRangeFilter("num", 0, true, 3, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newIntRangeFilter("num", 0, true, 10, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newIntDocValuesRangeFilter("num", 0, true, 10, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testBasicSort() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addInt("num", 3);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 2);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 7);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addInt("num", 3);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 7);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingFirst("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addInt("num", 3);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addInt("num", 7);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingLast("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("two", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    List<Integer> values = new ArrayList<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      Integer num = random().nextInt();
+      values.add(num);
+      doc.addInt("num", num);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " num=" + num);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      int x = random().nextInt();
+      int y = random().nextInt();
+
+      int min, max;
+      if (x < y) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        int value = values.get(i).intValue();
+        if (value >= min && value <= max) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter;
+      if (random().nextBoolean()) {
+        filter = fieldTypes.newIntRangeFilter("num", min, true, max, true);
+      } else {
+        filter = fieldTypes.newIntDocValuesRangeFilter("num", min, true, max, true);
+      }
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      Integer last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        Integer v = doc.getInt("num");
+        if (v.intValue() != ((Integer) ((FieldDoc) hit).fields[0]).intValue()) {
+          System.out.println("  wrong: " + v + " vs " + ((FieldDoc) hit).fields[0]);
+          wrongValues = true;
+        }
+        if (VERBOSE) {
+          System.out.println("   hit doc=" + doc);
+        }
+        if (last != null) {
+          int cmp = last.compareTo(v);
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addInt("num", 45);
+    doc.addInt("num", -22);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addInt("num", -2);
+    doc.addInt("num", 14);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addInt("num", 45);
+    doc.addInt("num", -22);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addInt("num", -2);
+    doc.addInt("num", 14);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newIntRangeFilter("num", -100, true, 100, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newIntRangeFilter("num", 40, true, 45, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addInt("num", 180);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, s.search(fieldTypes.newExactIntQuery("num", 180), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredInt("num", 180);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(180, doc.getInt("num").intValue());
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addInt("num", 100);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredInt("num", 200),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredInt("num", 100);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("num", 200),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLongFields.java b/lucene/core/src/test/org/apache/lucene/document/TestLongFields.java
new file mode 100644
index 0000000..7754a48
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestLongFields.java
@@ -0,0 +1,407 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestLongFields extends LuceneTestCase {
+
+  public void testBasicRange() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addLong("num", 3L);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 2L);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 7L);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newLongRangeFilter("num", 0L, true, 3L, true), 1).totalHits);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newLongDocValuesRangeFilter("num", 0L, true, 3L, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newLongRangeFilter("num", 0L, true, 10L, true), 1).totalHits);
+    assertEquals(3, s.search(new MatchAllDocsQuery(), fieldTypes.newLongDocValuesRangeFilter("num", 0L, true, 10L, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testBasicSort() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
+
+    Document doc = w.newDocument();
+    doc.addLong("num", 3L);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 2L);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 7L);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingFirst() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addLong("num", 3L);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 7L);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingFirst("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortMissingLast() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document doc = w.newDocument();
+    doc.addLong("num", 3L);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addLong("num", 7L);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    fieldTypes = s.getFieldTypes();
+    fieldTypes.setSortMissingLast("num");
+
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("num"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).getString("id"));
+    assertEquals("three", r.document(hits.scoreDocs[1].doc).getString("id"));
+    assertEquals("two", r.document(hits.scoreDocs[2].doc).getString("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomRangeAndSort() throws Exception {
+    Directory dir = newDirectory();
+    int numDocs = atLeast(100);
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    List<Long> values = new ArrayList<>();
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      Long num = random().nextLong();
+      values.add(num);
+      doc.addLong("num", num);
+      w.addDocument(doc);
+      if (VERBOSE) {
+        System.out.println("TEST: id=" + i + " num=" + num);
+      }
+    }
+
+    IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    int iters = atLeast(1000);
+    for(int iter=0;iter<iters;iter++) {
+      long x = random().nextLong();
+      long y = random().nextLong();
+
+      long min, max;
+      if (x < y) {
+        min = x;
+        max = y;
+      } else {
+        min = y;
+        max = x;
+      }
+      Set<Integer> expected = new HashSet<>();
+      for(int i=0;i<values.size();i++) {
+        long value = values.get(i).longValue();
+        if (value >= min && value <= max) {
+          expected.add(i);
+        }
+      }
+      if (VERBOSE) {
+        System.out.println("TEST: iter " + iter + " count=" + expected.size() + " min=" + min + " max=" + max);
+        for(int value : expected) {
+          System.out.println("  " + value);
+        }
+      }
+      
+      Set<Integer> actual = new HashSet<>();
+      Filter filter;
+      if (random().nextBoolean()) {
+        filter = fieldTypes.newLongRangeFilter("num", min, true, max, true);
+      } else {
+        filter = fieldTypes.newLongDocValuesRangeFilter("num", min, true, max, true);
+      }
+
+      boolean reversed = random().nextBoolean();
+      Sort sort = fieldTypes.newSort("num", reversed);
+      if (VERBOSE) {
+        System.out.println("TEST: filter=" + filter + " reversed=" + reversed + " sort=" + sort);
+      }
+      TopDocs hits = s.search(new MatchAllDocsQuery(), filter, numDocs, sort);
+      Long last = null;
+      boolean wrongValues = false;
+      for(ScoreDoc hit : hits.scoreDocs) {
+        Document doc = s.doc(hit.doc);
+        actual.add(doc.getInt("id"));
+        Long v = doc.getLong("num");
+        if (v.longValue() != ((Long) ((FieldDoc) hit).fields[0]).longValue()) {
+          System.out.println("  wrong: " + v + " vs " + ((FieldDoc) hit).fields[0]);
+          wrongValues = true;
+        }
+        if (VERBOSE) {
+          System.out.println("   hit doc=" + doc);
+        }
+        if (last != null) {
+          int cmp = last.compareTo(v);
+          assertTrue((reversed && cmp >= 0) || (reversed == false && cmp <= 0));
+        }
+        last = v;
+      }
+
+      assertEquals(expected, actual);
+      assertFalse(wrongValues);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedSort() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addLong("num", 45L);
+    doc.addLong("num", -22L);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addLong("num", -2L);
+    doc.addLong("num", 14L);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+
+    // Default selector is MIN:
+    assertEquals(0, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(2, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    fieldTypes.setMultiValuedNumericSortSelector("num", SortedNumericSelector.Type.MAX);
+    hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("num"));
+    assertEquals(2, s.doc(hits.scoreDocs[0].doc).get("id"));
+    assertEquals(0, s.doc(hits.scoreDocs[1].doc).get("id"));
+    assertEquals(1, s.doc(hits.scoreDocs[2].doc).get("id"));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedRange() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("num");
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", 0);
+    doc.addLong("num", 45L);
+    doc.addLong("num", -22L);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 1);
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addUniqueInt("id", 2);
+    doc.addLong("num", -2L);
+    doc.addLong("num", 14L);
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(2, s.search(new MatchAllDocsQuery(), fieldTypes.newLongRangeFilter("num", -100L, true, 100L, true), 1).totalHits);
+    assertEquals(1, s.search(new MatchAllDocsQuery(), fieldTypes.newLongRangeFilter("num", 40L, true, 45L, true), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addLong("num", 180L);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, s.search(fieldTypes.newExactLongQuery("num", 180L), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testJustStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredLong("num", 180L);
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    doc = s.doc(0);
+    assertEquals(180L, doc.getLong("num").longValue());
+    r.close();
+    w.close();
+  }
+
+  public void testExcIndexedThenStored() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addLong("num", 100L);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addStoredLong("num", 200L),
+               "field \"num\": cannot addStored: field was already added non-stored");
+    w.close();
+  }
+
+  public void testExcStoredThenIndexed() throws Exception {
+    IndexWriter w = newIndexWriter(dir);
+    Document doc = w.newDocument();
+    doc.addStoredLong("num", 100L);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addLong("num", 200L),
+               "field \"num\": this field is only stored; use addStoredXXX instead");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestNumericFields.java b/lucene/core/src/test/org/apache/lucene/document/TestNumericFields.java
new file mode 100644
index 0000000..d1d58db
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/document/TestNumericFields.java
@@ -0,0 +1,89 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNumericFields extends LuceneTestCase {
+
+  public void testSortedNumericDocValues() throws Exception {
+    Directory dir = newDirectory();
+
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("sortednumeric", DocValuesType.SORTED_NUMERIC);
+    fieldTypes.setMultiValued("sortednumeric");
+
+    Document doc = w.newDocument();
+    doc.addInt("sortednumeric", 3);
+    doc.addInt("sortednumeric", 1);
+    doc.addInt("sortednumeric", 2);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    SortedNumericDocValues sndv = MultiDocValues.getSortedNumericValues(r, "sortednumeric");
+    sndv.setDocument(0);
+
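+    // SORTED_NUMERIC doc values hand back each document's values in sorted order, regardless of insertion order: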
+    assertEquals(3, sndv.count());
+    assertEquals(1, sndv.valueAt(0));
+    assertEquals(2, sndv.valueAt(1));
+    assertEquals(3, sndv.valueAt(2));
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  // Cannot change a field from INT to DOUBLE
+  public void testInvalidNumberTypeChange() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addInt("int", 3);
+    shouldFail(() -> doc.addDouble("int", 4d),
+               "field \"int\": cannot change from value type INT to DOUBLE");
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
index d41e575..8814e6f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
@@ -18,19 +18,18 @@
  */
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Monster;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.TimeUnits;
-
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 
 @SuppressCodecs({"SimpleText", "Memory", "Direct"})
@@ -55,18 +54,18 @@
         .setMergePolicy(newLogMergePolicy(false, 10))
         .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
         .setCodec(TestUtil.getDefaultCodec()));
-
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("dv");
     byte bytes[] = new byte[4];
     BytesRef data = new BytesRef(bytes);
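+    // data wraps bytes, so mutating the array below changes the value indexed for each document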
-    BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);
-    doc.add(dvField);
     
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
+      Document doc = w.newDocument();
       bytes[0] = (byte)(i >> 24);
       bytes[1] = (byte)(i >> 16);
       bytes[2] = (byte)(i >> 8);
       bytes[3] = (byte) i;
+      doc.addBinary("dv", data);
       w.addDocument(doc);
       if (i % 100000 == 0) {
         System.out.println("indexed: " + i);
@@ -116,17 +115,19 @@
         .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
         .setCodec(TestUtil.getDefaultCodec()));
 
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+
     byte bytes[] = new byte[4];
     ByteArrayDataOutput encoder = new ByteArrayDataOutput(bytes);
     BytesRef data = new BytesRef(bytes);
-    BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);
-    doc.add(dvField);
     
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
       encoder.reset(bytes);
       encoder.writeVInt(i % 65535); // 1, 2, or 3 bytes
       data.length = encoder.getPosition();
+      Document doc = w.newDocument();
+      doc.addBinary("dv", data);
       w.addDocument(doc);
       if (i % 100000 == 0) {
         System.out.println("indexed: " + i);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java
index 081f435..1e9169a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java
@@ -19,15 +19,13 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase.Monster;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.TimeUnits;
-import org.apache.lucene.util.LuceneTestCase.Monster;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 
 @SuppressCodecs({"SimpleText", "Memory", "Direct"})
@@ -53,12 +51,9 @@
         .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
         .setCodec(TestUtil.getDefaultCodec()));
 
-    Document doc = new Document();
-    NumericDocValuesField dvField = new NumericDocValuesField("dv", 0);
-    doc.add(dvField);
-    
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
-      dvField.setLongValue(i);
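+      // one fresh Document per iteration, since the value is bound when addLong is called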
+      Document doc = w.newDocument();
+      doc.addLong("dv", i);
       w.addDocument(doc);
       if (i % 100000 == 0) {
         System.out.println("indexed: " + i);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java
index a4a7504..2dc4ecf 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java
@@ -22,18 +22,13 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.TimeUnits;
 import org.apache.lucene.util.LuceneTestCase.Monster;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-
-import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 
 /**
  * Test indexes ~82M docs with 52 positions each, so you get &gt; Integer.MAX_VALUE positions
@@ -64,15 +59,14 @@
      ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
     }
 
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setOmitNorms(true);
-    Field field = new Field("field", new MyTokenStream(), ft);
-    doc.add(field);
-    
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableNorms("field");
+
     final int numDocs = (Integer.MAX_VALUE / 26) + 1;
+    Document doc = w.newDocument();
+    doc.addLargeText("field", new MyTokenStream());
     for (int i = 0; i < numDocs; i++) {
       w.addDocument(doc);
       if (VERBOSE && i % 100000 == 0) {
         System.out.println(i + " of " + numDocs + "...");
       }
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
index e4fb303..294de0c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
@@ -21,9 +21,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -60,15 +58,15 @@
      // 1 petabyte:
      ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
     }
-
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setOmitNorms(true);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    Field field = new Field("field", new MyTokenStream(), ft);
-    doc.add(field);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS);
+    fieldTypes.disableStored("field");
+    fieldTypes.disableNorms("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", new MyTokenStream());
     
     final int numDocs = (Integer.MAX_VALUE / 26) + 1;
     for (int i = 0; i < numDocs; i++) {
       w.addDocument(doc);
       if (VERBOSE && i % 100000 == 0) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
index c0e1844..ef71b0a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
@@ -23,15 +23,13 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Monster;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 
 /**
  * Test indexes 2B docs with 65k freqs each, 
@@ -63,13 +61,13 @@
      ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
     }
 
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    ft.setOmitNorms(true);
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableNorms("field");
+    fieldTypes.disableHighlighting("field");
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS);
     MyTokenStream tokenStream = new MyTokenStream();
-    Field field = new Field("field", tokenStream, ft);
-    doc.add(field);
+    doc.addLargeText("field", tokenStream);
     
     final int numDocs = 1000;
     for (int i = 0; i < numDocs; i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
index fd78705..33adebb 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
@@ -19,16 +19,14 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase.Monster;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.TimeUnits;
-import org.apache.lucene.util.LuceneTestCase.Monster;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 
 @SuppressCodecs({"SimpleText", "Memory", "Direct"})
@@ -54,11 +52,10 @@
         .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
         .setCodec(TestUtil.getDefaultCodec()));
 
-    Document doc = new Document();
+    Document doc = w.newDocument();
     byte bytes[] = new byte[2];
     BytesRef data = new BytesRef(bytes);
-    SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
-    doc.add(dvField);
+    doc.addBinary("dv", data);
     
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
       bytes[0] = (byte)(i >> 8);
@@ -110,11 +107,10 @@
         .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
         .setCodec(TestUtil.getDefaultCodec()));
 
-    Document doc = new Document();
+    Document doc = w.newDocument();
     byte bytes[] = new byte[4];
     BytesRef data = new BytesRef(bytes);
-    SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
-    doc.add(dvField);
+    doc.addBinary("dv", data);
     
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
       bytes[0] = (byte)(i >> 24);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
index dfa0e01..41e14e3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
@@ -28,9 +28,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -191,14 +189,19 @@
         ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
       }
 
-      Document doc = new Document();
+      FieldTypes fieldTypes = w.getFieldTypes();
+
+      Document doc = w.newDocument();
+
+      List<Long> savedTokens = new ArrayList<>();
+
       final MyTokenStream ts = new MyTokenStream(random(), TERMS_PER_DOC);
 
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setIndexOptions(IndexOptions.DOCS);
-      customType.setOmitNorms(true);
-      Field field = new Field("field", ts, customType);
-      doc.add(field);
+      fieldTypes.disableStored("field");
+      fieldTypes.disableHighlighting("field");
+      fieldTypes.disableNorms("field");
+      fieldTypes.setIndexOptions("field", IndexOptions.DOCS);
+      doc.addLargeText("field", ts);
       //w.setInfoStream(System.out);
       final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC);
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java
index f81be8c..367153c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java
@@ -19,15 +19,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TimeUnits;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 
@@ -57,18 +54,14 @@
      ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
     }
 
-    final Document doc = new Document();
-    final FieldType ft = new FieldType();
-    ft.setStored(true);
-    ft.freeze();
+    final Document doc = w.newDocument();
     final int valueLength = RandomInts.randomIntBetween(random(), 1 << 13, 1 << 20);
     final byte[] value = new byte[valueLength];
     for (int i = 0; i < valueLength; ++i) {
       // random so that even compressing codecs can't compress it
       value[i] = (byte) random().nextInt(256);
     }
-    final Field f = new Field("fld", value, ft);
-    doc.add(f);
+    doc.addStoredBinary("fld", value);
 
     final int numDocs = (int) ((1L << 32) / valueLength + 100);
     for (int i = 0; i < numDocs; ++i) {
@@ -96,10 +89,10 @@
     }
 
     DirectoryReader rd = DirectoryReader.open(dir);
-    StoredDocument sd = rd.document(numDocs - 1);
+    Document sd = rd.document(numDocs - 1);
     assertNotNull(sd);
     assertEquals(1, sd.getFields().size());
-    BytesRef valueRef = sd.getBinaryValue("fld");
+    BytesRef valueRef = sd.getBinary("fld");
     assertNotNull(valueRef);
     assertEquals(new BytesRef(value), valueRef);
     rd.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java b/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java
new file mode 100644
index 0000000..c6ad09d
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java
@@ -0,0 +1,1083 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LowSchemaField;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+/** Holds test cases that make schema changes only "allowed" by the low schema. */
+public class TestAbuseSchema extends LuceneTestCase {
+
+  // LUCENE-1010
+  public void testNoTermVectorAfterTermVectorMerge() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(a));
+    List<LowSchemaField> document = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS, false);
+    field.enableTermVectors(false, false, false);
+    document.add(field);
+    iw.addDocument(document);
+    iw.commit();
+
+    document = new ArrayList<>();
+    document.add(new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS, false));
+    iw.addDocument(document);
+    // Make first segment
+    iw.commit();
+
+    iw.forceMerge(1);
+
+    document = new ArrayList<>();
+    document.add(field);
+    iw.addDocument(document);
+    // Make 2nd segment
+    iw.commit();
+    iw.forceMerge(1);
+
+    iw.close();
+    dir.close();
+  }
+
+  /** 
+   * In a single doc, for the same field, mix the term vectors up 
+   */
+  public void testInconsistentTermVectorOptions() throws IOException {
+
+    LowSchemaField f1, f2;
+
+    // no vectors + vectors
+    Analyzer a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2.enableTermVectors(false, false, false);
+    doTestMixup(f1, f2);
+    
+    // vectors + vectors with pos
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f1.enableTermVectors(false, false, false);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2.enableTermVectors(true, false, false);
+    doTestMixup(f1, f2);
+    
+    // vectors + vectors with off
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f1.enableTermVectors(false, false, false);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2.enableTermVectors(false, true, false);
+    doTestMixup(f1, f2);
+    
+    // vectors with pos + vectors with pos + off
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f1.enableTermVectors(true, false, false);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2.enableTermVectors(true, true, false);
+    doTestMixup(f1, f2);
+
+    // vectors with pos + vectors with pos + pay
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f1.enableTermVectors(true, false, false);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2.enableTermVectors(true, false, true);
+    doTestMixup(f1, f2);
+  }
+  
+  private void doTestMixup(LowSchemaField f1, LowSchemaField f2) throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    
+    // add 3 good docs
+    for (int i = 0; i < 3; i++) {
+      Document doc = iw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
+      iw.addDocument(doc);
+    }
+
+    // add broken doc
+    List<LowSchemaField> doc = new ArrayList<>();
+    doc.add(f1);
+    doc.add(f2);
+    
+    // ensure broken doc hits exception
+    try {
+      iw.addDocument(doc);
+      fail("didn't hit expected exception");
+    } catch (IllegalArgumentException iae) {
+      assertNotNull(iae.getMessage());
+      assertTrue(iae.getMessage().startsWith("all instances of a given field name must have the same term vectors settings"));
+    }
+    
+    // ensure good docs are still ok
+    IndexReader ir = iw.getReader();
+    assertEquals(3, ir.numDocs());
+    
+    ir.close();
+    iw.close();
+    dir.close();
+  }
+
+  // LUCENE-5611: don't abort segment when term vector settings are wrong
+  public void testNoAbortOnBadTVSettings() throws Exception {
+    Directory dir = newDirectory();
+    // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig iwc = new IndexWriterConfig(a);
+    IndexWriter iw = new IndexWriter(dir, iwc);
+
+    List<LowSchemaField> doc = new ArrayList<>();
+    iw.addDocument(doc);
+    LowSchemaField field = new LowSchemaField(a, "field", "value", IndexOptions.NONE, false);
+    field.enableTermVectors(false, false, false);
+    doc.add(field);
+    try {
+      iw.addDocument(doc);
+      fail("should have hit exc");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    IndexReader r = DirectoryReader.open(iw, true);
+
+    // Make sure the exc didn't lose our first document:
+    assertEquals(1, r.numDocs());
+    iw.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testPostingsOffsetsWithUnindexedFields() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    RandomIndexWriter riw = newRandomIndexWriter(dir, a);
+    for (int i = 0; i < 100; i++) {
+      // ensure at least one doc is indexed with offsets
+      LowSchemaField field;
+      if (i < 99 && random().nextInt(2) == 0) {
+        // stored only
+        field = new LowSchemaField(a, "foo", "boo!", IndexOptions.NONE, false);
+      } else {
+        field = new LowSchemaField(a, "foo", "boo!", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, true);
+        if (random().nextBoolean()) {
+          // store some term vectors for the checkindex cross-check
+          field.enableTermVectors(random().nextBoolean(), random().nextBoolean(), false);
+        }
+      }
+      riw.addDocument(Collections.singletonList(field));
+    }
+    CompositeReader ir = riw.getReader();
+    LeafReader slow = SlowCompositeReaderWrapper.wrap(ir);
+    FieldInfos fis = slow.getFieldInfos();
+    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, fis.fieldInfo("foo").getIndexOptions());
+    slow.close();
+    ir.close();
+    riw.close();
+    dir.close();
+  }
+  
+  /**
+   * Tests various combinations of omitNorms=true/false, the field not existing at all,
+   * ensuring that only omitNorms is 'viral'.
+   * Internally checks that MultiNorms.norms() is consistent with the fully
+   * merged equivalent (returns the same bytes).
+   */
+  public void testOmitNormsCombos() throws IOException {
+    Analyzer a = new MockAnalyzer(random());
+    // indexed with norms
+    LowSchemaField norms = new LowSchemaField(a, "foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+
+    // indexed without norms
+    LowSchemaField noNorms = new LowSchemaField(a, "foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    noNorms.disableNorms();
+
+    // not indexed, but stored
+    LowSchemaField noIndex = new LowSchemaField(a, "foo", "a", IndexOptions.NONE, false);
+
+    // not indexed but stored, omitNorms is set
+    LowSchemaField noNormsNoIndex = new LowSchemaField(a, "foo", "a", IndexOptions.NONE, false);
+    noNormsNoIndex.disableNorms();
+
+    // neither indexed nor stored (doesn't exist at all; we index a different field instead)
+    LowSchemaField emptyNorms = new LowSchemaField(a, "bar", "a", IndexOptions.NONE, false);
+    
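+    // omitNorms is "viral": once any document omits norms for "foo", the merged
+    // field has no norms, while docs that merely don't index the field leave
+    // norms intact: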
+    assertNotNull(getNorms("foo", norms, norms));
+    assertNull(getNorms("foo", norms, noNorms));
+    assertNotNull(getNorms("foo", norms, noIndex));
+    assertNotNull(getNorms("foo", norms, noNormsNoIndex));
+    assertNotNull(getNorms("foo", norms, emptyNorms));
+    assertNull(getNorms("foo", noNorms, noNorms));
+    assertNull(getNorms("foo", noNorms, noIndex));
+    assertNull(getNorms("foo", noNorms, noNormsNoIndex));
+    assertNull(getNorms("foo", noNorms, emptyNorms));
+    assertNull(getNorms("foo", noIndex, noIndex));
+    assertNull(getNorms("foo", noIndex, noNormsNoIndex));
+    assertNull(getNorms("foo", noIndex, emptyNorms));
+    assertNull(getNorms("foo", noNormsNoIndex, noNormsNoIndex));
+    assertNull(getNorms("foo", noNormsNoIndex, emptyNorms));
+    assertNull(getNorms("foo", emptyNorms, emptyNorms));
+  }
+
+  /**
+   * Indexes at least 1 document with f1, and at least 1 document with f2.
+   * Returns the norms for "field".
+   */
+  NumericDocValues getNorms(String field, LowSchemaField f1, LowSchemaField f2) throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
+                              .setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+    
+    // add f1
+    riw.addDocument(Collections.singletonList(f1));
+    
+    // add f2
+    riw.addDocument(Collections.singletonList(f2));
+    
+    // add a mix of f1's and f2's
+    int numExtraDocs = TestUtil.nextInt(random(), 1, 1000);
+    for (int i = 0; i < numExtraDocs; i++) {
+      riw.addDocument(Collections.singletonList(random().nextBoolean() ? f1 : f2));
+    }
+
+    IndexReader ir1 = riw.getReader();
+    // todo: generalize
+    NumericDocValues norms1 = MultiDocValues.getNormValues(ir1, field);
+    
+    // fully merge and validate MultiNorms against single segment.
+    riw.forceMerge(1);
+    DirectoryReader ir2 = riw.getReader();
+    NumericDocValues norms2 = getOnlySegmentReader(ir2).getNormValues(field);
+
+    if (norms1 == null) {
+      assertNull(norms2);
+    } else {
+      for(int docID=0;docID<ir1.maxDoc();docID++) {
+        assertEquals(norms1.get(docID), norms2.get(docID));
+      }
+    }
+    ir1.close();
+    ir2.close();
+    riw.close();
+    dir.close();
+    return norms1;
+  }
+
+  public void testSameFieldNameForPostingAndDocValue() throws Exception {
+    // LUCENE-5192: FieldInfos.Builder neglected to update
+    // globalFieldNumbers.docValuesType map if the field existed, resulting in
+    // potentially adding the same field with different DV types.
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
+    IndexWriter writer = new IndexWriter(dir, conf);
+    List<LowSchemaField> doc = new ArrayList<>();
+
+    LowSchemaField field = new LowSchemaField(a, "f", "mock-value", IndexOptions.DOCS, false);
+    field.disableNorms();
+    field.doNotStore();
+    doc.add(field);
+
+    field = new LowSchemaField(a, "f", 5, IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.NUMERIC);
+    doc.add(field);
+    writer.addDocument(doc);
+    writer.commit();
+    
+    doc = new ArrayList<>();
+    field = new LowSchemaField(a, "f", new BytesRef("mock"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.BINARY);
+    doc.add(field);
+
+    try {
+      writer.addDocument(doc);
+      fail("should not have succeeded to add a field with different DV type than what already exists");
+    } catch (IllegalArgumentException e) {
+      writer.rollback();
+    }
+    
+    dir.close();
+  }
+
+  // LUCENE-6049
+  public void testExcIndexingDocBeforeDocValues() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig iwc = new IndexWriterConfig(a);
+    IndexWriter w = new IndexWriter(dir, iwc);
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "test", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    field.setDocValuesType(DocValuesType.SORTED);
+    field.doNotStore();
+    field.setTokenStream(new TokenStream() {
+        @Override
+        public boolean incrementToken() {
+          throw new RuntimeException("no");
+        }
+      });
+    doc.add(field);
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (RuntimeException re) {
+      // expected
+    }
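+    // the writer should still accept new documents after the failed one: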
+    w.addDocument(w.newDocument());
+    w.close();
+    dir.close();
+  }
+
+  public void testSameFieldNumbersAcrossSegments() throws Exception {
+    for (int i = 0; i < 2; i++) {
+      Directory dir = newDirectory();
+      Analyzer a = new MockAnalyzer(random());
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(a)
+                                                   .setMergePolicy(NoMergePolicy.INSTANCE));
+
+      List<LowSchemaField> d1 = new ArrayList<>();
+      d1.add(new LowSchemaField(a, "f1", "first field", IndexOptions.DOCS, false));
+      d1.add(new LowSchemaField(a, "f2", "second field", IndexOptions.DOCS, false));
+      writer.addDocument(d1);
+
+      if (i == 1) {
+        writer.close();
+        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                                         .setMergePolicy(NoMergePolicy.INSTANCE));
+      } else {
+        writer.commit();
+      }
+
+      List<LowSchemaField> d2 = new ArrayList<>();
+      d2.add(new LowSchemaField(a, "f2", "second field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      LowSchemaField field = new LowSchemaField(a, "f1", "first field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+      field.enableTermVectors(false, false, false);
+      d2.add(field);
+      d2.add(new LowSchemaField(a, "f3", "third field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      d2.add(new LowSchemaField(a, "f4", "fourth field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      writer.addDocument(d2);
+
+      writer.close();
+
+      SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
+      assertEquals(2, sis.size());
+
+      FieldInfos fis1 = IndexWriter.readFieldInfos(sis.info(0));
+      FieldInfos fis2 = IndexWriter.readFieldInfos(sis.info(1));
+
+      assertEquals("f1", fis1.fieldInfo(0).name);
+      assertEquals("f2", fis1.fieldInfo(1).name);
+      assertEquals("f1", fis2.fieldInfo(0).name);
+      assertEquals("f2", fis2.fieldInfo(1).name);
+      assertEquals("f3", fis2.fieldInfo(2).name);
+      assertEquals("f4", fis2.fieldInfo(3).name);
+
+      writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+      writer.forceMerge(1);
+      writer.close();
+
+      sis = SegmentInfos.readLatestCommit(dir);
+      assertEquals(1, sis.size());
+
+      FieldInfos fis3 = IndexWriter.readFieldInfos(sis.info(0));
+
+      assertEquals("f1", fis3.fieldInfo(0).name);
+      assertEquals("f2", fis3.fieldInfo(1).name);
+      assertEquals("f3", fis3.fieldInfo(2).name);
+      assertEquals("f4", fis3.fieldInfo(3).name);
+
+      dir.close();
+    }
+  }
+
+  public void testEnablingNorms() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(a)
+                                          .setMaxBufferedDocs(10));
+    // Enable norms for only 1 doc, pre flush
+    for(int j=0;j<10;j++) {
+      List<LowSchemaField> doc = new ArrayList<>();
+      LowSchemaField f;
+      if (j != 8) {
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f.disableNorms();
+      } else {
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f.doNotStore();
+      }
+      doc.add(f);
+      writer.addDocument(doc);
+    }
+    writer.close();
+
+    Term searchTerm = new Term("field", "aaa");
+
+    IndexReader reader = DirectoryReader.open(dir);
+    IndexSearcher searcher = newSearcher(reader);
+    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals(10, hits.length);
+    reader.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig(a)
+                             .setOpenMode(IndexWriterConfig.OpenMode.CREATE).setMaxBufferedDocs(10));
+    // Enable norms for only 1 doc, post flush
+    for(int j=0;j<27;j++) {
+      List<LowSchemaField> doc = new ArrayList<>();
+      LowSchemaField f;
+      if (j != 26) {
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f.disableNorms();
+      } else {
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f.doNotStore();
+      }
+      doc.add(f);
+      writer.addDocument(doc);
+    }
+    writer.close();
+    reader = DirectoryReader.open(dir);
+    searcher = newSearcher(reader);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals(27, hits.length);
+    reader.close();
+
+    reader = DirectoryReader.open(dir);
+    reader.close();
+
+    dir.close();
+  }
+
+  public void testVariableSchema() throws Exception {
+    Directory dir = newDirectory();
+    for(int i=0;i<20;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
+      Analyzer a = new MockAnalyzer(random());
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(a)
+                                                  .setMaxBufferedDocs(2)
+                                                  .setMergePolicy(newLogMergePolicy()));
+      //LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+      //lmp.setMergeFactor(2);
+      //lmp.setNoCFSRatio(0.0);
+      List<LowSchemaField> doc = new ArrayList<>();
+      String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+
+      if (i == 7) {
+        // Add empty docs here
+        LowSchemaField field = new LowSchemaField(a, "content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        field.doNotStore();
+        doc.add(field);
+      } else {
+        if (i%2 == 0) {
+          doc.add(new LowSchemaField(a, "content4", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+          doc.add(new LowSchemaField(a, "content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+        } else {
+          LowSchemaField field = new LowSchemaField(a, "content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+          field.doNotStore();
+          doc.add(field);
+        }
+        LowSchemaField field = new LowSchemaField(a, "content1", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        field.doNotStore();
+        doc.add(field);
+        doc.add(new LowSchemaField(a, "content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      }
+
+      for(int j=0;j<4;j++) {
+        writer.addDocument(doc);
+      }
+
+      writer.close();
+
+      if (0 == i % 4) {
+        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+        //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
+        //lmp2.setNoCFSRatio(0.0);
+        writer.forceMerge(1);
+        writer.close();
+      }
+    }
+    dir.close();
+  }
+
+  public void testIndexStoreCombos() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
+    byte[] b = new byte[50];
+    for(int i=0;i<50;i++) {
+      b[i] = (byte) (i+77);
+    }
+
+    List<LowSchemaField> doc = new ArrayList<>();
+
+    LowSchemaField f = new LowSchemaField(a, "binary", new BytesRef(b, 10, 17), IndexOptions.DOCS, true);
+    final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc1field1.setReader(new StringReader("doc1field1"));
+    f.setTokenStream(doc1field1);
+
+    LowSchemaField f2 = new LowSchemaField(a, "string", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    final MockTokenizer doc1field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc1field2.setReader(new StringReader("doc1field2"));
+    f2.setTokenStream(doc1field2);
+    doc.add(f);
+    doc.add(f2);
+    w.addDocument(doc);
+
+    // add 2 docs to test in-memory merging
+    final MockTokenizer doc2field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc2field1.setReader(new StringReader("doc2field1"));
+    f.setTokenStream(doc2field1);
+    final MockTokenizer doc2field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc2field2.setReader(new StringReader("doc2field2"));
+    f2.setTokenStream(doc2field2);
+    w.addDocument(doc);
+
+    // force segment flush so we can force a segment merge with doc3 later.
+    w.commit();
+
+    final MockTokenizer doc3field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc3field1.setReader(new StringReader("doc3field1"));
+    f.setTokenStream(doc3field1);
+    final MockTokenizer doc3field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    doc3field2.setReader(new StringReader("doc3field2"));
+    f2.setTokenStream(doc3field2);
+
+    w.addDocument(doc);
+    w.commit();
+    w.forceMerge(1);   // force segment merge.
+    w.close();
+
+    IndexReader ir = DirectoryReader.open(dir);
+    Document doc2 = ir.document(0);
+    IndexableField f3 = doc2.getField("binary");
+    BytesRef binaryValue = f3.binaryValue();
+    assertNotNull(binaryValue);
+    assertEquals(17, binaryValue.length);
+    assertEquals(87, binaryValue.bytes[binaryValue.offset]);
+
+    assertTrue(ir.document(0).getField("binary").binaryValue()!=null);
+    assertTrue(ir.document(1).getField("binary").binaryValue()!=null);
+    assertTrue(ir.document(2).getField("binary").binaryValue()!=null);
+
+    assertEquals("value", ir.document(0).get("string"));
+    assertEquals("value", ir.document(1).get("string"));
+    assertEquals("value", ir.document(2).get("string"));
+
+    // test that the terms were indexed.
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+
+    ir.close();
+    dir.close();
+  }
+
+  // Tests whether the DocumentWriter correctly enables the
+  // omitTermFreqAndPositions bit in the FieldInfo
+  public void testPositions() throws Exception {
+    Directory ram = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
+    List<LowSchemaField> d = new ArrayList<>();
+        
+    // f1,f2,f3: docs only
+    d.add(new LowSchemaField(analyzer, "f1", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f2", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f3", "This field has docs only", IndexOptions.DOCS, true));
+
+    d.add(new LowSchemaField(analyzer, "f4", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f6", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    
+    d.add(new LowSchemaField(analyzer, "f7", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f8", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+        
+    writer.addDocument(d);
+    writer.forceMerge(1);
+
+    // now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8, 
+    // and docs/freqs/positions for f3, f6, f9
+    d = new ArrayList<>();
+    
+    // f1,f4,f7: docs only
+    d.add(new LowSchemaField(analyzer, "f1", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f4", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f7", "This field has docs only", IndexOptions.DOCS, true));
+
+    // f2, f5, f8: docs and freqs
+    d.add(new LowSchemaField(analyzer, "f2", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f8", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    
+    // f3, f6, f9: docs and freqs and positions
+    d.add(new LowSchemaField(analyzer, "f3", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f6", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    writer.addDocument(d);
+
+    // force merge
+    writer.forceMerge(1);
+    // flush
+    writer.close();
+
+    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(ram));
+    FieldInfos fi = reader.getFieldInfos();
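+    // when the same field is indexed with different options, the merged
+    // FieldInfo keeps the lowest (least detailed) option seen: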
+    // docs + docs = docs
+    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f1").getIndexOptions());
+    // docs + docs/freqs = docs
+    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f2").getIndexOptions());
+    // docs + docs/freqs/pos = docs
+    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f3").getIndexOptions());
+    // docs/freqs + docs = docs
+    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f4").getIndexOptions());
+    // docs/freqs + docs/freqs = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f5").getIndexOptions());
+    // docs/freqs + docs/freqs/pos = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f6").getIndexOptions());
+    // docs/freqs/pos + docs = docs
+    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f7").getIndexOptions());
+    // docs/freqs/pos + docs/freqs = docs/freqs
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f8").getIndexOptions());
+    // docs/freqs/pos + docs/freqs/pos = docs/freqs/pos
+    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f9").getIndexOptions());
+    
+    reader.close();
+    ram.close();
+  }
+  
+  // Verifies no *.prx exists when all fields omit term positions:
+  public void testNoPrxFile() throws Throwable {
+    Directory ram = newDirectory();
+    if (ram instanceof MockDirectoryWrapper) {
+      // we verify some files get deleted
+      ((MockDirectoryWrapper)ram).setEnableVirusScanner(false);
+    }
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
+                                                .setMaxBufferedDocs(3)
+                                                .setMergePolicy(newLogMergePolicy()));
+    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+    lmp.setMergeFactor(2);
+    lmp.setNoCFSRatio(0.0);
+
+    List<LowSchemaField> d = new ArrayList<>();
+    d.add(new LowSchemaField(analyzer, "f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS, true));
+    for(int i=0;i<30;i++) {
+      writer.addDocument(d);
+    }
+
+    writer.commit();
+
+    assertNoPrx(ram);
+    
+    // now add some documents that request positions; since "f1" is already
+    // registered as DOCS_AND_FREQS, positions are downgraded away and no
+    // prox/pos file survives the forceMerge
+    d = new ArrayList<>();
+    d.add(new LowSchemaField(analyzer, "f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    
+    for(int i=0;i<30;i++) {
+      writer.addDocument(d);
+    }
+
+    // force merge
+    writer.forceMerge(1);
+    // flush
+    writer.close();
+
+    assertNoPrx(ram);
+    ram.close();
+  }
+
+  private void assertNoPrx(Directory dir) throws Throwable {
+    final String[] files = dir.listAll();
+    for(int i=0;i<files.length;i++) {
+      assertFalse(files[i].endsWith(".prx"));
+      assertFalse(files[i].endsWith(".pos"));
+    }
+  }
+  
+  /** make sure we downgrade positions and payloads correctly */
+  public void testMixing() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+
+    RandomIndexWriter iw = newRandomIndexWriter(dir, a);
+    
+    for (int i = 0; i < 20; i++) {
+      List<LowSchemaField> doc = new ArrayList<>();
+      if (i < 19 && random().nextBoolean()) {
+        for (int j = 0; j < 50; j++) {
+          doc.add(new LowSchemaField(a, "foo", "i have positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+        }
+      } else {
+        for (int j = 0; j < 50; j++) {
+          doc.add(new LowSchemaField(a, "foo", "i have no positions", IndexOptions.DOCS_AND_FREQS, true));
+        }
+      }
+      iw.addDocument(doc);
+      iw.commit();
+    }
+    
+    if (random().nextBoolean()) {
+      iw.forceMerge(1);
+    }
+    
+    DirectoryReader ir = iw.getReader();
+    FieldInfos fis = MultiFields.getMergedFieldInfos(ir);
+    assertEquals(IndexOptions.DOCS_AND_FREQS, fis.fieldInfo("foo").getIndexOptions());
+    assertFalse(fis.fieldInfo("foo").hasPayloads());
+    iw.close();
+    ir.close();
+    dir.close(); // checkindex
+  }
+
+  public void testTypeChangeViaAddIndexesIR2() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
+    IndexWriter writer = new IndexWriter(dir, conf);
+    LowSchemaField field = new LowSchemaField(a, "dv", 0L, IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.NUMERIC);
+    List<LowSchemaField> doc = new ArrayList<>();
+    doc.add(field);
+    writer.addDocument(doc);
+    writer.close();
+
+    Directory dir2 = newDirectory();
+    conf = newIndexWriterConfig(new MockAnalyzer(random()));
+    writer = new IndexWriter(dir2, conf);
+    DirectoryReader reader = DirectoryReader.open(dir);
+    TestUtil.addIndexesSlowly(writer, reader);
+    reader.close();
+    field = new LowSchemaField(a, "dv", new BytesRef("foo"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.BINARY);
+    doc = new ArrayList<>();
+    doc.add(field);
+    try {
+      writer.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    writer.close();
+    dir2.close();
+    dir.close();
+  }
+
+  // LUCENE-1008
+  public void testNoTermVectorAfterTermVector() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(a));
+
+    List<LowSchemaField> document = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    field.enableTermVectors(true, true, true);
+    document.add(field);
+    iw.addDocument(document);
+
+    document = new ArrayList<>();
+    field = new LowSchemaField(a, "tvtest", "x y z", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    field.enableTermVectors(true, true, true);
+    document.add(field);
+    iw.addDocument(document);
+
+    // Make first segment
+    iw.commit();
+
+    document = new ArrayList<>();
+    field = new LowSchemaField(a, "tvtest", "a b c", IndexOptions.NONE, false);
+    document.add(field);
+    iw.addDocument(document);
+    // Make 2nd segment
+    iw.commit();
+
+    iw.forceMerge(1);
+    iw.close();
+    dir.close();
+  }
+
+  /**
+   * Test adding two fields with the same name, one indexed
+   * the other stored only. The omitNorms and omitTermFreqAndPositions setting
+   * of the stored field should not affect the indexed one (LUCENE-1590)
+   */
+  public void testLUCENE_1590() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(a));
+
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "f1", "v1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    field.disableNorms();
+    doc.add(field);
+
+    field = new LowSchemaField(a, "f1", "v2", IndexOptions.NONE, false);
+    doc.add(field);
+
+    // f2 has no TF
+    field = new LowSchemaField(a, "f2", "v1", IndexOptions.DOCS, true);
+    doc.add(field);
+
+    field = new LowSchemaField(a, "f2", "v2", IndexOptions.NONE, false);
+    doc.add(field);
+
+    writer.addDocument(doc);
+    writer.forceMerge(1); // be sure to have a single segment
+    writer.close();
+
+    TestUtil.checkIndex(dir);
+
+    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(dir));
+    FieldInfos fi = reader.getFieldInfos();
+    // f1
+    assertFalse("f1 should have no norms", fi.fieldInfo("f1").hasNorms());
+    assertEquals("omitTermFreqAndPositions field bit should not be set for f1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f1").getIndexOptions());
+    // f2
+    assertTrue("f2 should have norms", fi.fieldInfo("f2").hasNorms());
+    assertEquals("omitTermFreqAndPositions field bit should be set for f2", IndexOptions.DOCS, fi.fieldInfo("f2").getIndexOptions());
+    reader.close();
+    dir.close();
+  }
+
+  public void testMixedTypesAfterReopenAppend1() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "foo", 0, IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.NUMERIC);
+    doc.add(field);
+    w.addDocument(doc);
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    doc = new ArrayList<>();
+    field = new LowSchemaField(a, "foo", new BytesRef("hello"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.SORTED);
+    doc.add(field);
+    try {
+      w.addDocument(doc);
+      fail("did not get expected exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testMixedTypesAfterReopenAppend2() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "foo", new BytesRef("foo"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.SORTED_SET);
+    doc.add(field);
+    w.addDocument(doc);
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    doc = new ArrayList<>();
+    field = new LowSchemaField(a, "foo", "bar", IndexOptions.DOCS, false);
+    doc.add(field);
+    field = new LowSchemaField(a, "foo", new BytesRef("foo"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.BINARY);
+    doc.add(field);
+    try {
+      w.addDocument(doc);
+      fail("did not get expected exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testMixedTypesAfterReopenAppend3() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "foo", new BytesRef("foo"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.SORTED_SET);
+    doc.add(field);
+    w.addDocument(doc);
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    doc = new ArrayList<>();
+    field = new LowSchemaField(a, "foo", "bar", IndexOptions.DOCS, false);
+    doc.add(field);
+    field = new LowSchemaField(a, "foo", new BytesRef("foo"), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.BINARY);
+    doc.add(field);
+    try {
+      w.addDocument(doc);
+      fail("did not get expected exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    // Also add another document so there is a segment to write here:
+    w.addDocument(w.newDocument());
+    w.close();
+    dir.close();
+  }
+
+  public void testUntokenizedReader() throws Exception {
+    IndexWriter w = newIndexWriter();
+    List<LowSchemaField> doc = new ArrayList<>();
+    doc.add(new LowSchemaField(new MockAnalyzer(random()), "field", new StringReader("string"), IndexOptions.DOCS, false));
+    shouldFail(() -> w.addDocument(doc),
+               "field \"field\" is stored but does not have binaryValue, stringValue nor numericValue");
+    w.close();
+  }
+
+  public void testUpdateNumericDVFieldWithSameNameAsPostingField() throws Exception {
+    // this used to fail because FieldInfos.Builder neglected to update
+    // globalFieldMaps.docValuesTypes map
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "f", "mock-value", IndexOptions.DOCS, false);
+    doc.add(field);
+    
+    field = new LowSchemaField(a, "f", 5, IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.NUMERIC);
+    doc.add(field);
+    writer.addDocument(doc);
+
+    writer.commit();
+    writer.updateNumericDocValue(new Term("f", "mock-value"), "f", 17L);
+    writer.close();
+    
+    DirectoryReader r = DirectoryReader.open(dir);
+    NumericDocValues ndv = r.leaves().get(0).reader().getNumericDocValues("f");
+    assertEquals(17, ndv.get(0));
+    r.close();
+  }
+
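+  // Encodes a long into 7-bit groups, least significant first, setting the
+  // high bit on every byte except the last (the classic vLong layout).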
+  static BytesRef toBytes(long value) {
+    BytesRef bytes = new BytesRef(10); // negative longs may take 10 bytes
+    while ((value & ~0x7FL) != 0L) {
+      bytes.bytes[bytes.length++] = (byte) ((value & 0x7FL) | 0x80L);
+      value >>>= 7;
+    }
+    bytes.bytes[bytes.length++] = (byte) value;
+    return bytes;
+  }
+
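+  // Decodes a long previously written by toBytes from a document's binary value.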
+  static long getValue(BinaryDocValues bdv, int idx) {
+    BytesRef term = bdv.get(idx);
+    idx = term.offset;
+    byte b = term.bytes[idx++];
+    long value = b & 0x7FL;
+    for (int shift = 7; (b & 0x80L) != 0; shift += 7) {
+      b = term.bytes[idx++];
+      value |= (b & 0x7FL) << shift;
+    }
+    return value;
+  }
+
+  public void testUpdateBinaryDVFieldWithSameNameAsPostingField() throws Exception {
+    // this used to fail because FieldInfos.Builder neglected to update
+    // globalFieldMaps.docValuesTypes map
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(a, "f", "mock-value", IndexOptions.DOCS, false);
+    doc.add(field);
+
+    field = new LowSchemaField(a, "f", toBytes(5L), IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.BINARY);
+    doc.add(field);
+
+    writer.addDocument(doc);
+    writer.commit();
+    writer.updateBinaryDocValue(new Term("f", "mock-value"), "f", toBytes(17L));
+    writer.close();
+    
+    DirectoryReader r = DirectoryReader.open(dir);
+    BinaryDocValues bdv = r.leaves().get(0).reader().getBinaryDocValues("f");
+    assertEquals(17, getValue(bdv, 0));
+    r.close();
+  }
+
+  public void testHasUncommittedChangesAfterException() throws IOException {
+    Analyzer analyzer = new MockAnalyzer(random());
+
+    Directory directory = newDirectory();
+    // we don't use RandomIndexWriter because it might add more docvalues than we expect!
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    IndexWriter iwriter = new IndexWriter(directory, iwc);
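+    // a field may hold only one SORTED doc value per document, so adding the
+    // two "dv" values below must fail and leave no uncommitted changes behind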
+    List<LowSchemaField> doc = new ArrayList<>();
+    LowSchemaField field = new LowSchemaField(analyzer, "dv", "foo!", IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.SORTED);
+    doc.add(field);
+
+    field = new LowSchemaField(analyzer, "dv", "bar!", IndexOptions.NONE, false);
+    field.setDocValuesType(DocValuesType.SORTED);
+    doc.add(field);
+
+    try {
+      iwriter.addDocument(doc);
+      fail("didn't hit expected exception");
+    } catch (IllegalArgumentException expected) {
+      // expected
+    }
+    iwriter.commit();
+    assertFalse(iwriter.hasUncommittedChanges());
+    iwriter.close();
+    directory.close();
+  }
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 43caaae..7e4c542 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -30,10 +30,7 @@
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.PhraseQuery;
@@ -160,15 +157,15 @@
     Directory aux = newDirectory();
 
     setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig().setOpenMode(OpenMode.APPEND));
     writer.addIndexes(aux);
 
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     // Deletes one of the 10 added docs, leaving 9:
@@ -201,9 +198,9 @@
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     
@@ -239,9 +236,9 @@
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
 
@@ -514,16 +511,16 @@
 
   private void addDocs(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "aaa");
       writer.addDocument(doc);
     }
   }
 
   private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "bbb", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "bbb");
       writer.addDocument(doc);
     }
   }
@@ -599,28 +596,29 @@
     LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
     lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(100);
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        new MockAnalyzer(random()))
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()
         .setMaxBufferedDocs(5).setMergePolicy(lmp));
 
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
-    for(int i=0;i<60;i++)
-      writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document doc = writer.newDocument();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
+    fieldTypes.setMultiValued("content");
 
-    Document doc2 = new Document();
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    for(int i=0;i<10;i++)
-      writer.addDocument(doc2);
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    for(int i=0;i<60;i++) {
+      writer.addDocument(doc);
+    }
+
+    doc = writer.newDocument();
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    for(int i=0;i<10;i++) {
+      writer.addDocument(doc);
+    }
     writer.close();
 
     Directory dir2 = newDirectory();
@@ -638,11 +636,10 @@
 
   // TODO: these are also in TestIndexWriter... add a simple doc-writing method
   // like this to LuceneTestCase?
-  private void addDoc(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
-      writer.addDocument(doc);
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    writer.addDocument(doc);
   }
   
   private abstract class RunAddIndexesThreads {
@@ -977,8 +974,8 @@
       dirs[i] = newDirectory();
       IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
       IndexWriter writer = new IndexWriter(dirs[i], conf);
-      Document doc = new Document();
-      doc.add(new StringField("id", "myid", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "myid");
       writer.addDocument(doc);
       writer.close();
     }
@@ -1004,9 +1001,9 @@
   // just like addDocs but with ID, starting from docStart
   private void addDocsWithID(IndexWriter writer, int numDocs, int docStart) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
-      doc.add(newTextField("id", "" + (docStart + i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("content", "aaa");
+      doc.addLargeText("id", "" + (docStart + i));
       writer.addDocument(doc);
     }
   }
@@ -1096,10 +1093,10 @@
     for (int i = 0; i < dirs.length; i++) {
       dirs[i] = new RAMDirectory();
       IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(new MockAnalyzer(random())));
-      Document d = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      d.add(new Field("c", "v", customType));
+      Document d = w.newDocument();
+      FieldTypes fieldTypes = w.getFieldTypes();
+      fieldTypes.enableTermVectors("c");
+      d.addLargeText("c", "v");
       w.addDocument(d);
       w.close();
     }
@@ -1142,10 +1139,8 @@
       IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
       conf.setCodec(new UnRegisteredCodec());
       IndexWriter w = new IndexWriter(toAdd, conf);
-      Document doc = new Document();
-      FieldType customType = new FieldType();
-      customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); 
-      doc.add(newField("foo", "bar", customType));
+      Document doc = w.newDocument();
+      doc.addLargeText("foo", "bar");
       w.addDocument(doc);
       w.close();
     }
@@ -1181,18 +1176,18 @@
   public void testFieldNamesChanged() throws IOException {
     Directory d1 = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d1);
-    Document doc = new Document();
-    doc.add(newStringField("f1", "doc1 field1", Field.Store.YES));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addAtom("f1", "doc1 field1");
+    doc.addAtom("id", "1");
     w.addDocument(doc);
     DirectoryReader r1 = w.getReader();
     w.close();
 
     Directory d2 = newDirectory();
     w = new RandomIndexWriter(random(), d2);
-    doc = new Document();
-    doc.add(newStringField("f2", "doc2 field2", Field.Store.YES));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addAtom("f2", "doc2 field2");
+    doc.addAtom("id", "2");
     w.addDocument(doc);
     DirectoryReader r2 = w.getReader();
     w.close();
@@ -1209,11 +1204,11 @@
     w.close();
     assertEquals(2, r3.numDocs());
     for(int docID=0;docID<2;docID++) {
-      StoredDocument d = r3.document(docID);
-      if (d.get("id").equals("1")) {
-        assertEquals("doc1 field1", d.get("f1"));
+      Document d = r3.document(docID);
+      if (d.getString("id").equals("1")) {
+        assertEquals("doc1 field1", d.getString("f1"));
       } else {
-        assertEquals("doc2 field2", d.get("f2"));
+        assertEquals("doc2 field2", d.getString("f2"));
       }
     }
     r3.close();
@@ -1240,7 +1235,7 @@
   public void testFakeAllDeleted() throws Exception {
     Directory src = newDirectory(), dest = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), src);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     LeafReader allDeletedReader = new AllDeletedFilterReader(w.getReader().leaves().get(0).reader());
     w.close();
     
@@ -1262,7 +1257,7 @@
   public void testLocksBlock() throws Exception {
     Directory src = newDirectory();
     RandomIndexWriter w1 = new RandomIndexWriter(random(), src);
-    w1.addDocument(new Document());
+    w1.addDocument(w1.newDocument());
     w1.commit();
 
     Directory dest = newDirectory();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java
index 66eb343..b062dc0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java
@@ -22,8 +22,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.LuceneTestCase;
@@ -38,17 +36,12 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setCodec(TestUtil.getDefaultCodec());
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    // these fields should sometimes get term vectors, etc
-    Field idField = newStringField("id", "", Field.Store.NO);
-    Field bodyField = newTextField("body", "", Field.Store.NO);
-    Field dvField = new NumericDocValuesField("dv", 5);
-    doc.add(idField);
-    doc.add(bodyField);
-    doc.add(dvField);
     for (int i = 0; i < 100; i++) {
-      idField.setStringValue(Integer.toString(i));
-      bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
+      Document doc = riw.newDocument();
+      doc.addUniqueInt("id", i);
+      // these fields should sometimes get term vectors, etc
+      doc.addLargeText("body", TestUtil.randomUnicodeString(random()));
+      doc.addInt("dv", 5);
       riw.addDocument(doc);
       if (random().nextInt(7) == 0) {
         riw.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
index c2b515d..868a8c3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
@@ -24,10 +24,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.LuceneTestCase;
@@ -43,23 +40,16 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setCodec(TestUtil.getDefaultCodec());
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    Field bodyField = newTextField("body", "", Field.Store.YES);
-    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
-    vectorsType.setStoreTermVectors(true);
-    vectorsType.setStoreTermVectorPositions(true);
-    Field vectorsField = new Field("vectors", "", vectorsType);
-    Field dvField = new NumericDocValuesField("dv", 5);
-    doc.add(idField);
-    doc.add(bodyField);
-    doc.add(vectorsField);
-    doc.add(dvField);
+    FieldTypes fieldTypes = riw.getFieldTypes();
+    fieldTypes.enableTermVectors("vectors");
+    fieldTypes.enableTermVectorPositions("vectors");
+
     for (int i = 0; i < 100; i++) {
-      idField.setStringValue(Integer.toString(i));
-      bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
-      dvField.setLongValue(random().nextInt(5));
-      vectorsField.setStringValue(TestUtil.randomUnicodeString(random()));
+      Document doc = riw.newDocument();
+      doc.addInt("id", i);
+      doc.addLargeText("body", TestUtil.randomUnicodeString(random()));
+      doc.addLargeText("vectors", TestUtil.randomUnicodeString(random()));
+      doc.addInt("dv", random().nextInt(5));
       riw.addDocument(doc);
       if (random().nextInt(7) == 0) {
         riw.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
index db6bbb6..922beef 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -75,12 +75,14 @@
 
     @Override
     public void doWork() throws Exception {
+      FieldTypes fieldTypes = writer.getFieldTypes();
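+      // "id" is indexed via addUniqueInt, so update terms are built with newIntTerm rather than a text Term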
+
       // Update all 100 docs...
       for(int i=0; i<100; i++) {
-        Document d = new Document();
-        d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
-        d.add(new TextField("contents", English.intToEnglish(i+10*count), Field.Store.NO));
-        writer.updateDocument(new Term("id", Integer.toString(i)), d);
+        Document d = writer.newDocument();
+        d.addUniqueInt("id", i);
+        d.addLargeText("contents", English.intToEnglish(i+10*count));
+        writer.updateDocument(fieldTypes.newIntTerm("id", i), d);
       }
     }
   }
@@ -116,9 +118,9 @@
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
-      Document d = new Document();
-      d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
-      d.add(newTextField("contents", English.intToEnglish(i), Field.Store.NO));
+      Document d = writer.newDocument();
+      d.addUniqueInt("id", i);
+      d.addLargeText("contents", English.intToEnglish(i));
       if ((i-1)%7 == 0) {
         writer.commit();
       }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
index e6877d5..e71fd8a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
@@ -26,8 +26,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -76,29 +75,29 @@
       System.out.println("config: " + iw.w.getConfig());
       System.out.println("threadCount=" + threadCount);
     }
-    
-    Field prototype = newTextField("field", "", Field.Store.NO);
-    FieldType fieldType = new FieldType(prototype.fieldType());
+
+    FieldTypes fieldTypes = iw.getFieldTypes();
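+    // per-field options (norms, index options, term vectors) are configured once on the writer-owned FieldTypes schema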
     if (random().nextBoolean()) {
-      fieldType.setOmitNorms(true);
+      fieldTypes.disableNorms("field");
     }
     int options = random().nextInt(3);
     if (options == 0) {
-      fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); // we dont actually need positions
-      fieldType.setStoreTermVectors(true); // but enforce term vectors when we do this so we check SOMETHING
+      fieldTypes.disableHighlighting("field");
+      fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS); // we don't actually need positions
+      fieldTypes.enableTermVectors("field"); // but enforce term vectors when we do this so we check SOMETHING
     } else if (options == 1) {
-      fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+      fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    } else {
+      // just positions
+      fieldTypes.disableHighlighting("field");
+      fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
     }
-    // else just positions
 
     Thread[] threads = new Thread[threadCount];
     final CountDownLatch startingGun = new CountDownLatch(1);
 
     for(int threadID=0;threadID<threadCount;threadID++) {
       final Random threadRandom = new Random(random().nextLong());
-      final Document document = new Document();
-      final Field field = new Field("field", "", fieldType);
-      document.add(field);
       threads[threadID] = new Thread() {
           @Override
           public void run() {
@@ -115,7 +114,8 @@
                   text.append(' ');
                   text.append(token);
                 }
-                field.setStringValue(text.toString());
+                Document document = iw.newDocument();
+                document.addLargeText("field", text.toString());
                 iw.addDocument(document);
               }
             } catch (Exception e) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
index 831e634..a18842d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
@@ -27,7 +27,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -86,9 +85,6 @@
           @Override
           public void run() {
             try {
-              Document document = new Document();
-              Field field = newTextField("field", "", Field.Store.NO);
-              document.add(field);
               startingGun.await();
               while (!postings.isEmpty()) {
                 StringBuilder text = new StringBuilder();
@@ -107,7 +103,8 @@
                   text.append(token);
                   visited.add(token);
                 }
-                field.setStringValue(text.toString());
+                Document document = iw.newDocument();
+                document.addLargeText("field", text.toString());
                 iw.addDocument(document);
               }
             } catch (Exception e) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
index 39ea7a6..199bc66 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
@@ -12,13 +12,8 @@
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.asserting.AssertingDocValuesFormat;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NRTCachingDirectory;
@@ -26,10 +21,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 /*
@@ -76,10 +69,10 @@
     return bytes;
   }
   
-  private Document doc(int id) {
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc-" + id, Store.NO));
-    doc.add(new BinaryDocValuesField("val", toBytes(id + 1)));
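+  // builds a document keyed "doc-<id>" whose binary "val" encodes id + 1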
+  private Document doc(IndexWriter w, int id) {
+    Document doc = w.newDocument();
+    doc.addAtom("id", "doc-" + id);
+    doc.addBinary("val", toBytes(id + 1));
     return doc;
   }
   
@@ -87,20 +80,22 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
                                                 .setRAMBufferSizeMB(0.00000001));
-    writer.addDocument(doc(0)); // val=1
-    writer.addDocument(doc(1)); // val=2
-    writer.addDocument(doc(3)); // val=2
+    FieldTypes fieldTypes = writer.getFieldTypes();
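+    // addBinary appears to default to a sortable doc-values variant in this
+    // branch; disabling sorting stores "val" as plain BINARY doc values,
+    // which updateBinaryDocValue requires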
+    fieldTypes.disableSorting("val");
+    writer.addDocument(doc(writer, 0)); // val=1
+    writer.addDocument(doc(writer, 1)); // val=2
+    writer.addDocument(doc(writer, 3)); // val=2
     writer.commit();
-    assertEquals(1, writer.getFlushDeletesCount());
-    writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(5));
     assertEquals(2, writer.getFlushDeletesCount());
-    writer.updateBinaryDocValue(new Term("id", "doc-1"), "val", toBytes(6));
+    writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(5));
     assertEquals(3, writer.getFlushDeletesCount());
-    writer.updateBinaryDocValue(new Term("id", "doc-2"), "val", toBytes(7)); 
+    writer.updateBinaryDocValue(new Term("id", "doc-1"), "val", toBytes(6));
     assertEquals(4, writer.getFlushDeletesCount());
+    writer.updateBinaryDocValue(new Term("id", "doc-2"), "val", toBytes(7)); 
+    assertEquals(5, writer.getFlushDeletesCount());
     writer.getConfig().setRAMBufferSizeMB(1000d);
     writer.updateBinaryDocValue(new Term("id", "doc-2"), "val", toBytes(7));
-    assertEquals(4, writer.getFlushDeletesCount());
+    assertEquals(5, writer.getFlushDeletesCount());
     writer.close();
     dir.close();
   }
@@ -112,8 +107,10 @@
     conf.setMaxBufferedDocs(10);
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(doc(0)); // val=1
-    writer.addDocument(doc(1)); // val=2
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
+    writer.addDocument(doc(writer, 0)); // val=1
+    writer.addDocument(doc(writer, 1)); // val=2
     if (random().nextBoolean()) { // randomly commit before the update is sent
       writer.commit();
     }
@@ -144,10 +141,12 @@
     conf.setMaxBufferedDocs(2); // generate few segments
     conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges for this test
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
     int numDocs = 10;
     long[] expectedValues = new long[numDocs];
     for (int i = 0; i < numDocs; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
       expectedValues[i] = i + 1;
     }
     writer.commit();
@@ -189,8 +188,11 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(doc(0));
-    writer.addDocument(doc(1));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
+
+    writer.addDocument(doc(writer, 0));
+    writer.addDocument(doc(writer, 1));
     
     final boolean isNRT = random().nextBoolean();
     final DirectoryReader reader1;
@@ -229,9 +231,11 @@
     conf.setMaxBufferedDocs(10); // control segment flushing
     conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges for this test
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
+
     for (int i = 0; i < 6; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
       if (i % 2 == 1) {
         writer.commit(); // create 2-docs segments
       }
@@ -277,9 +281,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(10); // control segment flushing
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
     
-    writer.addDocument(doc(0));
-    writer.addDocument(doc(1));
+    writer.addDocument(doc(writer, 0));
+    writer.addDocument(doc(writer, 1));
     
     if (random().nextBoolean()) {
       writer.commit();
@@ -310,15 +316,18 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(10); // prevent merges
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("ssdv");
+    fieldTypes.disableSorting("bdv");
+
     for (int i = 0; i < 4; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
-      doc.add(new NumericDocValuesField("ndv", i));
-      doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedDocValuesField("sdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i * 2))));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
+      doc.addInt("ndv", i);
+      doc.addBinary("bdv", new BytesRef(Integer.toString(i)));
+      doc.addShortText("sdv", Integer.toString(i));
+      doc.addShortText("ssdv", Integer.toString(i));
+      doc.addShortText("ssdv", Integer.toString(i * 2));
       writer.addDocument(doc);
     }
     writer.commit();
@@ -359,12 +368,15 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(10); // prevent merges
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv1");
+    fieldTypes.disableSorting("bdv2");
     
     for (int i = 0; i < 2; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
-      doc.add(new BinaryDocValuesField("bdv1", toBytes(i)));
-      doc.add(new BinaryDocValuesField("bdv2", toBytes(i)));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
+      doc.addBinary("bdv1", toBytes(i));
+      doc.addBinary("bdv2", toBytes(i));
       writer.addDocument(doc);
     }
     writer.commit();
@@ -391,12 +403,14 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+
     for (int i = 0; i < 2; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
       if (i == 0) { // index only one document with value
-        doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
+        doc.addBinary("bdv", toBytes(5L));
       }
       writer.addDocument(doc);
     }
@@ -424,9 +438,9 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new StringField("foo", "bar", Store.NO));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addAtom("foo", "bar");
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -461,11 +475,13 @@
       }
     });
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
-    doc.add(new SortedDocValuesField("sorted", new BytesRef("value")));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addBinary("bdv", toBytes(5L));
+    doc.addShortText("sorted", "value");
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -492,10 +508,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addBinary("bdv", toBytes(5L));
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -519,17 +537,18 @@
     Random random = random();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
     
     int docid = 0;
     int numRounds = atLeast(10);
     for (int rnd = 0; rnd < numRounds; rnd++) {
-      Document doc = new Document();
-      doc.add(new StringField("key", "doc", Store.NO));
-      doc.add(new BinaryDocValuesField("bdv", toBytes(-1)));
       int numDocs = atLeast(30);
       for (int i = 0; i < numDocs; i++) {
-        doc.removeField("id");
-        doc.add(new StringField("id", Integer.toString(docid++), Store.NO));
+        Document doc = writer.newDocument();
+        doc.addAtom("key", "doc");
+        doc.addBinary("bdv", toBytes(-1));
+        doc.addUniqueInt("id", docid++);
         writer.addDocument(doc);
       }
       
@@ -537,7 +556,7 @@
       writer.updateBinaryDocValue(new Term("key", "doc"), "bdv", toBytes(value));
       
       if (random.nextDouble() < 0.2) { // randomly delete some docs
-        writer.deleteDocuments(new Term("id", Integer.toString(random.nextInt(docid))));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", random.nextInt(docid)));
       }
       
       // randomly commit or reopen-IW (or nothing), before forceMerge
@@ -555,10 +574,10 @@
       // forceMerge is called, the index will be with one segment and deletes
       // and some MPs might now merge it, thereby invalidating test's
       // assumption that the reader has no deletes).
-      doc = new Document();
-      doc.add(new StringField("id", Integer.toString(docid++), Store.NO));
-      doc.add(new StringField("key", "doc", Store.NO));
-      doc.add(new BinaryDocValuesField("bdv", toBytes(value)));
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", docid++);
+      doc.addAtom("key", "doc");
+      doc.addBinary("bdv", toBytes(value));
       writer.addDocument(doc);
 
       writer.forceMerge(1, true);
@@ -590,11 +609,13 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
     
-    Document doc = new Document();
-    doc.add(new StringField("k1", "v1", Store.NO));
-    doc.add(new StringField("k2", "v2", Store.NO));
-    doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("k1", "v1");
+    doc.addAtom("k2", "v2");
+    doc.addBinary("bdv", toBytes(5L));
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -621,6 +642,7 @@
     lmp.setMergeFactor(3); // merge often
     conf.setMergePolicy(lmp);
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     final boolean isNRT = random.nextBoolean();
     DirectoryReader reader;
@@ -636,6 +658,7 @@
     final long[] fieldValues = new long[numFields];
     for (int i = 0; i < fieldValues.length; i++) {
       fieldValues[i] = 1;
+      fieldTypes.disableSorting("f" + i);
     }
     
     int numRounds = atLeast(15);
@@ -644,12 +667,12 @@
       int numDocs = atLeast(5);
       //System.out.println("[" + Thread.currentThread().getName() + "]: round=" + i + ", numDocs=" + numDocs);
       for (int j = 0; j < numDocs; j++) {
-        Document doc = new Document();
-        doc.add(new StringField("id", "doc-" + docID, Store.NO));
-        doc.add(new StringField("key", "all", Store.NO)); // update key
+        Document doc = writer.newDocument();
+        doc.addAtom("id", "doc-" + docID);
+        doc.addAtom("key", "all"); // update key
         // add all fields with their current value
         for (int f = 0; f < fieldValues.length; f++) {
-          doc.add(new BinaryDocValuesField("f" + f, toBytes(fieldValues[f])));
+          doc.addBinary("f" + f, toBytes(fieldValues[f]));
         }
         writer.addDocument(doc);
         ++docID;
@@ -703,7 +726,7 @@
     writer.close();
     IOUtils.close(reader, dir);
   }
-  
+
   public void testUpdateSegmentWithNoDocValues() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -712,23 +735,25 @@
     // legit.
     conf.setMergePolicy(NoMergePolicy.INSTANCE);
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+
     // first segment with BDV
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc0", Store.NO));
-    doc.add(new BinaryDocValuesField("bdv", toBytes(3L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc0");
+    doc.addBinary("bdv", toBytes(3L));
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc4", Store.NO)); // document without 'bdv' field
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc4"); // document without 'bdv' field
     writer.addDocument(doc);
     writer.commit();
     
     // second segment with no BDV
-    doc = new Document();
-    doc.add(new StringField("id", "doc1", Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc2", Store.NO)); // document that isn't updated
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc2"); // document that isn't updated
     writer.addDocument(doc);
     writer.commit();
     
@@ -757,7 +782,7 @@
 
     dir.close();
   }
-  
+
   public void testUpdateSegmentWithPostingButNoDocValues() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -766,19 +791,21 @@
     // legit.
     conf.setMergePolicy(NoMergePolicy.INSTANCE);
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+
     // first segment with BDV
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc0", Store.NO));
-    doc.add(new StringField("bdv", "mock-value", Store.NO));
-    doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc0");
+    doc.addAtom("bdvmock", "mock-value");
+    doc.addBinary("bdv", toBytes(5L));
     writer.addDocument(doc);
     writer.commit();
     
     // second segment with no BDV
-    doc = new Document();
-    doc.add(new StringField("id", "doc1", Store.NO));
-    doc.add(new StringField("bdv", "mock-value", Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc1");
+    doc.addAtom("bdvmock", "mock-value");
     writer.addDocument(doc);
     writer.commit();
     
@@ -799,51 +826,38 @@
     dir.close();
   }
   
-  public void testUpdateBinaryDVFieldWithSameNameAsPostingField() throws Exception {
-    // this used to fail because FieldInfos.Builder neglected to update
-    // globalFieldMaps.docValuesTypes map
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf);
-    
-    Document doc = new Document();
-    doc.add(new StringField("f", "mock-value", Store.NO));
-    doc.add(new BinaryDocValuesField("f", toBytes(5L)));
-    writer.addDocument(doc);
-    writer.commit();
-    writer.updateBinaryDocValue(new Term("f", "mock-value"), "f", toBytes(17L));
-    writer.close();
-    
-    DirectoryReader r = DirectoryReader.open(dir);
-    BinaryDocValues bdv = r.leaves().get(0).reader().getBinaryDocValues("f");
-    assertEquals(17, getValue(bdv, 0));
-    r.close();
-    
-    dir.close();
-  }
-  
   public void testStressMultiThreading() throws Exception {
     final Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter writer = new IndexWriter(dir, conf);
     
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+    fieldTypes.disableSorting("control");
+
     // create index
     final int numFields = TestUtil.nextInt(random(), 1, 4);
+
+    for(int i=0;i<numFields;i++) {
+      fieldTypes.disableSorting("f" + i);
+      fieldTypes.disableSorting("cf" + i);
+    }
+
     final int numDocs = atLeast(2000);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("id", "doc" + i);
       double group = random().nextDouble();
       String g;
       if (group < 0.1) g = "g0";
       else if (group < 0.5) g = "g1";
       else if (group < 0.8) g = "g2";
       else g = "g3";
-      doc.add(new StringField("updKey", g, Store.NO));
+      doc.addAtom("updKey", g);
       for (int j = 0; j < numFields; j++) {
         long value = random().nextInt();
-        doc.add(new BinaryDocValuesField("f" + j, toBytes(value)));
-        doc.add(new BinaryDocValuesField("cf" + j, toBytes(value * 2))); // control, always updated to f * 2
+        doc.addBinary("f" + j, toBytes(value));
+        doc.addBinary("cf" + j, toBytes(value * 2)); // control, always updated to f * 2
       }
       writer.addDocument(doc);
     }
@@ -875,7 +889,11 @@
               final String cf = "cf" + field;
 //              System.out.println("[" + Thread.currentThread().getName() + "] numUpdates=" + numUpdates + " updateTerm=" + t + " field=" + field);
               long updValue = random.nextInt();
-              writer.updateDocValues(t, new BinaryDocValuesField(f, toBytes(updValue)), new BinaryDocValuesField(cf, toBytes(updValue*2)));
+              Document update = writer.newDocument();
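+              // keep the implicit "exists" marker out of this doc-values-only update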
+              update.disableExistsField();
+              update.addBinary(f, toBytes(updValue));
+              update.addBinary(cf, toBytes(updValue*2));
+              writer.updateDocValues(t, update);
               
               if (random.nextDouble() < 0.2) {
                 // delete a random document
@@ -956,13 +974,16 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(4);
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f");
+    fieldTypes.disableSorting("cf");
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("id", "doc" + i);
       long value = random().nextInt();
-      doc.add(new BinaryDocValuesField("f", toBytes(value)));
-      doc.add(new BinaryDocValuesField("cf", toBytes(value * 2)));
+      doc.addBinary("f", toBytes(value));
+      doc.addBinary("cf", toBytes(value * 2));
       writer.addDocument(doc);
     }
     
@@ -971,7 +992,11 @@
       int doc = random().nextInt(numDocs);
       Term t = new Term("id", "doc" + doc);
       long value = random().nextLong();
-      writer.updateDocValues(t, new BinaryDocValuesField("f", toBytes(value)), new BinaryDocValuesField("cf", toBytes(value*2)));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addBinary("f", toBytes(value));
+      update.addBinary("cf", toBytes(value*2));
+      writer.updateDocValues(t, update);
       DirectoryReader reader = DirectoryReader.open(writer, true);
       for (LeafReaderContext context : reader.leaves()) {
         LeafReader r = context.reader();
@@ -998,10 +1023,13 @@
       }
     });
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "d0", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(5L)));
-    doc.add(new BinaryDocValuesField("f2", toBytes(13L)));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f1");
+    fieldTypes.disableSorting("f2");
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "d0");
+    doc.addBinary("f1", toBytes(5L));
+    doc.addBinary("f2", toBytes(13L));
     writer.addDocument(doc);
     writer.close();
     
@@ -1015,10 +1043,10 @@
       }
     });
     writer = new IndexWriter(dir, conf);
-    doc = new Document();
-    doc.add(new StringField("id", "d1", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(17L)));
-    doc.add(new BinaryDocValuesField("f2", toBytes(2L)));
+    doc = writer.newDocument();
+    doc.addAtom("id", "d1");
+    doc.addBinary("f1", toBytes(17L));
+    doc.addBinary("f2", toBytes(2L));
     writer.addDocument(doc);
     writer.updateBinaryDocValue(new Term("id", "d0"), "f1", toBytes(12L));
     writer.close();
@@ -1039,6 +1067,9 @@
     Directory dir1 = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir1, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+    fieldTypes.disableSorting("control");
     
     final int numDocs = atLeast(50);
     final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5);
@@ -1049,10 +1080,10 @@
 
     // create first index
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", RandomPicks.randomFrom(random(), randomTerms), Store.NO));
-      doc.add(new BinaryDocValuesField("bdv", toBytes(4L)));
-      doc.add(new BinaryDocValuesField("control", toBytes(8L)));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", RandomPicks.randomFrom(random(), randomTerms));
+      doc.addBinary("bdv", toBytes(4L));
+      doc.addBinary("control", toBytes(8L));
       writer.addDocument(doc);
     }
     
@@ -1063,7 +1094,11 @@
     // update some docs to a random value
     long value = random().nextInt();
     Term term = new Term("id", RandomPicks.randomFrom(random(), randomTerms));
-    writer.updateDocValues(term, new BinaryDocValuesField("bdv", toBytes(value)), new BinaryDocValuesField("control", toBytes(value * 2)));
+    Document update = writer.newDocument();
+    update.disableExistsField();
+    update.addBinary("bdv", toBytes(value));
+    update.addBinary("control", toBytes(value*2));
+    writer.updateDocValues(term, update);
     writer.close();
     
     Directory dir2 = newDirectory();
@@ -1100,11 +1135,14 @@
     }
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f1");
+    fieldTypes.disableSorting("f2");
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "d0", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
-    doc.add(new BinaryDocValuesField("f2", toBytes(1L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "d0");
+    doc.addBinary("f1", toBytes(1L));
+    doc.addBinary("f2", toBytes(1L));
     writer.addDocument(doc);
 
     // update each field twice to make sure all unneeded files are deleted
@@ -1134,7 +1172,9 @@
     conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
     conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // don't flush by doc
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("upd");
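+    // documents below get several "upd" atoms each, hence multi-valued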
+
     // test data: lots of documents (few 10Ks) and lots of update terms (few hundreds)
     final int numDocs = atLeast(20000);
     final int numBinaryFields = atLeast(5);
@@ -1144,19 +1184,24 @@
       updateTerms.add(TestUtil.randomSimpleString(random));
     }
 
+    for(int i=0;i<numBinaryFields;i++) {
+      fieldTypes.disableSorting("f" + i);
+      fieldTypes.disableSorting("cf" + i);
+    }
+
 //    System.out.println("numDocs=" + numDocs + " numBinaryFields=" + numBinaryFields + " numTerms=" + numTerms);
     
     // build a large index with many BDV fields and update terms
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       int numUpdateTerms = TestUtil.nextInt(random, 1, numTerms / 10);
       for (int j = 0; j < numUpdateTerms; j++) {
-        doc.add(new StringField("upd", RandomPicks.randomFrom(random, updateTerms), Store.NO));
+        doc.addAtom("upd", RandomPicks.randomFrom(random, updateTerms));
       }
       for (int j = 0; j < numBinaryFields; j++) {
         long val = random.nextInt();
-        doc.add(new BinaryDocValuesField("f" + j, toBytes(val)));
-        doc.add(new BinaryDocValuesField("cf" + j, toBytes(val * 2)));
+        doc.addBinary("f" + j, toBytes(val));
+        doc.addBinary("cf" + j, toBytes(val * 2));
       }
       writer.addDocument(doc);
     }
@@ -1172,8 +1217,11 @@
       int field = random.nextInt(numBinaryFields);
       Term updateTerm = new Term("upd", RandomPicks.randomFrom(random, updateTerms));
       long value = random.nextInt();
-      writer.updateDocValues(updateTerm, new BinaryDocValuesField("f" + field, toBytes(value)), 
-          new BinaryDocValuesField("cf" + field, toBytes(value * 2)));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addBinary("f" + field, toBytes(value));
+      update.addBinary("cf" + field, toBytes(value*2));
+      writer.updateDocValues(updateTerm, update);
     }
 
     writer.close();
@@ -1198,12 +1246,16 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("upd");
+    fieldTypes.disableSorting("f1");
+    fieldTypes.disableSorting("f2");
     
-    Document doc = new Document();
-    doc.add(new StringField("upd", "t1", Store.NO));
-    doc.add(new StringField("upd", "t2", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
-    doc.add(new BinaryDocValuesField("f2", toBytes(1L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("upd", "t1");
+    doc.addAtom("upd", "t2");
+    doc.addBinary("f1", toBytes(1L));
+    doc.addBinary("f2", toBytes(1L));
     writer.addDocument(doc);
     writer.updateBinaryDocValue(new Term("upd", "t1"), "f1", toBytes(2L)); // update f1 to 2
     writer.updateBinaryDocValue(new Term("upd", "t1"), "f2", toBytes(2L)); // update f2 to 2
@@ -1224,10 +1276,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f1");
+
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc");
+    doc.addBinary("f1", toBytes(1L));
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.commit();
@@ -1248,10 +1302,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f1");
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc", Store.NO));
-    doc.add(new BinaryDocValuesField("f1", toBytes(1L)));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc");
+    doc.addBinary("f1", toBytes(1L));
     writer.addDocument(doc);
     // update w/ multiple nonexisting terms in same field
     writer.updateBinaryDocValue(new Term("c", "foo"), "f1", toBytes(2L));
@@ -1277,8 +1333,10 @@
     conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("val");
     for (int i = 0; i < 100; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
     }
     writer.commit();
     writer.close();
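
The recurring pattern in the hunks above: field options such as multi-valued-ness and sorting support are declared once on the writer's FieldTypes, documents come from IndexWriter.newDocument(), and a doc-values update is phrased as a document that carries only the new values. A minimal standalone sketch of that update idiom, assuming the branch's FieldTypes/Document API exactly as exercised here; the RAMDirectory, null analyzer, and BytesRef values are illustrative choices, and addBinary is assumed to accept a BytesRef (matching toBytes in the test above):

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.BytesRef;

    public class BinaryDocValuesUpdateSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();                // illustrative; any Directory works
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.disableSorting("f1");                   // plain binary doc values; no sorted form needed

        Document doc = writer.newDocument();
        doc.addAtom("id", "doc1");                         // untokenized key, so the null analyzer is fine
        doc.addBinary("f1", new BytesRef(new byte[] {1}));
        writer.addDocument(doc);

        // The update document carries only the fields whose values are rewritten:
        Document update = writer.newDocument();
        update.disableExistsField();                       // it is a value carrier, not a real document
        update.addBinary("f1", new BytesRef(new byte[] {2}));
        writer.updateDocValues(new Term("id", "doc1"), update);

        writer.close();
        dir.close();
      }
    }
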
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java
index 9953fc7..de1ce4f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java
@@ -20,9 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -44,11 +41,9 @@
       bytes.bytes[0] = (byte) i;
       bytes.bytes[1] = (byte) (255 - i);
       bytes.length = 2;
-      Document doc = new Document();
-      FieldType customType = new FieldType();
-      customType.setStored(true);
-      doc.add(new Field("id", "" + i, customType));
-      doc.add(new TextField("bytes", tokenStream));
+      Document doc = iw.newDocument();
+      doc.addStoredString("id", "" + i);
+      doc.addLargeText("bytes", tokenStream);
       iw.addDocument(doc);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java
index fca3281..347bc2c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java
@@ -17,23 +17,21 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.PrintStream;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.List;
 
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class TestCheckIndex extends LuceneTestCase {
 
@@ -41,13 +39,15 @@
     Directory dir = newDirectory();
     IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                  .setMaxBufferedDocs(2));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+
     for(int i=0;i<19;i++) {
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      doc.add(newField("field", "aaa"+i, customType));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", "aaa"+i);
       writer.addDocument(doc);
     }
     writer.forceMerge(1);
@@ -104,15 +104,15 @@
   public void testBogusTermVectors() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    Field field = new Field("foo", "", ft);
-    field.setTokenStream(new CannedTokenStream(
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("foo");
+    fieldTypes.enableTermVectorOffsets("foo");
+    fieldTypes.disableHighlighting("foo");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", new CannedTokenStream(
         new Token("bar", 5, 10), new Token("bar", 1, 4)
     ));
-    doc.add(field);
     iw.addDocument(doc);
     iw.close();
     dir.close(); // checkindex
@@ -121,7 +121,7 @@
   public void testObtainsLock() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     
     // keep IW open...
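
Term-vector flags that previously required a hand-built FieldType are now toggled per field name on FieldTypes, and addLargeText indexes tokenized text against whatever flags were set for that name. A sketch under the same API assumptions; WhitespaceAnalyzer and RAMDirectory are illustrative stand-ins:

    import java.io.IOException;

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class TermVectorOptionsSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()));

        // Declare vector options once, per field name:
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.enableTermVectors("field");
        fieldTypes.enableTermVectorPositions("field");
        fieldTypes.enableTermVectorOffsets("field");

        Document doc = writer.newDocument();
        doc.addLargeText("field", "aaa bbb ccc");          // tokenized text, vectors per the flags above
        writer.addDocument(doc);

        writer.close();
        dir.close();
      }
    }
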
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java
index 546a6bd..d1416ab 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -33,8 +32,8 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
     int numDocs = atLeast(100);
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
-      doc.add(newField("foo", "bar", TextField.TYPE_NOT_STORED));
+      Document doc = w.newDocument();
+      doc.addLargeText("foo", "bar");
       w.addDocument(doc);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 5ae4d89..5af8a75 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -29,8 +29,6 @@
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -800,8 +798,8 @@
     // we don't need many documents to assert this, but don't use one document either
     int numDocs = atLeast(random, 50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("f", "doc", Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("f", "doc");
       writer.addDocument(doc);
     }
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
index b9b1f22..942d70b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -25,9 +25,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
@@ -94,9 +92,6 @@
         });
     }
     IndexWriter writer = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    doc.add(idField);
 
     outer:
     for(int i=0;i<10;i++) {
@@ -105,10 +100,13 @@
       }
 
       for(int j=0;j<20;j++) {
-        idField.setStringValue(Integer.toString(i*20+j));
+        Document doc = writer.newDocument();
+        doc.addInt("id", i*20+j);
         writer.addDocument(doc);
       }
 
+      Document doc = writer.newDocument();
+      doc.addInt("id", i*20+19);
       // must cycle here because sometimes the merge flushes
       // the doc we just added and so there's nothing to
       // flush, and we don't hit the exception
@@ -150,15 +148,14 @@
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))
                                                       .setMergePolicy(mp));
 
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    doc.add(idField);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     for(int i=0;i<10;i++) {
       if (VERBOSE) {
         System.out.println("\nTEST: cycle");
       }
       for(int j=0;j<100;j++) {
-        idField.setStringValue(Integer.toString(i*100+j));
+        Document doc = writer.newDocument();
+        doc.addUniqueInt("id", i*100+j);
         writer.addDocument(doc);
       }
 
@@ -167,7 +164,7 @@
         if (VERBOSE) {
           System.out.println("TEST: del " + delID);
         }
-        writer.deleteDocuments(new Term("id", ""+delID));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", delID));
         delID += 10;
       }
 
@@ -197,11 +194,14 @@
       }
 
       for(int j=0;j<21;j++) {
-        Document doc = new Document();
-        doc.add(newTextField("content", "a b c", Field.Store.NO));
+        Document doc = writer.newDocument();
+        doc.addLargeText("content", "a b c");
         writer.addDocument(doc);
       }
         
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter + " now close");
+      }
       writer.close();
       TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");
 
@@ -217,9 +217,6 @@
 
   public void testNoWaitClose() throws IOException {
     Directory directory = newDirectory();
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    doc.add(idField);
 
     IndexWriter writer = new IndexWriter(
         directory,
@@ -229,25 +226,27 @@
             setMergePolicy(newLogMergePolicy(100)).
             setCommitOnClose(false)
     );
+    FieldTypes fieldTypes = writer.getFieldTypes();
 
     int numIters = TEST_NIGHTLY ? 10 : 3;
     for(int iter=0;iter<numIters;iter++) {
 
       for(int j=0;j<201;j++) {
-        idField.setStringValue(Integer.toString(iter*201+j));
+        Document doc = writer.newDocument();
+        doc.addUniqueInt("id",iter*201+j);
         writer.addDocument(doc);
       }
 
       int delID = iter*201;
       for(int j=0;j<20;j++) {
-        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", delID));
         delID += 5;
       }
 
       // Force a bunch of merge threads to kick off so we
       // stress out aborting them on close:
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
-      writer.addDocument(doc);
+      writer.addDocument(writer.newDocument());
       writer.commit();
 
       try {
@@ -333,8 +332,8 @@
     tmp.setSegmentsPerTier(2);
 
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(newField("field", "field", TextField.TYPE_NOT_STORED));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "field");
     while(enoughMergesWaiting.getCount() != 0 && !failed.get()) {
       for(int i=0;i<10;i++) {
         w.addDocument(doc);
@@ -379,13 +378,15 @@
       iwc.setCodec(TestUtil.alwaysPostingsFormat(TestUtil.getDefaultPostingsFormat()));
     }
     IndexWriter w = new IndexWriter(d, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+
     for(int i=0;i<1000;i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
       w.addDocument(doc);
 
       if (random().nextBoolean()) {
-        w.deleteDocuments(new Term("id", ""+random().nextInt(i+1)));
+        w.deleteDocuments(fieldTypes.newIntTerm("id", random().nextInt(i+1)));
       }
     }
     atLeastOneMerge.await();
@@ -455,7 +456,7 @@
     IndexWriter w = new IndexWriter(d, iwc);
     // Makes 100 segments
     for(int i=0;i<200;i++) {
-      w.addDocument(new Document());
+      w.addDocument(w.newDocument());
     }
 
     // No merges should have run so far, because TMP has high segmentsPerTier:
@@ -468,7 +469,7 @@
 
     // Makes another 100 segments
     for(int i=0;i<200;i++) {
-      w.addDocument(new Document());
+      w.addDocument(w.newDocument());
     }
 
     ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).setMaxMergesAndThreads(1, 1);
@@ -494,7 +495,7 @@
         }
       });
     IndexWriter w = new IndexWriter(dir, iwc);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.forceMerge(1);
     assertTrue(wasCalled.get());
 
@@ -529,12 +530,12 @@
 
     final IndexWriter w = new IndexWriter(dir, iwc);
     
-    w.addDocument(new Document());
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
+    w.addDocument(w.newDocument());
     // flush
 
-    w.addDocument(new Document());
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
+    w.addDocument(w.newDocument());
     // flush + merge
 
     // Wait for merge to kick off
@@ -544,13 +545,13 @@
       @Override
       public void run() {
         try {
-          w.addDocument(new Document());
-          w.addDocument(new Document());
+          w.addDocument(w.newDocument());
+          w.addDocument(w.newDocument());
           // flush
 
-          w.addDocument(new Document());
+          w.addDocument(w.newDocument());
           // W/o the fix for LUCENE-6094 we would hang forever here:
-          w.addDocument(new Document());
+          w.addDocument(w.newDocument());
           // flush + merge
           
           // Now allow first merge to finish:
@@ -583,12 +584,12 @@
     iwc.setMergePolicy(lmp);
 
     IndexWriter w = new IndexWriter(dir, iwc);
-    w.addDocument(new Document());
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
+    w.addDocument(w.newDocument());
     // flush
 
-    w.addDocument(new Document());
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
+    w.addDocument(w.newDocument());
     // flush + merge
 
     // CMS should have now set true values:
@@ -650,8 +651,8 @@
     iwc.setMaxBufferedDocs(2);
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<1000;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("field", ""+i, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addAtom("field", ""+i);
       w.addDocument(doc);
     }
     w.close();
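
Reused string id fields mutated with setStringValue give way to a fresh document per add with addUniqueInt, and deletes use a typed term built by FieldTypes so the query-side encoding matches the index-side encoding. A sketch, same assumptions as above:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class IntIdSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
        FieldTypes fieldTypes = writer.getFieldTypes();

        for (int i = 0; i < 100; i++) {
          Document doc = writer.newDocument();   // one fresh document per add; no field reuse
          doc.addUniqueInt("id", i);             // int key, unique per document
          writer.addDocument(doc);
        }

        // newIntTerm encodes the int the same way addUniqueInt indexed it:
        writer.deleteDocuments(fieldTypes.newIntTerm("id", 42));

        writer.close();
        dir.close();
      }
    }
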
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java b/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
index dd4e3bb..53bbed3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
@@ -21,11 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FailOnNonBulkMergesInfoStream;
 import org.apache.lucene.util.LuceneTestCase;
@@ -34,91 +30,29 @@
 public class TestConsistentFieldNumbers extends LuceneTestCase {
 
   @Test
-  public void testSameFieldNumbersAcrossSegments() throws Exception {
-    for (int i = 0; i < 2; i++) {
-      Directory dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                                   .setMergePolicy(NoMergePolicy.INSTANCE));
-
-      Document d1 = new Document();
-      d1.add(new StringField("f1", "first field", Field.Store.YES));
-      d1.add(new StringField("f2", "second field", Field.Store.YES));
-      writer.addDocument(d1);
-
-      if (i == 1) {
-        writer.close();
-        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                         .setMergePolicy(NoMergePolicy.INSTANCE));
-      } else {
-        writer.commit();
-      }
-
-      Document d2 = new Document();
-      FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-      customType2.setStoreTermVectors(true);
-      d2.add(new TextField("f2", "second field", Field.Store.NO));
-      d2.add(new Field("f1", "first field", customType2));
-      d2.add(new TextField("f3", "third field", Field.Store.NO));
-      d2.add(new TextField("f4", "fourth field", Field.Store.NO));
-      writer.addDocument(d2);
-
-      writer.close();
-
-      SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
-      assertEquals(2, sis.size());
-
-      FieldInfos fis1 = IndexWriter.readFieldInfos(sis.info(0));
-      FieldInfos fis2 = IndexWriter.readFieldInfos(sis.info(1));
-
-      assertEquals("f1", fis1.fieldInfo(0).name);
-      assertEquals("f2", fis1.fieldInfo(1).name);
-      assertEquals("f1", fis2.fieldInfo(0).name);
-      assertEquals("f2", fis2.fieldInfo(1).name);
-      assertEquals("f3", fis2.fieldInfo(2).name);
-      assertEquals("f4", fis2.fieldInfo(3).name);
-
-      writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      writer.forceMerge(1);
-      writer.close();
-
-      sis = SegmentInfos.readLatestCommit(dir);
-      assertEquals(1, sis.size());
-
-      FieldInfos fis3 = IndexWriter.readFieldInfos(sis.info(0));
-
-      assertEquals("f1", fis3.fieldInfo(0).name);
-      assertEquals("f2", fis3.fieldInfo(1).name);
-      assertEquals("f3", fis3.fieldInfo(2).name);
-      assertEquals("f4", fis3.fieldInfo(3).name);
-
-
-      dir.close();
-    }
-  }
-
-  @Test
   public void testAddIndexes() throws Exception {
     Directory dir1 = newDirectory();
     Directory dir2 = newDirectory();
     IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig(new MockAnalyzer(random()))
                                                  .setMergePolicy(NoMergePolicy.INSTANCE));
 
-    Document d1 = new Document();
-    d1.add(new TextField("f1", "first field", Field.Store.YES));
-    d1.add(new TextField("f2", "second field", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document d1 = writer.newDocument();
+    d1.addLargeText("f1", "first field");
+    d1.addLargeText("f2", "second field");
     writer.addDocument(d1);
 
     writer.close();
     writer = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random()))
                                      .setMergePolicy(NoMergePolicy.INSTANCE));
 
-    Document d2 = new Document();
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setStoreTermVectors(true);
-    d2.add(new TextField("f2", "second field", Field.Store.YES));
-    d2.add(new Field("f1", "first field", customType2));
-    d2.add(new TextField("f3", "third field", Field.Store.YES));
-    d2.add(new TextField("f4", "fourth field", Field.Store.YES));
+    fieldTypes = writer.getFieldTypes();
+    Document d2 = writer.newDocument();
+    fieldTypes.enableTermVectors("f1");
+    d2.addLargeText("f2", "second field");
+    d2.addLargeText("f1", "first field");
+    d2.addLargeText("f3", "third field");
+    d2.addLargeText("f4", "fourth field");
     writer.addDocument(d2);
 
     writer.close();
@@ -153,9 +87,12 @@
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                     .setMergePolicy(NoMergePolicy.INSTANCE));
-        Document d = new Document();
-        d.add(new TextField("f1", "d1 first field", Field.Store.YES));
-        d.add(new TextField("f2", "d1 second field", Field.Store.YES));
+        FieldTypes fieldTypes = writer.getFieldTypes();
+        fieldTypes.disableExistsFilters();
+
+        Document d = writer.newDocument();
+        d.addLargeText("f1", "d1 first field");
+        d.addLargeText("f2", "d1 second field");
         writer.addDocument(d);
         writer.close();
         SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
@@ -169,9 +106,9 @@
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                     .setMergePolicy(NoMergePolicy.INSTANCE));
-        Document d = new Document();
-        d.add(new TextField("f1", "d2 first field", Field.Store.YES));
-        d.add(new StoredField("f3", new byte[] { 1, 2, 3 }));
+        Document d = writer.newDocument();
+        d.addLargeText("f1", "d2 first field");
+        d.addStoredBinary("f3", new byte[] { 1, 2, 3 });
         writer.addDocument(d);
         writer.close();
         SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
@@ -188,10 +125,10 @@
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                     .setMergePolicy(NoMergePolicy.INSTANCE));
-        Document d = new Document();
-        d.add(new TextField("f1", "d3 first field", Field.Store.YES));
-        d.add(new TextField("f2", "d3 second field", Field.Store.YES));
-        d.add(new StoredField("f3", new byte[] { 1, 2, 3, 4, 5 }));
+        Document d = writer.newDocument();
+        d.addLargeText("f1", "d3 first field");
+        d.addLargeText("f2", "d3 second field");
+        d.addStoredBinary("f3", new byte[] { 1, 2, 3, 4, 5 });
         writer.addDocument(d);
         writer.close();
         SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
@@ -249,16 +186,27 @@
 
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    for(int i=0;i<MAX_FIELDS;i++) {
+      fieldTypes.setMultiValued(""+i);
+    }
 
     for (int i = 0; i < NUM_DOCS; i++) {
-      Document d = new Document();
+      Document d = writer.newDocument();
       for (int j = 0; j < docs[i].length; j++) {
-        d.add(getField(docs[i][j]));
+        addField(fieldTypes, d, docs[i][j]);
       }
 
       writer.addDocument(d);
     }
 
+    Document d = writer.newDocument();
+
+    for(int i=0;i<MAX_FIELDS;i++) {
+      addField(fieldTypes, d, i);
+    }
+
     writer.forceMerge(1);
     writer.close();
 
@@ -267,7 +215,7 @@
       FieldInfos fis = IndexWriter.readFieldInfos(si);
 
       for (FieldInfo fi : fis) {
-        Field expected = getField(Integer.parseInt(fi.name));
+        IndexableField expected = d.getField(fi.name);
         assertEquals(expected.fieldType().indexOptions(), fi.getIndexOptions());
         assertEquals(expected.fieldType().storeTermVectors(), fi.hasVectors());
       }
@@ -276,95 +224,96 @@
     dir.close();
   }
 
-  private Field getField(int number) {
-    int mode = number % 16;
+  private void addField(FieldTypes fieldTypes, Document d, int number) {
     String fieldName = "" + number;
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setTokenized(false);
-    
-    FieldType customType3 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType3.setTokenized(false);
-    
-    FieldType customType4 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType4.setTokenized(false);
-    customType4.setStoreTermVectors(true);
-    customType4.setStoreTermVectorOffsets(true);
-    
-    FieldType customType5 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType5.setStoreTermVectors(true);
-    customType5.setStoreTermVectorOffsets(true);
 
-    FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-    customType6.setTokenized(false);
-    customType6.setStoreTermVectors(true);
-    customType6.setStoreTermVectorOffsets(true);
-
-    FieldType customType7 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType7.setTokenized(false);
-    customType7.setStoreTermVectors(true);
-    customType7.setStoreTermVectorOffsets(true);
-
-    FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-    customType8.setTokenized(false);
-    customType8.setStoreTermVectors(true);
-    customType8.setStoreTermVectorPositions(true);
-
-    FieldType customType9 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType9.setStoreTermVectors(true);
-    customType9.setStoreTermVectorPositions(true);
-
-    FieldType customType10 = new FieldType(TextField.TYPE_STORED);
-    customType10.setTokenized(false);
-    customType10.setStoreTermVectors(true);
-    customType10.setStoreTermVectorPositions(true);
-
-    FieldType customType11 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType11.setTokenized(false);
-    customType11.setStoreTermVectors(true);
-    customType11.setStoreTermVectorPositions(true);
-
-    FieldType customType12 = new FieldType(TextField.TYPE_STORED);
-    customType12.setStoreTermVectors(true);
-    customType12.setStoreTermVectorOffsets(true);
-    customType12.setStoreTermVectorPositions(true);
-
-    FieldType customType13 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType13.setStoreTermVectors(true);
-    customType13.setStoreTermVectorOffsets(true);
-    customType13.setStoreTermVectorPositions(true);
-
-    FieldType customType14 = new FieldType(TextField.TYPE_STORED);
-    customType14.setTokenized(false);
-    customType14.setStoreTermVectors(true);
-    customType14.setStoreTermVectorOffsets(true);
-    customType14.setStoreTermVectorPositions(true);
-
-    FieldType customType15 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType15.setTokenized(false);
-    customType15.setStoreTermVectors(true);
-    customType15.setStoreTermVectorOffsets(true);
-    customType15.setStoreTermVectorPositions(true);
-    
+    int mode = number % 16;
     switch (mode) {
-      case 0: return new Field(fieldName, "some text", customType);
-      case 1: return new TextField(fieldName, "some text", Field.Store.NO);
-      case 2: return new Field(fieldName, "some text", customType2);
-      case 3: return new Field(fieldName, "some text", customType3);
-      case 4: return new Field(fieldName, "some text", customType4);
-      case 5: return new Field(fieldName, "some text", customType5);
-      case 6: return new Field(fieldName, "some text", customType6);
-      case 7: return new Field(fieldName, "some text", customType7);
-      case 8: return new Field(fieldName, "some text", customType8);
-      case 9: return new Field(fieldName, "some text", customType9);
-      case 10: return new Field(fieldName, "some text", customType10);
-      case 11: return new Field(fieldName, "some text", customType11);
-      case 12: return new Field(fieldName, "some text", customType12);
-      case 13: return new Field(fieldName, "some text", customType13);
-      case 14: return new Field(fieldName, "some text", customType14);
-      case 15: return new Field(fieldName, "some text", customType15);
-      default: return null;
+    case 0:
+      d.addStoredString(fieldName, "some text");
+      return;
+    case 1:
+      d.addLargeText(fieldName, "some text");
+      return;
+    case 2:
+      d.addAtom(fieldName, "some text");
+      return;
+    case 3:
+      fieldTypes.disableStored(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 4:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 5:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addLargeText(fieldName, "some text");
+      return;
+    case 6:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 7:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 8:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 9:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      d.addLargeText(fieldName, "some text");
+      return;
+    case 10:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 11:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 12:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addLargeText(fieldName, "some text");
+      return;
+    case 13:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addLargeText(fieldName, "some text");
+      return;
+    case 14:
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    case 15:
+      fieldTypes.disableStored(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      d.addAtom(fieldName, "some text");
+      return;
+    default:
+      assert false;
     }
   }
 }
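
Each of the sixteen customTypeN variants above collapses into a few per-field toggles: storage via disableStored, tokenization via the choice between addAtom and addLargeText, and vectors via the enableTermVector* calls. A condensed sketch of three representative combinations, same assumptions (field names and analyzer are illustrative):

    import java.io.IOException;

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class FieldOptionCombosSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()));
        FieldTypes fieldTypes = writer.getFieldTypes();
        Document d = writer.newDocument();

        // case 0 above: plain stored string:
        d.addStoredString("0", "some text");

        // case 4 above: unstored atom with term vectors + offsets:
        fieldTypes.disableStored("4");
        fieldTypes.enableTermVectors("4");
        fieldTypes.enableTermVectorOffsets("4");
        d.addAtom("4", "some text");

        // case 12 above: stored large text with vectors, positions, and offsets:
        fieldTypes.enableTermVectors("12");
        fieldTypes.enableTermVectorPositions("12");
        fieldTypes.enableTermVectorOffsets("12");
        d.addLargeText("12", "some text");

        writer.addDocument(d);
        writer.close();
        dir.close();
      }
    }
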
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCrash.java b/lucene/core/src/test/org/apache/lucene/index/TestCrash.java
index 951aa7d..3c7dbf9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCrash.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCrash.java
@@ -22,7 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NoLockFactory;
@@ -42,11 +41,12 @@
       writer.commit();
     }
     
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
-    doc.add(newTextField("id", "0", Field.Store.NO));
-    for(int i=0;i<157;i++)
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addLargeText("id", "0");
+    for(int i=0;i<157;i++) {
       writer.addDocument(doc);
+    }
 
     return writer;
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
index 407c077..1b3a0ad 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
@@ -22,7 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -66,12 +65,12 @@
     IndexWriter indexWriter = new IndexWriter(crashAfterCreateOutput,
                                               newIndexWriterConfig(new MockAnalyzer(random())));
             
-    indexWriter.addDocument(getDocument());
+    indexWriter.addDocument(getDocument(indexWriter));
     // writes segments_1:
     indexWriter.commit();
             
     crashAfterCreateOutput.setCrashAfterCreateOutput("pending_segments_2");
-    indexWriter.addDocument(getDocument());
+    indexWriter.addDocument(getDocument(indexWriter));
     try {
       // tries to write segments_2 but hits fake exc:
       indexWriter.commit();
@@ -99,7 +98,7 @@
             
     // currently the test fails above.
     // however, to test the fix, the following lines should pass as well.
-    indexWriter.addDocument(getDocument());
+    indexWriter.addDocument(getDocument(indexWriter));
     indexWriter.close();
     assertFalse(slowFileExists(realDirectory, "segments_2"));
     realDirectory.close();
@@ -124,9 +123,9 @@
   /**
    * Gets a document with content "my dog has fleas".
    */
-  private Document getDocument() {
-    Document document = new Document();
-    document.add(newTextField(TEXT_FIELD, "my dog has fleas", Field.Store.NO));
+  private Document getDocument(IndexWriter w) {
+    Document document = w.newDocument();
+    document.addLargeText(TEXT_FIELD, "my dog has fleas");
     return document;
   }
     
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
index fe324f9..0e5df82 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
@@ -20,8 +20,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -49,17 +48,13 @@
     Similarity provider = new MySimProvider();
     config.setSimilarity(provider);
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    final LineFileDocs docs = new LineFileDocs(random());
+    final LineFileDocs docs = new LineFileDocs(writer.w, random());
     int num = atLeast(100);
     for (int i = 0; i < num; i++) {
       Document doc = docs.nextDoc();
       float nextFloat = random().nextFloat();
-      Field f = new TextField(floatTestField, "" + nextFloat, Field.Store.YES);
-      f.setBoost(nextFloat);
-
-      doc.add(f);
+      doc.addLargeText(floatTestField, "" + nextFloat, nextFloat);
       writer.addDocument(doc);
-      doc.removeField(floatTestField);
       if (rarely()) {
         writer.commit();
       }
@@ -70,8 +65,45 @@
     NumericDocValues norms = open.getNormValues(floatTestField);
     assertNotNull(norms);
     for (int i = 0; i < open.maxDoc(); i++) {
-      StoredDocument document = open.document(i);
-      float expected = Float.parseFloat(document.get(floatTestField));
+      Document document = open.document(i);
+      float expected = Float.parseFloat(document.getString(floatTestField));
+      assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
+    }
+    open.close();
+    dir.close();
+    docs.close();
+  }
+
+  // Use FieldTypes.setSimilarity:
+  public void testPerFieldFloatNorms() throws IOException {
+
+    Directory dir = newDirectory();
+    MockAnalyzer analyzer = new MockAnalyzer(random());
+    analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
+
+    IndexWriterConfig config = newIndexWriterConfig(analyzer);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setSimilarity(floatTestField, new MySimProvider());
+    final LineFileDocs docs = new LineFileDocs(writer.w, random());
+    int num = atLeast(100);
+    for (int i = 0; i < num; i++) {
+      Document doc = docs.nextDoc();
+      float nextFloat = random().nextFloat();
+      doc.addLargeText(floatTestField, "" + nextFloat, nextFloat);
+      writer.addDocument(doc);
+      if (rarely()) {
+        writer.commit();
+      }
+    }
+    writer.commit();
+    writer.close();
+    LeafReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
+    NumericDocValues norms = open.getNormValues(floatTestField);
+    assertNotNull(norms);
+    for (int i = 0; i < open.maxDoc(); i++) {
+      Document document = open.document(i);
+      float expected = Float.parseFloat(document.getString(floatTestField));
       assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
     }
     open.close();
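
Two norms-related idioms appear in this file: the trailing float argument to addLargeText is a per-field boost, replacing Field.setBoost, and a Similarity can be scoped to a single field with FieldTypes.setSimilarity rather than set only on the IndexWriterConfig. A sketch with DefaultSimilarity standing in for the test-local MySimProvider:

    import java.io.IOException;

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.similarities.DefaultSimilarity;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class PerFieldSimilaritySketch {
      public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()));

        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.setSimilarity("body", new DefaultSimilarity()); // Similarity scoped to one field

        Document doc = writer.newDocument();
        doc.addLargeText("body", "some text", 1.7f); // trailing arg: field boost, folded into the norm
        writer.addDocument(doc);

        writer.close();
        dir.close();
      }
    }
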
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
index 0a20e5f..2690a9f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -27,7 +27,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -656,6 +656,8 @@
         mp = conf.getMergePolicy();
         mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
         writer = new IndexWriter(dir, conf);
+        FieldTypes fieldTypes = writer.getFieldTypes();
+
         policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
         for(int j=0;j<17;j++) {
           addDocWithID(writer, i*(N+1)+j);
@@ -667,7 +669,7 @@
           .setMergePolicy(NoMergePolicy.INSTANCE);
         writer = new IndexWriter(dir, conf);
         policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-        writer.deleteDocuments(new Term("id", "" + (i*(N+1)+3)));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", i*(N+1)+3));
         // this is a commit
         writer.close();
         IndexReader reader = DirectoryReader.open(dir);
@@ -737,16 +739,15 @@
   }
 
   private void addDocWithID(IndexWriter writer, int id) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
-    doc.add(newStringField("id", "" + id, Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addUniqueInt("id", id);
     writer.addDocument(doc);
   }
   
-  private void addDoc(IndexWriter writer) throws IOException
-  {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
deleted file mode 100644
index 4dc82a1..0000000
--- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
+++ /dev/null
@@ -1,1369 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.TestUtil;
-
-// TODO:
-//   - old parallel indices are only pruned on commit/close; can we do it on refresh?
-
-/** Simple example showing how to use ParallelLeafReader to index new
- *  stuff (postings, DVs, etc.) from previously stored fields, on the
- *  fly (during NRT reader reopen), after the  initial indexing.  The
- *  test indexes just a single stored field with text "content X" (X is
- *  a number embedded in the text).
- *
- *  Then, on reopen, for any newly created segments (flush or merge), it
- *  builds a new parallel segment by loading all stored docs, parsing
- *  out that X, and adding it as DV and numeric indexed (trie) field.
- *
- *  Finally, for searching, it builds a top-level MultiReader, with
- *  ParallelLeafReader for each segment, and then tests that random
- *  numeric range queries, and sorting by the new DV field, work
- *  correctly.
- *
- *  Each per-segment index lives in a private directory next to the main
- *  index, and they are deleted once their segments are removed from the
- *  index.  They are "volatile", meaning if e.g. the index is replicated to
- *  another machine, it's OK to not copy parallel segments indices,
- *  since they will just be regnerated (at a cost though). */
-
-// @SuppressSysoutChecks(bugUrl="we print stuff")
-
-public class TestDemoParallelLeafReader extends LuceneTestCase {
-
-  static final boolean DEBUG = false;
-
-  static abstract class ReindexingReader implements Closeable {
-
-    /** Key used to store the current schema gen in the SegmentInfo diagnostics */
-    public final static String SCHEMA_GEN_KEY = "schema_gen";
-
-    public final IndexWriter w;
-    public final ReaderManager mgr;
-
-    private final Directory indexDir;
-    private final Path root;
-    private final Path segsPath;
-
-    /** Which segments have been closed, but their parallel index is not yet not removed. */
-    private final Set<SegmentIDAndGen> closedSegments = Collections.newSetFromMap(new ConcurrentHashMap<SegmentIDAndGen,Boolean>());
-
-    /** Holds currently open parallel readers for each segment. */
-    private final Map<SegmentIDAndGen,LeafReader> parallelReaders = new ConcurrentHashMap<>();
-
-    void printRefCounts() {
-      System.out.println("All refCounts:");
-      for(Map.Entry<SegmentIDAndGen,LeafReader> ent : parallelReaders.entrySet()) {
-        System.out.println("  " + ent.getKey() + " " + ent.getValue() + " refCount=" + ent.getValue().getRefCount());
-      }
-    }
-
-    public ReindexingReader(Path root) throws IOException {
-      this.root = root;
-
-      // Normal index is stored under "index":
-      indexDir = openDirectory(root.resolve("index"));
-
-      // Per-segment parallel indices are stored under subdirs "segs":
-      segsPath = root.resolve("segs");
-      Files.createDirectories(segsPath);
-
-      IndexWriterConfig iwc = getIndexWriterConfig();
-      iwc.setMergePolicy(new ReindexingMergePolicy(iwc.getMergePolicy()));
-      if (DEBUG) {
-        System.out.println("TEST: use IWC:\n" + iwc);
-      }
-      w = new IndexWriter(indexDir, iwc);
-
-      w.getConfig().setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
-          @Override
-          public void warm(LeafReader reader) throws IOException {
-            // This will build the parallel index for the merged segment before the merge becomes visible, so reopen delay is only due to
-            // newly flushed segments:
-            if (DEBUG) System.out.println(Thread.currentThread().getName() +": TEST: now warm " + reader);
-            // TODO: it's not great that we pass false here; it means we close the reader & reopen again for NRT reader; still we did "warm" by
-            // building the parallel index, if necessary
-            getParallelLeafReader(reader, false, getCurrentSchemaGen());
-          }
-        });
-
-      // start with empty commit:
-      w.commit();
-      mgr = new ReaderManager(new ParallelLeafDirectoryReader(DirectoryReader.open(w, true)));
-    }
-
-    protected abstract IndexWriterConfig getIndexWriterConfig() throws IOException;
-
-    /** Optional method to validate that the provided parallell reader in fact reflects the changes in schemaGen. */
-    protected void checkParallelReader(LeafReader reader, LeafReader parallelReader, long schemaGen) throws IOException {
-    }
-
-    /** Override to customize Directory impl. */
-    protected Directory openDirectory(Path path) throws IOException {
-      return FSDirectory.open(path);
-    }
-
-    public void commit() throws IOException {
-      w.commit();
-    }
-    
-    LeafReader getCurrentReader(LeafReader reader, long schemaGen) throws IOException {
-      LeafReader parallelReader = getParallelLeafReader(reader, true, schemaGen);
-      if (parallelReader != null) {
-
-        // We should not be embedding one ParallelLeafReader inside another:
-        assertFalse(parallelReader instanceof ParallelLeafReader);
-        assertFalse(reader instanceof ParallelLeafReader);
-
-        // NOTE: important that parallelReader is first, so if there are field name overlaps, because changes to the schema
-        // overwrote existing field names, it wins:
-        LeafReader newReader = new ParallelLeafReader(false, parallelReader, reader) {
-          @Override
-          public Bits getLiveDocs() {
-            return getParallelReaders()[1].getLiveDocs();
-          }
-          @Override
-          public int numDocs() {
-            return getParallelReaders()[1].numDocs();
-          }
-        };
-
-        // Because ParallelLeafReader does its own (extra) incRef:
-        parallelReader.decRef();
-
-        return newReader;
-
-      } else {
-        // This segment was already current as of currentSchemaGen:
-        return reader;
-      }
-    }
-
-    private class ParallelLeafDirectoryReader extends FilterDirectoryReader {
-      public ParallelLeafDirectoryReader(DirectoryReader in) {
-        super(in, new FilterDirectoryReader.SubReaderWrapper() {
-            final long currentSchemaGen = getCurrentSchemaGen();
-            @Override
-            public LeafReader wrap(LeafReader reader) {
-              try {
-                return getCurrentReader(reader, currentSchemaGen);
-              } catch (IOException ioe) {
-                // TODO: must close on exc here:
-                throw new RuntimeException(ioe);
-              }
-            }
-          });
-      }
-
-      @Override
-      protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
-        return new ParallelLeafDirectoryReader(in);
-      }
-
-      @Override
-      protected void doClose() throws IOException {
-        Throwable firstExc = null;
-        for (final LeafReader r : getSequentialSubReaders()) {
-          if (r instanceof ParallelLeafReader) {
-            // try to close each reader, even if an exception is thrown
-            try {
-              r.decRef();
-            } catch (Throwable t) {
-              if (firstExc == null) {
-                firstExc = t;
-              }
-            }
-          }
-        }
-        // Also close in, so it decRef's the SegmentInfos
-        try {
-          in.doClose();
-        } catch (Throwable t) {
-          if (firstExc == null) {
-            firstExc = t;
-          }
-        }
-        // throw the first exception
-        IOUtils.reThrow(firstExc);
-      }
-    }
-
-    @Override
-    public void close() throws IOException {
-      w.close();
-      if (DEBUG) System.out.println("TEST: after close writer index=" + SegmentInfos.readLatestCommit(indexDir).toString(indexDir));
-
-      /*
-      DirectoryReader r = mgr.acquire();
-      try {
-        TestUtil.checkReader(r);
-      } finally {
-        mgr.release(r);
-      }
-      */
-      mgr.close();
-      pruneOldSegments(true);
-      assertNoExtraSegments();
-      indexDir.close();
-    }
-
-    // Make sure we deleted all parallel indices for segments that are no longer in the main index: 
-    private void assertNoExtraSegments() throws IOException {
-      Set<String> liveIDs = new HashSet<String>();
-      for(SegmentCommitInfo info : SegmentInfos.readLatestCommit(indexDir)) {
-        String idString = StringHelper.idToString(info.info.getId());
-        liveIDs.add(idString);
-      }
-
-      // At this point (closing) the only segments in closedSegments should be the still-live ones:
-      for(SegmentIDAndGen segIDGen : closedSegments) {
-        assertTrue(liveIDs.contains(segIDGen.segID));
-      }
-
-      boolean fail = false;
-      try (DirectoryStream<Path> stream = Files.newDirectoryStream(segsPath)) {
-          for (Path path : stream) {
-            SegmentIDAndGen segIDGen = new SegmentIDAndGen(path.getFileName().toString());
-            if (liveIDs.contains(segIDGen.segID) == false) {
-              if (DEBUG) System.out.println("TEST: fail seg=" + path.getFileName() + " is not live but still has a parallel index");
-              fail = true;
-            }
-          }
-        }
-      assertFalse(fail);
-    }
-
-    private static class SegmentIDAndGen {
-      public final String segID;
-      public final long schemaGen;
-
-      public SegmentIDAndGen(String segID, long schemaGen) {
-        this.segID = segID;
-        this.schemaGen = schemaGen;
-      }
-
-      public SegmentIDAndGen(String s) {
-        String[] parts = s.split("_");
-        if (parts.length != 2) {
-          throw new IllegalArgumentException("invalid SegmentIDAndGen \"" + s + "\"");
-        }
-        // TODO: better checking of segID?
-        segID = parts[0];
-        schemaGen = Long.parseLong(parts[1]);
-      }
-
-      @Override
-      public int hashCode() {
-        return (int) (segID.hashCode() * schemaGen);
-      }
-
-      @Override
-      public boolean equals(Object _other) {
-        if (_other instanceof SegmentIDAndGen) {
-          SegmentIDAndGen other = (SegmentIDAndGen) _other;
-          return segID.equals(other.segID) && schemaGen == other.schemaGen;
-        } else {
-          return false;
-        }
-      }
-
-      @Override
-      public String toString() {
-        return segID + "_" + schemaGen;
-      }
-    }
-
-    private class ParallelReaderClosed implements LeafReader.ReaderClosedListener {
-      private final SegmentIDAndGen segIDGen;
-      private final Directory dir;
-
-      public ParallelReaderClosed(SegmentIDAndGen segIDGen, Directory dir) {
-        this.segIDGen = segIDGen;
-        this.dir = dir;
-      }
-
-      @Override
-      public void onClose(IndexReader ignored) {
-        try {
-          // TODO: make this sync finer, i.e. just the segment + schemaGen
-          synchronized(ReindexingReader.this) {
-            if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now close parallel parLeafReader dir=" + dir + " segIDGen=" + segIDGen);
-            parallelReaders.remove(segIDGen);
-            dir.close();
-            closedSegments.add(segIDGen);
-          }
-        } catch (IOException ioe) {
-          System.out.println("TEST: hit IOExc closing dir=" + dir);
-          ioe.printStackTrace(System.out);
-          throw new RuntimeException(ioe);
-        }
-      }
-    }
-
-    // Returns a ref
-    LeafReader getParallelLeafReader(final LeafReader leaf, boolean doCache, long schemaGen) throws IOException {
-      assert leaf instanceof SegmentReader;
-      SegmentInfo info = ((SegmentReader) leaf).getSegmentInfo().info;
-
-      long infoSchemaGen = getSchemaGen(info);
-
-      if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: getParallelLeafReader: " + leaf + " infoSchemaGen=" + infoSchemaGen + " vs schemaGen=" + schemaGen + " doCache=" + doCache);
-
-      if (infoSchemaGen == schemaGen) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: segment is already current schemaGen=" + schemaGen + "; skipping");
-        return null;
-      }
-
-      if (infoSchemaGen > schemaGen) {
-        throw new IllegalStateException("segment infoSchemaGen (" + infoSchemaGen + ") cannot be greater than requested schemaGen (" + schemaGen + ")");
-      }
-
-      final SegmentIDAndGen segIDGen = new SegmentIDAndGen(StringHelper.idToString(info.getId()), schemaGen);
-
-      // While loop because the parallel reader may be closed out from under us, so we must retry:
-      while (true) {
-
-        // TODO: make this sync finer, i.e. just the segment + schemaGen
-        synchronized (this) {
-          LeafReader parReader = parallelReaders.get(segIDGen);
-      
-          assert doCache || parReader == null;
-
-          if (parReader == null) {
-
-            Path leafIndex = segsPath.resolve(segIDGen.toString());
-
-            final Directory dir = openDirectory(leafIndex);
-
-            if (Files.exists(leafIndex.resolve("done")) == false) {
-              if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: build segment index for " + leaf + " " + segIDGen + " (source: " + info.getDiagnostics().get("source") + ") dir=" + leafIndex);
-
-              if (dir.listAll().length != 0) {
-                // It crashed before finishing last time:
-                if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: remove old incomplete index files: " + leafIndex);
-                IOUtils.rm(leafIndex);
-              }
-
-              reindex(infoSchemaGen, schemaGen, leaf, dir);
-
-              // Marker file, telling us this index is in fact done.  This way if we crash while doing the reindexing for a given segment, we will
-              // later try again:
-              dir.createOutput("done", IOContext.DEFAULT).close();
-            } else {
-              if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: segment index already exists for " + leaf + " " + segIDGen + " (source: " + info.getDiagnostics().get("source") + ") dir=" + leafIndex);
-            }
-
-            if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check index " + dir);
-            //TestUtil.checkIndex(dir);
-
-            SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
-            final LeafReader parLeafReader;
-            if (infos.size() == 1) {
-              parLeafReader = new SegmentReader(infos.info(0), IOContext.DEFAULT);
-            } else {
-              // This just means we didn't forceMerge above:
-              parLeafReader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
-            }
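-            // Either way, the parallel reader must line up docID-for-docID with the original leaf, since ParallelLeafReader zips them together.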
-
-            //checkParallelReader(leaf, parLeafReader, schemaGen);
-
-            if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: opened parallel reader: " + parLeafReader);
-            if (doCache) {
-              parallelReaders.put(segIDGen, parLeafReader);
-
-              // Our id+gen could have been previously closed, e.g. if it was a merged segment that was warmed, so we must clear this else
-              // the pruning may remove our directory:
-              closedSegments.remove(segIDGen);
-
-              parLeafReader.addReaderClosedListener(new ParallelReaderClosed(segIDGen, dir));
-
-            } else {
-              // Used only for merged segment warming:
-              // Messy: we close this reader now, instead of leaving open for reuse:
-              if (DEBUG) System.out.println("TEST: now decRef non cached refCount=" + parLeafReader.getRefCount());
-              parLeafReader.decRef();
-              dir.close();
-
-              // Must do this after dir is closed, else another thread could "rm -rf" while we are closing (which makes MDW.close's
-              // checkIndex angry):
-              closedSegments.add(segIDGen);
-            }
-            parReader = parLeafReader;
-
-          } else {
-            if (parReader.tryIncRef() == false) {
-              // We failed: this reader just got closed by another thread, e.g. refresh thread opening a new reader, so this reader is now
-              // closed and we must try again.
-              if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: tryIncRef failed for " + parReader + "; retry");
-              parReader = null;
-              continue;
-            }
-            if (DEBUG) System.out.println(Thread.currentThread().getName()+ ": TEST: use existing already opened parReader=" + parReader + " refCount=" + parReader.getRefCount());
-            //checkParallelReader(leaf, parReader, schemaGen);
-          }
-
-          // We return the new reference to caller
-          return parReader;
-        }
-      }
-    }
-
-    // TODO: we could pass a writer already opened...?
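-    /** Rebuilds the parallel index for one segment: implementations must add exactly {@code reader.maxDoc()} documents, in docID order, to {@code parallelDir}. */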
-    protected abstract void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException;
-
-    /** Returns the gen for the current schema. */
-    protected abstract long getCurrentSchemaGen();
-
-    /** Returns the gen that should be merged, meaning those changes will be folded back into the main index. */
-    protected long getMergingSchemaGen() {
-      return getCurrentSchemaGen();
-    }
-
-    /** Removes parallel indices that are no longer referenced by the last commit point.  We can't
-     *  remove them when the parallel reader is closed because they may still be referenced by
-     *  the last commit. */
-    private void pruneOldSegments(boolean removeOldGens) throws IOException {
-      SegmentInfos lastCommit = SegmentInfos.readLatestCommit(indexDir);
-      if (DEBUG) System.out.println("TEST: prune");
-
-      Set<String> liveIDs = new HashSet<String>();
-      for(SegmentCommitInfo info : lastCommit) {
-        String idString = StringHelper.idToString(info.info.getId());
-        liveIDs.add(idString);
-      }
-
-      long currentSchemaGen = getCurrentSchemaGen();
-
-      if (Files.exists(segsPath)) {
-        try (DirectoryStream<Path> stream = Files.newDirectoryStream(segsPath)) {
-            for (Path path : stream) {
-              if (Files.isDirectory(path)) {
-                SegmentIDAndGen segIDGen = new SegmentIDAndGen(path.getFileName().toString());
-                assert segIDGen.schemaGen <= currentSchemaGen;
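-                // Prune only if no live segment references this ID, and either its reader was already closed or it belongs to an older schema gen we were asked to remove.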
-                if (liveIDs.contains(segIDGen.segID) == false && (closedSegments.contains(segIDGen) || (removeOldGens && segIDGen.schemaGen < currentSchemaGen))) {
-                  if (DEBUG) System.out.println("TEST: remove " + segIDGen);
-                  try {
-                    IOUtils.rm(path);
-                    closedSegments.remove(segIDGen);
-                  } catch (IOException ioe) {
-                    // OK, we'll retry later
-                    if (DEBUG) System.out.println("TEST: ignore ioe during delete " + path + ":" + ioe);
-                  }
-                }
-              }
-            }
-          }
-      }
-    }
-
-    /** Just replaces the sub-readers with parallel readers, so reindexed fields are merged into new segments. */
-    private class ReindexingMergePolicy extends MergePolicy {
-
-      class ReindexingOneMerge extends OneMerge {
-
-        List<LeafReader> parallelReaders;
-        final long schemaGen;
-
-        ReindexingOneMerge(List<SegmentCommitInfo> segments) {
-          super(segments);
-          // Commit up front to which schemaGen we will merge; we don't want a schema change sneaking in for some of our leaf readers but not others:
-          schemaGen = getMergingSchemaGen();
-          long currentSchemaGen = getCurrentSchemaGen();
-
-          // Defensive sanity check:
-          if (schemaGen > currentSchemaGen) {
-            throw new IllegalStateException("currentSchemaGen (" + currentSchemaGen + ") must always be >= mergingSchemaGen (" + schemaGen + ")");
-          }
-        }
-
-        @Override
-        public List<CodecReader> getMergeReaders() throws IOException {
-          if (parallelReaders == null) {
-            parallelReaders = new ArrayList<>();
-            for (CodecReader reader : super.getMergeReaders()) {
-              parallelReaders.add(getCurrentReader((SegmentReader)reader, schemaGen));
-            }
-          }
-
-          // TODO: fix ParallelLeafReader, if this is a good use case
-          List<CodecReader> mergeReaders = new ArrayList<>();
-          for (LeafReader reader : parallelReaders) {
-            mergeReaders.add(SlowCodecReaderWrapper.wrap(reader));
-          }
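-          // SlowCodecReaderWrapper exposes each combined (original + parallel) leaf to the merge as a single CodecReader.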
-          return mergeReaders;
-        }
-
-        @Override
-        public void mergeFinished() throws IOException {
-          Throwable th = null;
-          for(LeafReader r : parallelReaders) {
-            if (r instanceof ParallelLeafReader) {
-              try {
-                r.decRef();
-              } catch (Throwable t) {
-                if (th == null) {
-                  th = t;
-                }
-              }
-            }
-          }
-
-          // If any error occurred, rethrow it.
-          IOUtils.reThrow(th);
-        }
-    
-        @Override
-        public void setInfo(SegmentCommitInfo info) {
-          // Record that this merged segment is current as of this schemaGen:
-          info.info.getDiagnostics().put(SCHEMA_GEN_KEY, Long.toString(schemaGen));
-          super.setInfo(info);
-        }
-
-        @Override
-        public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
-          return super.getDocMap(mergeState);
-        }
-      }
-
-      class ReindexingMergeSpecification extends MergeSpecification {
-        @Override
-        public void add(OneMerge merge) {
-          super.add(new ReindexingOneMerge(merge.segments));
-        }
-
-        @Override
-        public String segString(Directory dir) {
-          return "ReindexingMergeSpec(" + super.segString(dir) + ")";
-        }
-      }
-
-      MergeSpecification wrap(MergeSpecification spec) {
-        MergeSpecification wrapped = null;
-        if (spec != null) {
-          wrapped = new ReindexingMergeSpecification();
-          for (OneMerge merge : spec.merges) {
-            wrapped.add(merge);
-          }
-        }
-        return wrapped;
-      }
-
-      final MergePolicy in;
-
-      /** Create a new {@code MergePolicy} that wraps {@code in} and substitutes parallel (reindexed) readers during merges. */
-      public ReindexingMergePolicy(MergePolicy in) {
-        this.in = in;
-      }
-
-      @Override
-      public MergeSpecification findMerges(MergeTrigger mergeTrigger,
-                                           SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
-        return wrap(in.findMerges(mergeTrigger, segmentInfos, writer));
-      }
-
-      @Override
-      public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
-                                                 int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
-        throws IOException {
-        // TODO: do we need to force-force this?  Ie, wrapped MP may think index is already optimized, yet maybe its schemaGen is old?  need test!
-        return wrap(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
-      }
-
-      @Override
-      public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
-        throws IOException {
-        return wrap(in.findForcedDeletesMerges(segmentInfos, writer));
-      }
-
-      @Override
-      public boolean useCompoundFile(SegmentInfos segments,
-                                     SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
-        return in.useCompoundFile(segments, newSegment, writer);
-      }
-
-      @Override
-      public String toString() {
-        return "ReindexingMergePolicy(" + in + ")";
-      }
-    }
-
-    static long getSchemaGen(SegmentInfo info) {
-      String s = info.getDiagnostics().get(SCHEMA_GEN_KEY);
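-      // Segments written before any reindexing carry no schemaGen diagnostic; treat them as generation -1.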
-      if (s == null) {
-        return -1;
-      } else {
-        return Long.parseLong(s);
-      }
-    }
-  }
-
-  private ReindexingReader getReindexer(Path root) throws IOException {
-    return new ReindexingReader(root) {
-      @Override
-      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-        TieredMergePolicy tmp = new TieredMergePolicy();
-        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
-        tmp.setFloorSegmentMB(.01);
-        iwc.setMergePolicy(tmp);
-        return iwc;
-      }
-
-      @Override
-      protected Directory openDirectory(Path path) throws IOException {
-        MockDirectoryWrapper dir = newMockFSDirectory(path);
-        dir.setUseSlowOpenClosers(false);
-        dir.setThrottling(Throttling.NEVER);
-        return dir;
-      }
-
-      @Override
-      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-
-        // The order of our docIDs must precisely match the incoming reader:
-        iwc.setMergePolicy(new LogByteSizeMergePolicy());
-        IndexWriter w = new IndexWriter(parallelDir, iwc);
-        int maxDoc = reader.maxDoc();
-
-        // Slowly parse the stored field into a new doc values field:
-        for(int i=0;i<maxDoc;i++) {
-          // TODO: is this still O(blockSize^2)?
-          StoredDocument oldDoc = reader.document(i);
-          Document newDoc = new Document();
-          long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
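-          // Write the value twice: as doc values (for sorting) and as an indexed numeric field (for range queries).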
-          newDoc.add(new NumericDocValuesField("number", value));
-          newDoc.add(new LongField("number", value, Field.Store.NO));
-          w.addDocument(newDoc);
-        }
-
-        if (random().nextBoolean()) {
-          w.forceMerge(1);
-        }
-
-        w.close();
-      }
-
-      @Override
-      protected long getCurrentSchemaGen() {
-        return 0;
-      }
-    };
-  }
-
-  /** Schema change by adding a new number_<schemaGen> DV field each time. */
-  private ReindexingReader getReindexerNewDVFields(Path root, final AtomicLong currentSchemaGen) throws IOException {
-    return new ReindexingReader(root) {
-      @Override
-      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-        TieredMergePolicy tmp = new TieredMergePolicy();
-        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
-        tmp.setFloorSegmentMB(.01);
-        iwc.setMergePolicy(tmp);
-        return iwc;
-      }
-
-      @Override
-      protected Directory openDirectory(Path path) throws IOException {
-        MockDirectoryWrapper dir = newMockFSDirectory(path);
-        dir.setUseSlowOpenClosers(false);
-        dir.setThrottling(Throttling.NEVER);
-        return dir;
-      }
-
-      @Override
-      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-
-        // The order of our docIDs must precisely match the incoming reader:
-        iwc.setMergePolicy(new LogByteSizeMergePolicy());
-        IndexWriter w = new IndexWriter(parallelDir, iwc);
-        int maxDoc = reader.maxDoc();
-
-        if (oldSchemaGen <= 0) {
-          // Must slowly parse the stored field into a new doc values field:
-          for(int i=0;i<maxDoc;i++) {
-            // TODO: is this still O(blockSize^2)?
-            StoredDocument oldDoc = reader.document(i);
-            Document newDoc = new Document();
-            long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
-            newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, value));
-            newDoc.add(new LongField("number", value, Field.Store.NO));
-            w.addDocument(newDoc);
-          }
-        } else {
-          // Just carry over doc values from previous field:
-          NumericDocValues oldValues = reader.getNumericDocValues("number_" + oldSchemaGen);
-          assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
-          for(int i=0;i<maxDoc;i++) {
-            // TODO: is this still O(blockSize^2)?
-            StoredDocument oldDoc = reader.document(i);
-            Document newDoc = new Document();
-            newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, oldValues.get(i)));
-            w.addDocument(newDoc);
-          }
-        }
-
-        if (random().nextBoolean()) {
-          w.forceMerge(1);
-        }
-
-        w.close();
-      }
-
-      @Override
-      protected long getCurrentSchemaGen() {
-        return currentSchemaGen.get();
-      }
-
-      @Override
-      protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen) throws IOException {
-        String fieldName = "number_" + schemaGen;
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check parallel number DVs field=" + fieldName + " r=" + r + " parR=" + parR);
-        NumericDocValues numbers = parR.getNumericDocValues(fieldName);
-        if (numbers == null) {
-          return;
-        }
-        int maxDoc = r.maxDoc();
-        boolean failed = false;
-        for(int i=0;i<maxDoc;i++) {
-          StoredDocument oldDoc = r.document(i);
-          long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
-          if (value != numbers.get(i)) {
-            if (DEBUG) System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
-            failed = true;
-          } else if (failed) {
-            if (DEBUG) System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
-          }
-        }
-        assertFalse("FAILED field=" + fieldName + " r=" + r, failed);
-      }
-    };
-  }
-
-  /** Schema change by changing how the same "number" DV field is indexed. */
-  private ReindexingReader getReindexerSameDVField(Path root, final AtomicLong currentSchemaGen, final AtomicLong mergingSchemaGen) throws IOException {
-    return new ReindexingReader(root) {
-      @Override
-      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-        TieredMergePolicy tmp = new TieredMergePolicy();
-        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
-        tmp.setFloorSegmentMB(.01);
-        iwc.setMergePolicy(tmp);
-        if (TEST_NIGHTLY) {
-          // during nightly tests, we might use too many files if we aren't careful
-          iwc.setUseCompoundFile(true);
-        }
-        return iwc;
-      }
-
-      @Override
-      protected Directory openDirectory(Path path) throws IOException {
-        MockDirectoryWrapper dir = newMockFSDirectory(path);
-        dir.setUseSlowOpenClosers(false);
-        dir.setThrottling(Throttling.NEVER);
-        return dir;
-      }
-
-      @Override
-      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
-        IndexWriterConfig iwc = newIndexWriterConfig();
-
-        // The order of our docIDs must precisely match the incoming reader:
-        iwc.setMergePolicy(new LogByteSizeMergePolicy());
-        IndexWriter w = new IndexWriter(parallelDir, iwc);
-        int maxDoc = reader.maxDoc();
-
-        if (oldSchemaGen <= 0) {
-          // Must slowly parse the stored field into a new doc values field:
-          for(int i=0;i<maxDoc;i++) {
-            // TODO: is this still O(blockSize^2)?
-            StoredDocument oldDoc = reader.document(i);
-            Document newDoc = new Document();
-            long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
-            newDoc.add(new NumericDocValuesField("number", newSchemaGen*value));
-            newDoc.add(new LongField("number", value, Field.Store.NO));
-            w.addDocument(newDoc);
-          }
-        } else {
-          // Just carry over doc values from previous field:
-          NumericDocValues oldValues = reader.getNumericDocValues("number");
-          assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
-          for(int i=0;i<maxDoc;i++) {
-            // TODO: is this still O(blockSize^2)?
-            StoredDocument oldDoc = reader.document(i);
-            Document newDoc = new Document();
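-            // Divide out the old gen to recover the base value, then re-scale by the new gen.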
-            newDoc.add(new NumericDocValuesField("number", newSchemaGen*(oldValues.get(i)/oldSchemaGen)));
-            w.addDocument(newDoc);
-          }
-        }
-
-        if (random().nextBoolean()) {
-          w.forceMerge(1);
-        }
-
-        w.close();
-      }
-
-      @Override
-      protected long getCurrentSchemaGen() {
-        return currentSchemaGen.get();
-      }
-
-      @Override
-      protected long getMergingSchemaGen() {
-        return mergingSchemaGen.get();
-      }
-
-      @Override
-      protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen) throws IOException {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check parallel number DVs r=" + r + " parR=" + parR);
-        NumericDocValues numbers = parR.getNumericDocValues("numbers");
-        if (numbers == null) {
-          return;
-        }
-        int maxDoc = r.maxDoc();
-        boolean failed = false;
-        for(int i=0;i<maxDoc;i++) {
-          StoredDocument oldDoc = r.document(i);
-          long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
-          value *= schemaGen;
-          if (value != numbers.get(i)) {
-            System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
-            failed = true;
-          } else if (failed) {
-            System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
-          }
-        }
-        assertFalse("FAILED r=" + r, failed);
-      }
-    };
-  }
-
-  public void testBasicMultipleSchemaGens() throws Exception {
-
-    AtomicLong currentSchemaGen = new AtomicLong();
-
-    // TODO: separate refresh thread, search threads, indexing threads
-    ReindexingReader reindexer = getReindexerNewDVFields(createTempDir(), currentSchemaGen);
-    reindexer.commit();
-
-    Document doc = new Document();
-    doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-    reindexer.w.addDocument(doc);
-
-    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: refresh @ 1 doc");
-    reindexer.mgr.maybeRefresh();
-    DirectoryReader r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-    //reindexer.printRefCounts();
-
-    currentSchemaGen.incrementAndGet();
-
-    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: increment schemaGen");
-    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: commit");
-    reindexer.commit();
-
-    doc = new Document();
-    doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-    reindexer.w.addDocument(doc);
-
-    if (DEBUG) System.out.println("TEST: refresh @ 2 docs");
-    reindexer.mgr.maybeRefresh();
-    //reindexer.printRefCounts();
-    r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println("TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-
-    if (DEBUG) System.out.println("TEST: forceMerge");
-    reindexer.w.forceMerge(1);
-
-    currentSchemaGen.incrementAndGet();
-
-    if (DEBUG) System.out.println("TEST: commit");
-    reindexer.commit();
-
-    if (DEBUG) System.out.println("TEST: refresh after forceMerge");
-    reindexer.mgr.maybeRefresh();
-    r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println("TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-
-    if (DEBUG) System.out.println("TEST: close writer");
-    reindexer.close();
-  }
-
-  public void testRandomMultipleSchemaGens() throws Exception {
-
-    AtomicLong currentSchemaGen = new AtomicLong();
-    ReindexingReader reindexer = null;
-
-    // TODO: separate refresh thread, search threads, indexing threads
-    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
-    int maxID = 0;
-    Path root = createTempDir();
-    int refreshEveryNumDocs = 100;
-    int commitCloseNumDocs = 1000;
-    for(int i=0;i<numDocs;i++) {
-      if (reindexer == null) {
-        reindexer = getReindexerNewDVFields(root, currentSchemaGen);
-      }
-
-      Document doc = new Document();
-      String id;
-      String updateID;
-      if (maxID > 0 && random().nextInt(10) == 7) {
-        // Replace a doc
-        id = "" + random().nextInt(maxID);
-        updateID = id;
-      } else {
-        id = "" + (maxID++);
-        updateID = null;
-      }
-        
-      doc.add(newStringField("id", id, Field.Store.NO));
-      doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-      if (updateID == null) {
-        reindexer.w.addDocument(doc);
-      } else {
-        reindexer.w.updateDocument(new Term("id", updateID), doc);
-      }
-      if (random().nextInt(refreshEveryNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: refresh @ " + (i+1) + " docs");
-        reindexer.mgr.maybeRefresh();
-
-        DirectoryReader r = reindexer.mgr.acquire();
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: got reader=" + r);
-        try {
-          checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
-        } finally {
-          reindexer.mgr.release(r);
-        }
-        if (DEBUG) reindexer.printRefCounts();
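-        // Back off the refresh frequency as the index grows, to keep test runtime bounded.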
-        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
-      }
-
-      if (random().nextInt(500) == 17) {
-        currentSchemaGen.incrementAndGet();
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance schemaGen to " + currentSchemaGen);
-      }
-
-      if (i > 0 && random().nextInt(10) == 7) {
-        // Random delete:
-        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
-      }
-
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: commit @ " + (i+1) + " docs");
-        reindexer.commit();
-        //reindexer.printRefCounts();
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-
-      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: close writer @ " + (i+1) + " docs");
-        reindexer.close();
-        reindexer = null;
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-    }
-
-    if (reindexer != null) {
-      reindexer.close();
-    }
-  }
-
-  /** First schema change creates a new "number" DV field off the stored field; subsequent changes just change the value of that number
-   *  field for all docs. */
-  public void testRandomMultipleSchemaGensSameField() throws Exception {
-
-    AtomicLong currentSchemaGen = new AtomicLong();
-    AtomicLong mergingSchemaGen = new AtomicLong();
-
-    ReindexingReader reindexer = null;
-
-    // TODO: separate refresh thread, search threads, indexing threads
-    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
-    int maxID = 0;
-    Path root = createTempDir();
-    int refreshEveryNumDocs = 100;
-    int commitCloseNumDocs = 1000;
-
-    for(int i=0;i<numDocs;i++) {
-      if (reindexer == null) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: open new reader/writer");
-        reindexer = getReindexerSameDVField(root, currentSchemaGen, mergingSchemaGen);
-      }
-
-      Document doc = new Document();
-      String id;
-      String updateID;
-      if (maxID > 0 && random().nextInt(10) == 7) {
-        // Replace a doc
-        id = "" + random().nextInt(maxID);
-        updateID = id;
-      } else {
-        id = "" + (maxID++);
-        updateID = null;
-      }
-        
-      doc.add(newStringField("id", id, Field.Store.NO));
-      doc.add(newTextField("text", "number " + TestUtil.nextInt(random(), -10000, 10000), Field.Store.YES));
-      if (updateID == null) {
-        reindexer.w.addDocument(doc);
-      } else {
-        reindexer.w.updateDocument(new Term("id", updateID), doc);
-      }
-      if (random().nextInt(refreshEveryNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: refresh @ " + (i+1) + " docs");
-        reindexer.mgr.maybeRefresh();
-        DirectoryReader r = reindexer.mgr.acquire();
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: got reader=" + r);
-        try {
-          checkAllNumberDVs(r, "number", true, (int) currentSchemaGen.get());
-        } finally {
-          reindexer.mgr.release(r);
-        }
-        if (DEBUG) reindexer.printRefCounts();
-        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
-      }
-
-      if (random().nextInt(500) == 17) {
-        currentSchemaGen.incrementAndGet();
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance schemaGen to " + currentSchemaGen);
-        if (random().nextBoolean()) {
-          mergingSchemaGen.incrementAndGet();
-          if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance mergingSchemaGen to " + mergingSchemaGen);
-        }
-      }
-
-      if (i > 0 && random().nextInt(10) == 7) {
-        // Random delete:
-        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
-      }
-
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: commit @ " + (i+1) + " docs");
-        reindexer.commit();
-        //reindexer.printRefCounts();
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-
-      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: close writer @ " + (i+1) + " docs");
-        reindexer.close();
-        reindexer = null;
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-    }
-
-    if (reindexer != null) {
-      reindexer.close();
-    }
-
-    // Verify main index never reflects schema changes beyond mergingSchemaGen:
-    try (Directory dir = newFSDirectory(root.resolve("index"));
-         IndexReader r = DirectoryReader.open(dir)) {
-        for (LeafReaderContext ctx : r.leaves()) {
-          LeafReader leaf = ctx.reader();
-          NumericDocValues numbers = leaf.getNumericDocValues("number");
-          if (numbers != null) {
-            int maxDoc = leaf.maxDoc();
-            for(int i=0;i<maxDoc;i++) {
-              StoredDocument doc = leaf.document(i);
-              long value = Long.parseLong(doc.get("text").split(" ")[1]);
-              long dvValue = numbers.get(i);
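-              // dvValue is gen * value for whichever gen was folded in at merge time; that gen must never exceed mergingSchemaGen.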
-              if (value == 0) {
-                assertEquals(0, dvValue);
-              } else {
-                assertTrue(dvValue % value == 0);
-                assertTrue(dvValue / value <= mergingSchemaGen.get());
-              }
-            }
-          }
-        }
-      }
-  }
-
-  public void testBasic() throws Exception {
-    ReindexingReader reindexer = getReindexer(createTempDir());
-
-    // Start with initial empty commit:
-    reindexer.commit();
-
-    Document doc = new Document();
-    doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-    reindexer.w.addDocument(doc);
-
-    if (DEBUG) System.out.println("TEST: refresh @ 1 doc");
-    reindexer.mgr.maybeRefresh();
-    DirectoryReader r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println("TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r);
-      IndexSearcher s = newSearcher(r);
-      testNumericDVSort(s);
-      testNumericRangeQuery(s);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-    //reindexer.printRefCounts();
-
-    if (DEBUG) System.out.println("TEST: commit");
-    reindexer.commit();
-
-    doc = new Document();
-    doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-    reindexer.w.addDocument(doc);
-
-    if (DEBUG) System.out.println("TEST: refresh @ 2 docs");
-    reindexer.mgr.maybeRefresh();
-    //reindexer.printRefCounts();
-    r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println("TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r);
-      IndexSearcher s = newSearcher(r);
-      testNumericDVSort(s);
-      testNumericRangeQuery(s);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-
-    if (DEBUG) System.out.println("TEST: forceMerge");
-    reindexer.w.forceMerge(1);
-
-    if (DEBUG) System.out.println("TEST: commit");
-    reindexer.commit();
-
-    if (DEBUG) System.out.println("TEST: refresh after forceMerge");
-    reindexer.mgr.maybeRefresh();
-    r = reindexer.mgr.acquire();
-    if (DEBUG) System.out.println("TEST: got reader=" + r);
-    try {
-      checkAllNumberDVs(r);
-      IndexSearcher s = newSearcher(r);
-      testNumericDVSort(s);
-      testNumericRangeQuery(s);
-    } finally {
-      reindexer.mgr.release(r);
-    }
-
-    if (DEBUG) System.out.println("TEST: close writer");
-    reindexer.close();
-  }
-
-  public void testRandom() throws Exception {
-    Path root = createTempDir();
-    ReindexingReader reindexer = null;
-
-    // TODO: separate refresh thread, search threads, indexing threads
-    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
-    int maxID = 0;
-    int refreshEveryNumDocs = 100;
-    int commitCloseNumDocs = 1000;
-    for(int i=0;i<numDocs;i++) {
-      if (reindexer == null) {
-        reindexer = getReindexer(root);
-      }
-
-      Document doc = new Document();
-      String id;
-      String updateID;
-      if (maxID > 0 && random().nextInt(10) == 7) {
-        // Replace a doc
-        id = "" + random().nextInt(maxID);
-        updateID = id;
-      } else {
-        id = "" + (maxID++);
-        updateID = null;
-      }
-        
-      doc.add(newStringField("id", id, Field.Store.NO));
-      doc.add(newTextField("text", "number " + random().nextLong(), Field.Store.YES));
-      if (updateID == null) {
-        reindexer.w.addDocument(doc);
-      } else {
-        reindexer.w.updateDocument(new Term("id", updateID), doc);
-      }
-
-      if (random().nextInt(refreshEveryNumDocs) == 17) {
-        if (DEBUG) System.out.println("TEST: refresh @ " + (i+1) + " docs");
-        reindexer.mgr.maybeRefresh();
-        DirectoryReader r = reindexer.mgr.acquire();
-        if (DEBUG) System.out.println("TEST: got reader=" + r);
-        try {
-          checkAllNumberDVs(r);
-          IndexSearcher s = newSearcher(r);
-          testNumericDVSort(s);
-          testNumericRangeQuery(s);
-        } finally {
-          reindexer.mgr.release(r);
-        }
-        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
-      }
-
-      if (i > 0 && random().nextInt(10) == 7) {
-        // Random delete:
-        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
-      }
-
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println("TEST: commit @ " + (i+1) + " docs");
-        reindexer.commit();
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-
-      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
-      if (random().nextInt(commitCloseNumDocs) == 17) {
-        if (DEBUG) System.out.println("TEST: close writer @ " + (i+1) + " docs");
-        reindexer.close();
-        reindexer = null;
-        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
-      }
-    }
-    if (reindexer != null) {
-      reindexer.close();
-    }
-  }
-
-  private static void checkAllNumberDVs(IndexReader r) throws IOException {
-    checkAllNumberDVs(r, "number", true, 1);
-  }
-
-  private static void checkAllNumberDVs(IndexReader r, String fieldName, boolean doThrow, int multiplier) throws IOException {
-    NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName);
-    int maxDoc = r.maxDoc();
-    boolean failed = false;
-    long t0 = System.currentTimeMillis();
-    for(int i=0;i<maxDoc;i++) {
-      StoredDocument oldDoc = r.document(i);
-      long value = multiplier * Long.parseLong(oldDoc.get("text").split(" ")[1]);
-      if (value != numbers.get(i)) {
-        System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
-        failed = true;
-      } else if (failed) {
-        System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
-      }
-    }
-    if (failed) {
-      if (r instanceof LeafReader == false) {
-        System.out.println("TEST FAILED; check leaves");
-        for(LeafReaderContext ctx : r.leaves()) {
-          System.out.println("CHECK LEAF=" + ctx.reader());
-          checkAllNumberDVs(ctx.reader(), fieldName, false, 1);
-        }
-      }
-      if (doThrow) {
-        assertFalse("FAILED field=" + fieldName + " r=" + r, failed);
-      } else {
-        System.out.println("FAILED field=" + fieldName + " r=" + r);
-      }
-    }
-  }
-
-  private static void testNumericDVSort(IndexSearcher s) throws IOException {
-    // Confirm we can sort by the new DV field:
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 100, new Sort(new SortField("number", SortField.Type.LONG)));
-    NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
-    long last = Long.MIN_VALUE;
-    for(ScoreDoc scoreDoc : hits.scoreDocs) {
-      long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
-      assertTrue(value >= last);
-      assertEquals(value, numbers.get(scoreDoc.doc));
-      last = value;
-    }
-  }
-
-  private static void testNumericRangeQuery(IndexSearcher s) throws IOException {
-    NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
-    for(int i=0;i<100;i++) {
-      // Confirm we can range search by the new indexed (numeric) field:
-      long min = random().nextLong();
-      long max = random().nextLong();
-      if (min > max) {
-        long x = min;
-        min = max;
-        max = x;
-      }
-
-      TopDocs hits = s.search(NumericRangeQuery.newLongRange("number", min, max, true, true), 100);
-      for(ScoreDoc scoreDoc : hits.scoreDocs) {
-        long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
-        assertTrue(value >= min);
-        assertTrue(value <= max);
-        assertEquals(value, numbers.get(scoreDoc.doc));
-      }
-    }
-  }
-
-  // TODO: test exceptions
-}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
index 31e5b72..aa367ae 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -31,11 +31,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -52,22 +48,19 @@
   public void testDocument() throws IOException {
     SegmentReader [] readers = new SegmentReader[2];
     Directory dir = newDirectory();
-    Document doc1 = new Document();
-    Document doc2 = new Document();
-    DocHelper.setupDoc(doc1);
-    DocHelper.setupDoc(doc2);
-    DocHelper.writeDoc(random(), dir, doc1);
-    DocHelper.writeDoc(random(), dir, doc2);
+    DocHelper.writeDoc(random(), dir);
+    DocHelper.writeDoc(random(), dir);
     DirectoryReader reader = DirectoryReader.open(dir);
     assertTrue(reader != null);
     assertTrue(reader instanceof StandardDirectoryReader);
-    
-    StoredDocument newDoc1 = reader.document(0);
+    FieldTypes fieldTypes = reader.getFieldTypes();
+    Set<String> unstored = DocHelper.getUnstored(fieldTypes);
+    Document newDoc1 = reader.document(0);
     assertTrue(newDoc1 != null);
-    assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
-    StoredDocument newDoc2 = reader.document(1);
+    assertEquals(DocHelper.numFields() - unstored.size() + 1, DocHelper.numFields(newDoc1));
+    Document newDoc2 = reader.document(1);
     assertTrue(newDoc2 != null);
-    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+    assertEquals(DocHelper.numFields() - unstored.size() + 1, DocHelper.numFields(newDoc2));
     Terms vector = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
     assertNotNull(vector);
 
@@ -128,8 +121,8 @@
   private void addDoc(Random random, Directory ramDir1, String s, boolean create) throws IOException {
     IndexWriter iw = new IndexWriter(ramDir1, newIndexWriterConfig(new MockAnalyzer(random))
                                                 .setOpenMode(create ? OpenMode.CREATE : OpenMode.APPEND));
-    Document doc = new Document();
-    doc.add(newTextField("body", s, Field.Store.NO));
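+    // newDocument() binds the Document to the writer's FieldTypes, so per-field options are tracked by field name.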
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", s);
     iw.addDocument(doc);
     iw.close();
   }
@@ -163,207 +156,202 @@
    * @throws Exception on error
    */
   public void testGetFieldNames() throws Exception {
-      Directory d = newDirectory();
-      // set up writer
-      IndexWriter writer = new IndexWriter(
-          d,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-      );
+    Directory d = newDirectory();
+    // set up writer
+    IndexWriter writer = new IndexWriter(
+                                         d,
+                                         newIndexWriterConfig(new MockAnalyzer(random()))
+                                         );
 
-      Document doc = new Document();
+    Document doc = writer.newDocument();
 
-      FieldType customType3 = new FieldType();
-      customType3.setStored(true);
-      
-      doc.add(new StringField("keyword", "test1", Field.Store.YES));
-      doc.add(new TextField("text", "test1", Field.Store.YES));
-      doc.add(new Field("unindexed", "test1", customType3));
-      doc.add(new TextField("unstored","test1", Field.Store.NO));
+    doc.addAtom("keyword", "test1");
+    doc.addLargeText("text", "test1");
+    doc.addStoredString("unindexed", "test1");
+    doc.addLargeText("unstored", "test1");
+    writer.addDocument(doc);
+
+    writer.close();
+    // set up reader
+    DirectoryReader reader = DirectoryReader.open(d);
+    FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
+    assertNotNull(fieldInfos.fieldInfo("keyword"));
+    assertNotNull(fieldInfos.fieldInfo("text"));
+    assertNotNull(fieldInfos.fieldInfo("unindexed"));
+    assertNotNull(fieldInfos.fieldInfo("unstored"));
+    reader.close();
+    // add more documents
+    writer = new IndexWriter(
+                             d,
+                             newIndexWriterConfig(new MockAnalyzer(random()))
+                             .setOpenMode(OpenMode.APPEND)
+                             .setMergePolicy(newLogMergePolicy())
+                             );
+    // want to get some more segments here
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = writer.newDocument();
+      doc.addAtom("keyword", "test1");
+      doc.addLargeText("text", "test1");
+      doc.addStoredString("unindexed", "test1");
+      doc.addLargeText("unstored", "test1");
       writer.addDocument(doc);
-
-      writer.close();
-      // set up reader
-      DirectoryReader reader = DirectoryReader.open(d);
-      FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
-      assertNotNull(fieldInfos.fieldInfo("keyword"));
-      assertNotNull(fieldInfos.fieldInfo("text"));
-      assertNotNull(fieldInfos.fieldInfo("unindexed"));
-      assertNotNull(fieldInfos.fieldInfo("unstored"));
-      reader.close();
-      // add more documents
-      writer = new IndexWriter(
-          d,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-             .setOpenMode(OpenMode.APPEND)
-             .setMergePolicy(newLogMergePolicy())
-      );
-      // want to get some more segments here
-      int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new StringField("keyword", "test1", Field.Store.YES));
-        doc.add(new TextField("text", "test1", Field.Store.YES));
-        doc.add(new Field("unindexed", "test1", customType3));
-        doc.add(new TextField("unstored","test1", Field.Store.NO));
-        writer.addDocument(doc);
-      }
-      // new fields are in some different segments (we hope)
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new StringField("keyword2", "test1", Field.Store.YES));
-        doc.add(new TextField("text2", "test1", Field.Store.YES));
-        doc.add(new Field("unindexed2", "test1", customType3));
-        doc.add(new TextField("unstored2","test1", Field.Store.NO));
-        writer.addDocument(doc);
-      }
-      // new termvector fields
-
-      FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-      customType5.setStoreTermVectors(true);
-      FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-      customType6.setStoreTermVectors(true);
-      customType6.setStoreTermVectorOffsets(true);
-      FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-      customType7.setStoreTermVectors(true);
-      customType7.setStoreTermVectorPositions(true);
-      FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-      customType8.setStoreTermVectors(true);
-      customType8.setStoreTermVectorOffsets(true);
-      customType8.setStoreTermVectorPositions(true);
-      
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new TextField("tvnot", "tvnot", Field.Store.YES));
-        doc.add(new Field("termvector", "termvector", customType5));
-        doc.add(new Field("tvoffset", "tvoffset", customType6));
-        doc.add(new Field("tvposition", "tvposition", customType7));
-        doc.add(new Field("tvpositionoffset", "tvpositionoffset", customType8));
-        writer.addDocument(doc);
-      }
-      
-      writer.close();
-
-      // verify fields again
-      reader = DirectoryReader.open(d);
-      fieldInfos = MultiFields.getMergedFieldInfos(reader);
-
-      Collection<String> allFieldNames = new HashSet<>();
-      Collection<String> indexedFieldNames = new HashSet<>();
-      Collection<String> notIndexedFieldNames = new HashSet<>();
-      Collection<String> tvFieldNames = new HashSet<>();
-
-      for(FieldInfo fieldInfo : fieldInfos) {
-        final String name = fieldInfo.name;
-        allFieldNames.add(name);
-        if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
-          indexedFieldNames.add(name);
-        } else {
-          notIndexedFieldNames.add(name);
-        }
-        if (fieldInfo.hasVectors()) {
-          tvFieldNames.add(name);
-        }
-      }
-
-      assertTrue(allFieldNames.contains("keyword"));
-      assertTrue(allFieldNames.contains("text"));
-      assertTrue(allFieldNames.contains("unindexed"));
-      assertTrue(allFieldNames.contains("unstored"));
-      assertTrue(allFieldNames.contains("keyword2"));
-      assertTrue(allFieldNames.contains("text2"));
-      assertTrue(allFieldNames.contains("unindexed2"));
-      assertTrue(allFieldNames.contains("unstored2"));
-      assertTrue(allFieldNames.contains("tvnot"));
-      assertTrue(allFieldNames.contains("termvector"));
-      assertTrue(allFieldNames.contains("tvposition"));
-      assertTrue(allFieldNames.contains("tvoffset"));
-      assertTrue(allFieldNames.contains("tvpositionoffset"));
-      
-      // verify that only indexed fields were returned
-      assertEquals(11, indexedFieldNames.size());    // 6 original + the 5 termvector fields 
-      assertTrue(indexedFieldNames.contains("keyword"));
-      assertTrue(indexedFieldNames.contains("text"));
-      assertTrue(indexedFieldNames.contains("unstored"));
-      assertTrue(indexedFieldNames.contains("keyword2"));
-      assertTrue(indexedFieldNames.contains("text2"));
-      assertTrue(indexedFieldNames.contains("unstored2"));
-      assertTrue(indexedFieldNames.contains("tvnot"));
-      assertTrue(indexedFieldNames.contains("termvector"));
-      assertTrue(indexedFieldNames.contains("tvposition"));
-      assertTrue(indexedFieldNames.contains("tvoffset"));
-      assertTrue(indexedFieldNames.contains("tvpositionoffset"));
-      
-      // verify that only unindexed fields were returned
-      assertEquals(2, notIndexedFieldNames.size());    // the following fields
-      assertTrue(notIndexedFieldNames.contains("unindexed"));
-      assertTrue(notIndexedFieldNames.contains("unindexed2"));
-              
-      // verify index term vector fields  
-      assertEquals(tvFieldNames.toString(), 4, tvFieldNames.size());    // 4 fields have term vectors
-      assertTrue(tvFieldNames.contains("termvector"));
-
-      reader.close();
-      d.close();
-  }
-
-public void testTermVectors() throws Exception {
-  Directory d = newDirectory();
-  // set up writer
-  IndexWriter writer = new IndexWriter(
-      d,
-      newIndexWriterConfig(new MockAnalyzer(random()))
-          .setMergePolicy(newLogMergePolicy())
-  );
-  // want to get some more segments here
-  // new termvector fields
-  int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
-  FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-  customType5.setStoreTermVectors(true);
-  FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-  customType6.setStoreTermVectors(true);
-  customType6.setStoreTermVectorOffsets(true);
-  FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-  customType7.setStoreTermVectors(true);
-  customType7.setStoreTermVectorPositions(true);
-  FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-  customType8.setStoreTermVectors(true);
-  customType8.setStoreTermVectorOffsets(true);
-  customType8.setStoreTermVectorPositions(true);
-  for (int i = 0; i < 5 * mergeFactor; i++) {
-    Document doc = new Document();
-      doc.add(new TextField("tvnot", "one two two three three three", Field.Store.YES));
-      doc.add(new Field("termvector", "one two two three three three", customType5));
-      doc.add(new Field("tvoffset", "one two two three three three", customType6));
-      doc.add(new Field("tvposition", "one two two three three three", customType7));
-      doc.add(new Field("tvpositionoffset", "one two two three three three", customType8));
-      
-      writer.addDocument(doc);
-  }
-  writer.close();
-  d.close();
-}
-
-void assertTermDocsCount(String msg,
-                                   IndexReader reader,
-                                   Term term,
-                                   int expected)
-  throws IOException {
-  DocsEnum tdocs = TestUtil.docs(random(), reader,
-      term.field(),
-      new BytesRef(term.text()),
-      MultiFields.getLiveDocs(reader),
-      null,
-      0);
-  int count = 0;
-  if (tdocs != null) {
-    while(tdocs.nextDoc()!= DocIdSetIterator.NO_MORE_DOCS) {
-      count++;
     }
-  }
-  assertEquals(msg + ", count mismatch", expected, count);
-}
+    // new fields are in some different segments (we hope)
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = writer.newDocument();
+      doc.addAtom("keyword2", "test1");
+      doc.addLargeText("text2", "test1");
+      doc.addStoredString("unindexed2", "test1");
+      doc.addLargeText("unstored2", "test1");
+      writer.addDocument(doc);
+    }
+    // new termvector fields
 
-  
+    FieldTypes fieldTypes = writer.getFieldTypes();
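+    // Term-vector options are declared once per field name via FieldTypes, instead of per-Field FieldType instances.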
+    fieldTypes.enableTermVectors("termvector");
+
+    fieldTypes.enableTermVectors("tvoffset");
+    fieldTypes.enableTermVectorOffsets("tvoffset");
+
+    fieldTypes.enableTermVectors("tvposition");
+    fieldTypes.enableTermVectorPositions("tvposition");
+
+    fieldTypes.enableTermVectors("tvpositionoffset");
+    fieldTypes.enableTermVectorOffsets("tvpositionoffset");
+    fieldTypes.enableTermVectorPositions("tvpositionoffset");
+
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = writer.newDocument();
+      doc.addLargeText("tvnot", "tvnot");
+      doc.addLargeText("termvector", "termvector");
+      doc.addLargeText("tvoffset", "tvoffset");
+      doc.addLargeText("tvposition", "tvposition");
+      doc.addLargeText("tvpositionoffset", "tvpositionoffset");
+      writer.addDocument(doc);
+    }
+      
+    writer.close();
+
+    // verify fields again
+    reader = DirectoryReader.open(d);
+    fieldInfos = MultiFields.getMergedFieldInfos(reader);
+
+    Collection<String> allFieldNames = new HashSet<>();
+    Collection<String> indexedFieldNames = new HashSet<>();
+    Collection<String> notIndexedFieldNames = new HashSet<>();
+    Collection<String> tvFieldNames = new HashSet<>();
+
+    for(FieldInfo fieldInfo : fieldInfos) {
+      final String name = fieldInfo.name;
+      allFieldNames.add(name);
+      if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
+        indexedFieldNames.add(name);
+      } else {
+        notIndexedFieldNames.add(name);
+      }
+      if (fieldInfo.hasVectors()) {
+        tvFieldNames.add(name);
+      }
+    }
+
+    assertTrue(allFieldNames.contains("keyword"));
+    assertTrue(allFieldNames.contains("text"));
+    assertTrue(allFieldNames.contains("unindexed"));
+    assertTrue(allFieldNames.contains("unstored"));
+    assertTrue(allFieldNames.contains("keyword2"));
+    assertTrue(allFieldNames.contains("text2"));
+    assertTrue(allFieldNames.contains("unindexed2"));
+    assertTrue(allFieldNames.contains("unstored2"));
+    assertTrue(allFieldNames.contains("tvnot"));
+    assertTrue(allFieldNames.contains("termvector"));
+    assertTrue(allFieldNames.contains("tvposition"));
+    assertTrue(allFieldNames.contains("tvoffset"));
+    assertTrue(allFieldNames.contains("tvpositionoffset"));
+      
+    // verify that only indexed fields were returned
+    assertEquals(12, indexedFieldNames.size());    // 6 original + the 5 termvector fields + $fieldnames
+    assertTrue(indexedFieldNames.contains("keyword"));
+    assertTrue(indexedFieldNames.contains("text"));
+    assertTrue(indexedFieldNames.contains("unstored"));
+    assertTrue(indexedFieldNames.contains("keyword2"));
+    assertTrue(indexedFieldNames.contains("text2"));
+    assertTrue(indexedFieldNames.contains("unstored2"));
+    assertTrue(indexedFieldNames.contains("tvnot"));
+    assertTrue(indexedFieldNames.contains("termvector"));
+    assertTrue(indexedFieldNames.contains("tvposition"));
+    assertTrue(indexedFieldNames.contains("tvoffset"));
+    assertTrue(indexedFieldNames.contains("tvpositionoffset"));
+      
+    // verify that only unindexed fields were returned
+    assertEquals(2, notIndexedFieldNames.size());    // the following fields
+    assertTrue(notIndexedFieldNames.contains("unindexed"));
+    assertTrue(notIndexedFieldNames.contains("unindexed2"));
+              
+    // verify index term vector fields  
+    assertEquals(tvFieldNames.toString(), 4, tvFieldNames.size());    // 4 fields have term vectors
+    assertTrue(tvFieldNames.contains("termvector"));
+
+    reader.close();
+    d.close();
+  }
+
+  public void testTermVectors() throws Exception {
+    Directory d = newDirectory();
+    // set up writer
+    IndexWriter writer = new IndexWriter(
+                                         d,
+                                         newIndexWriterConfig(new MockAnalyzer(random()))
+                                         .setMergePolicy(newLogMergePolicy())
+                                         );
+    // want to get some more segments here
+    // new termvector fields
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termvector");
+    fieldTypes.enableTermVectors("tvoffset");
+    fieldTypes.enableTermVectorOffsets("tvoffset");
+    fieldTypes.enableTermVectors("tvposition");
+    fieldTypes.enableTermVectorPositions("tvposition");
+    fieldTypes.enableTermVectors("tvpositionoffset");
+    fieldTypes.enableTermVectorPositions("tvpositionoffset");
+    fieldTypes.enableTermVectorOffsets("tvpositionoffset");
+    for (int i = 0; i < 5 * mergeFactor; i++) {
+      Document doc = writer.newDocument();
+      doc.addLargeText("tvnot", "one two two three three three");
+      doc.addStoredString("termvector", "one two two three three three");
+      doc.addLargeText("tvoffset", "one two two three three three");
+      doc.addLargeText("tvposition", "one two two three three three");
+      doc.addLargeText("tvpositionoffset", "one two two three three three");
+      writer.addDocument(doc);
+    }
+    writer.close();
+    d.close();
+  }
+
+  void assertTermDocsCount(String msg,
+                           IndexReader reader,
+                           Term term,
+                           int expected)
+    throws IOException {
+    DocsEnum tdocs = TestUtil.docs(random(), reader,
+                                   term.field(),
+                                   new BytesRef(term.text()),
+                                   MultiFields.getLiveDocs(reader),
+                                   null,
+                                   0);
+    int count = 0;
+    if (tdocs != null) {
+      while(tdocs.nextDoc()!= DocIdSetIterator.NO_MORE_DOCS) {
+        count++;
+      }
+    }
+    assertEquals(msg + ", count mismatch", expected, count);
+  }
+
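+  // Hedged usage sketch for assertTermDocsCount above; the "content" field and
+  // the value "test" mirror what addDoc (later in this class) indexes.
+  public void testTermDocsCountSketch() throws Exception {
+    Directory d = newDirectory();
+    IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
+    addDoc(writer, "test");
+    writer.close();
+    IndexReader reader = DirectoryReader.open(d);
+    assertTermDocsCount("single doc", reader, new Term("content", "test"), 1);
+    reader.close();
+    d.close();
+  }
+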
   public void testBinaryFields() throws IOException {
     Directory dir = newDirectory();
     byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -381,17 +369,17 @@
     writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                     .setOpenMode(OpenMode.APPEND)
                                     .setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    doc.add(new StoredField("bin1", bin));
-    doc.add(new TextField("junk", "junk text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addStoredBinary("bin1", new BytesRef(bin));
+    doc.addLargeText("junk", "junk text");
     writer.addDocument(doc);
     writer.close();
     DirectoryReader reader = DirectoryReader.open(dir);
-    StoredDocument doc2 = reader.document(reader.maxDoc() - 1);
-    StorableField[] fields = doc2.getFields("bin1");
+    Document doc2 = reader.document(reader.maxDoc() - 1);
+    List<IndexableField> fields = doc2.getFields("bin1");
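+    // On this branch stored documents come back as plain Documents, and
+    // getFields returns a List rather than an array.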
     assertNotNull(fields);
-    assertEquals(1, fields.length);
-    StorableField b1 = fields[0];
+    assertEquals(1, fields.size());
+    IndexableField b1 = fields.get(0);
     assertTrue(b1.binaryValue() != null);
     BytesRef bytesRef = b1.binaryValue();
     assertEquals(bin.length, bytesRef.length);
@@ -411,8 +399,8 @@
     doc2 = reader.document(reader.maxDoc() - 1);
     fields = doc2.getFields("bin1");
     assertNotNull(fields);
-    assertEquals(1, fields.length);
-    b1 = fields[0];
+    assertEquals(1, fields.size());
+    b1 = fields.get(0);
     assertTrue(b1.binaryValue() != null);
     bytesRef = b1.binaryValue();
     assertEquals(bin.length, bytesRef.length);
@@ -438,35 +426,35 @@
     rmDir(fileDirName);
   }*/
   
-public void testFilesOpenClose() throws IOException {
-      // Create initial data set
-      Path dirFile = createTempDir("TestIndexReader.testFilesOpenClose");
-      Directory dir = newFSDirectory(dirFile);
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      addDoc(writer, "test");
-      writer.close();
-      dir.close();
+  public void testFilesOpenClose() throws IOException {
+    // Create initial data set
+    Path dirFile = createTempDir("TestIndexReader.testFilesOpenClose");
+    Directory dir = newFSDirectory(dirFile);
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    addDoc(writer, "test");
+    writer.close();
+    dir.close();
 
-      // Try to erase the data - this ensures that the writer closed all files
-      IOUtils.rm(dirFile);
-      dir = newFSDirectory(dirFile);
+    // Try to erase the data - this ensures that the writer closed all files
+    IOUtils.rm(dirFile);
+    dir = newFSDirectory(dirFile);
 
-      // Now create the data set again, just as before
-      writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                       .setOpenMode(OpenMode.CREATE));
-      addDoc(writer, "test");
-      writer.close();
-      dir.close();
+    // Now create the data set again, just as before
+    writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                              .setOpenMode(OpenMode.CREATE));
+    addDoc(writer, "test");
+    writer.close();
+    dir.close();
 
-      // Now open existing directory and test that reader closes all files
-      dir = newFSDirectory(dirFile);
-      DirectoryReader reader1 = DirectoryReader.open(dir);
-      reader1.close();
-      dir.close();
+    // Now open existing directory and test that reader closes all files
+    dir = newFSDirectory(dirFile);
+    DirectoryReader reader1 = DirectoryReader.open(dir);
+    reader1.close();
+    dir.close();
 
-      // The following will fail if reader did not close
-      // all files
-      IOUtils.rm(dirFile);
+    // The following will fail if reader did not close
+    // all files
+    IOUtils.rm(dirFile);
   }
 
   public void testOpenReaderAfterDelete() throws IOException {
@@ -495,60 +483,54 @@
     dir.close();
   }
 
-  static void addDocumentWithFields(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
+  static void addDocumentWithFields(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
       
-      FieldType customType3 = new FieldType();
-      customType3.setStored(true);
-      doc.add(newStringField("keyword", "test1", Field.Store.YES));
-      doc.add(newTextField("text", "test1", Field.Store.YES));
-      doc.add(newField("unindexed", "test1", customType3));
-      doc.add(new TextField("unstored","test1", Field.Store.NO));
-      writer.addDocument(doc);
-  }
-
-  static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
-  {
-    Document doc = new Document();
-    
-    FieldType customType3 = new FieldType();
-    customType3.setStored(true);
-    doc.add(newStringField("keyword2", "test1", Field.Store.YES));
-    doc.add(newTextField("text2", "test1", Field.Store.YES));
-    doc.add(newField("unindexed2", "test1", customType3));
-    doc.add(new TextField("unstored2","test1", Field.Store.NO));
+    doc.addAtom("keyword", "test1");
+    doc.addLargeText("text", "test1");
+    doc.addStoredString("unindexed", "test1");
+    doc.addLargeText("unstored","test1");
     writer.addDocument(doc);
   }
 
-  static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-      customType5.setStoreTermVectors(true);
-      FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-      customType6.setStoreTermVectors(true);
-      customType6.setStoreTermVectorOffsets(true);
-      FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-      customType7.setStoreTermVectors(true);
-      customType7.setStoreTermVectorPositions(true);
-      FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-      customType8.setStoreTermVectors(true);
-      customType8.setStoreTermVectorOffsets(true);
-      customType8.setStoreTermVectorPositions(true);
-      doc.add(newTextField("tvnot", "tvnot", Field.Store.YES));
-      doc.add(newField("termvector","termvector",customType5));
-      doc.add(newField("tvoffset","tvoffset", customType6));
-      doc.add(newField("tvposition","tvposition", customType7));
-      doc.add(newField("tvpositionoffset","tvpositionoffset", customType8));
-      
-      writer.addDocument(doc);
+  static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    
+    doc.addAtom("keyword2", "test1");
+    doc.addLargeText("text2", "test1");
+    doc.addStoredString("unindexed2", "test1");
+    doc.addLargeText("unstored2","test1");
+    writer.addDocument(doc);
+  }
+
+  static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termvector");
+
+    fieldTypes.enableTermVectors("tvoffset");
+    fieldTypes.enableTermVectorOffsets("tvoffset");
+
+    fieldTypes.enableTermVectors("tvposition");
+    fieldTypes.enableTermVectorOffsets("tvposition");
+
+    fieldTypes.enableTermVectors("tvpositionoffset");
+    fieldTypes.enableTermVectorOffsets("tvpositionoffset");
+    fieldTypes.enableTermVectorPositions("tvpositionoffset");
+
+    doc.addLargeText("tvnot", "tvnot");
+    doc.addLargeText("termvector", "termvector");
+    doc.addLargeText("tvoffset", "tvoffset");
+    doc.addLargeText("tvposition", "tvposition");
+    doc.addLargeText("tvpositionoffset", "tvpositionoffset");
+
+    writer.addDocument(doc);
   }
   
   static void addDoc(IndexWriter writer, String value) throws IOException {
-      Document doc = new Document();
-      doc.add(newTextField("content", value, Field.Store.NO));
-      writer.addDocument(doc);
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", value);
+    writer.addDocument(doc);
   }
 
   // TODO: maybe this can reuse the logic of test dueling codecs?
@@ -597,16 +579,16 @@
     // check stored fields
     for (int i = 0; i < index1.maxDoc(); i++) {
       if (liveDocs1 == null || liveDocs1.get(i)) {
-        StoredDocument doc1 = index1.document(i);
-        StoredDocument doc2 = index2.document(i);
-        List<StorableField> field1 = doc1.getFields();
-        List<StorableField> field2 = doc2.getFields();
+        Document doc1 = index1.document(i);
+        Document doc2 = index2.document(i);
+        List<IndexableField> field1 = doc1.getFields();
+        List<IndexableField> field2 = doc2.getFields();
         assertEquals("Different numbers of fields for doc " + i + ".", field1.size(), field2.size());
-        Iterator<StorableField> itField1 = field1.iterator();
-        Iterator<StorableField> itField2 = field2.iterator();
+        Iterator<IndexableField> itField1 = field1.iterator();
+        Iterator<IndexableField> itField2 = field2.iterator();
         while (itField1.hasNext()) {
-          Field curField1 = (Field) itField1.next();
-          Field curField2 = (Field) itField2.next();
+          IndexableField curField1 = itField1.next();
+          IndexableField curField2 = itField2.next();
           assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
           assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
         }          
@@ -634,7 +616,9 @@
       while(enum1.next() != null) {
         assertEquals("Different terms", enum1.term(), enum2.next());
         DocsAndPositionsEnum tp1 = enum1.docsAndPositions(liveDocs, null);
+        assertNotNull("no positions for field=" + field1 + " term=" + enum1.term(), tp1);
         DocsAndPositionsEnum tp2 = enum2.docsAndPositions(liveDocs, null);
+        assertNotNull("no positions field=" + field1 + " term=" + enum2.term(), tp2);
 
         while(tp1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           assertTrue(tp2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -705,16 +689,6 @@
     d.close();
   }      
 
-  static Document createDocument(String id) {
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setTokenized(false);
-    customType.setOmitNorms(true);
-    
-    doc.add(newField("id", id, customType));
-    return doc;
-  }
-  
   // LUCENE-1468 -- make sure on attempting to open an
   // DirectoryReader on a non-existent directory, you get a
   // good exception
@@ -738,9 +712,11 @@
     
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setMaxBufferedDocs(2));
-    writer.addDocument(createDocument("a"));
-    writer.addDocument(createDocument("a"));
-    writer.addDocument(createDocument("a"));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "a");
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.addDocument(doc);
     writer.close();
     
     Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
@@ -760,9 +736,9 @@
   public void testUniqueTermCount() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO));
-    doc.add(newTextField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z");
+    doc.addLargeText("number", "0 1 2 3 4 5 6 7 8 9");
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.commit();
@@ -791,7 +767,7 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
     writer.commit();
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
     DirectoryReader r = DirectoryReader.open(dir);
     assertTrue(r.isCurrent());
@@ -813,13 +789,13 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(null)
         .setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy())));
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     sdp.snapshot();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     sdp.snapshot();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     sdp.snapshot();
     writer.close();
@@ -836,8 +812,8 @@
   public void testTotalTermFreqCached() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newTextField("f", "a a b", Field.Store.NO));
+    Document d = writer.newDocument();
+    d.addLargeText("f", "a a b");
     writer.addDocument(d);
     DirectoryReader r = writer.getReader();
     writer.close();
@@ -856,11 +832,11 @@
   public void testGetSumDocFreq() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newTextField("f", "a", Field.Store.NO));
+    Document d = writer.newDocument();
+    d.addLargeText("f", "a");
     writer.addDocument(d);
-    d = new Document();
-    d.add(newTextField("f", "b", Field.Store.NO));
+    d = writer.newDocument();
+    d.addLargeText("f", "b");
     writer.addDocument(d);
     DirectoryReader r = writer.getReader();
     writer.close();
@@ -877,11 +853,11 @@
   public void testGetDocCount() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newTextField("f", "a", Field.Store.NO));
+    Document d = writer.newDocument();
+    d.addLargeText("f", "a");
     writer.addDocument(d);
-    d = new Document();
-    d.add(newTextField("f", "a", Field.Store.NO));
+    d = writer.newDocument();
+    d.addLargeText("f", "a");
     writer.addDocument(d);
     DirectoryReader r = writer.getReader();
     writer.close();
@@ -898,11 +874,11 @@
   public void testGetSumTotalTermFreq() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newTextField("f", "a b b", Field.Store.NO));
+    Document d = writer.newDocument();
+    d.addLargeText("f", "a b b");
     writer.addDocument(d);
-    d = new Document();
-    d.add(newTextField("f", "a a b", Field.Store.NO));
+    d = writer.newDocument();
+    d.addLargeText("f", "a a b");
     writer.addDocument(d);
     DirectoryReader r = writer.getReader();
     writer.close();
@@ -922,9 +898,9 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setMergePolicy(newLogMergePolicy()));
     ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     final DirectoryReader reader = writer.getReader();
     final int[] closeCount = new int[1];
@@ -955,7 +931,7 @@
   public void testOOBDocID() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     DirectoryReader r = writer.getReader();
     writer.close();
     r.document(0);
@@ -972,7 +948,7 @@
   public void testTryIncRef() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     DirectoryReader r = DirectoryReader.open(dir);
     assertTrue(r.tryIncRef());
@@ -986,7 +962,7 @@
   public void testStressTryIncRef() throws IOException, InterruptedException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     DirectoryReader r = DirectoryReader.open(dir);
     int numThreads = atLeast(2);
@@ -1038,16 +1014,16 @@
   public void testLoadCertainFields() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("field1", "foobar", Field.Store.YES));
-    doc.add(newStringField("field2", "foobaz", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("field1", "foobar");
+    doc.addAtom("field2", "foobaz");
     writer.addDocument(doc);
     DirectoryReader r = writer.getReader();
     writer.close();
     Set<String> fieldsToLoad = new HashSet<>();
     assertEquals(0, r.document(0, fieldsToLoad).getFields().size());
     fieldsToLoad.add("field1");
-    StoredDocument doc2 = r.document(0, fieldsToLoad);
+    Document doc2 = r.document(0, fieldsToLoad);
     assertEquals(1, doc2.getFields().size());
     assertEquals("foobar", doc2.get("field1"));
     r.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
index 6b93479..6218fe3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
@@ -30,10 +30,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -112,26 +109,19 @@
     DirectoryReader reader = DirectoryReader.open(dir);
     try {
       int M = 3;
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setTokenized(false);
-      FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-      customType2.setTokenized(false);
-      customType2.setOmitNorms(true);
-      FieldType customType3 = new FieldType();
-      customType3.setStored(true);
       for (int i=0; i<4; i++) {
         for (int j=0; j<M; j++) {
-          Document doc = new Document();
-          doc.add(newField("id", i+"_"+j, customType));
-          doc.add(newField("id2", i+"_"+j, customType2));
-          doc.add(newField("id3", i+"_"+j, customType3));
+          Document doc = iwriter.newDocument();
+          doc.addAtom("id", i+"_"+j);
+          doc.addAtom("id2", i+"_"+j);
+          doc.addStoredString("id3", i+"_"+j);
           iwriter.addDocument(doc);
           if (i>0) {
             int k = i-1;
             int n = j + k*M;
-            StoredDocument prevIterationDoc = reader.document(n);
+            Document prevIterationDoc = reader.document(n);
           assertNotNull(prevIterationDoc);
-            String id = prevIterationDoc.get("id");
+            String id = prevIterationDoc.getString("id");
             assertEquals(k+"_"+j, id);
           }
         }
@@ -206,8 +196,10 @@
     // NOTE: this also controls the number of threads!
     final int n = TestUtil.nextInt(random(), 20, 40);
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
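+    // Assumption: disableExistsFilters turns off the hidden $fieldnames
+    // bookkeeping field, keeping field and term counts stable for this test.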
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
     for (int i = 0; i < n; i++) {
-      writer.addDocument(createDocument(i, 3));
+      writer.addDocument(createDocument(writer, i, 3));
     }
     writer.forceMerge(1);
     writer.close();
@@ -216,7 +208,7 @@
       @Override
       protected void modifyIndex(int i) throws IOException {
        IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-       modifier.addDocument(createDocument(n + i, 6));
+       modifier.addDocument(createDocument(modifier, n + i, 6));
        modifier.close();
       }
 
@@ -436,9 +428,11 @@
   public static void createIndex(Random random, Directory dir, boolean multiSegment) throws IOException {
     IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, new MockAnalyzer(random))
         .setMergePolicy(new LogDocMergePolicy()));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableExistsFilters();
     
     for (int i = 0; i < 100; i++) {
-      w.addDocument(createDocument(i, 4));
+      w.addDocument(createDocument(w, i, 4));
       if (multiSegment && (i % 10) == 0) {
         w.commit();
       }
@@ -459,23 +453,18 @@
     r.close();
   }
 
-  public static Document createDocument(int n, int numFields) {
+  public static Document createDocument(IndexWriter w, int n, int numFields) {
     StringBuilder sb = new StringBuilder();
-    Document doc = new Document();
+    Document doc = w.newDocument();
     sb.append("a");
     sb.append(n);
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setTokenized(false);
-    customType2.setOmitNorms(true);
-    FieldType customType3 = new FieldType();
-    customType3.setStored(true);
-    doc.add(new TextField("field1", sb.toString(), Field.Store.YES));
-    doc.add(new Field("fielda", sb.toString(), customType2));
-    doc.add(new Field("fieldb", sb.toString(), customType3));
+    doc.addLargeText("field1", sb.toString());
+    doc.addLargeText("fielda", sb.toString());
+    doc.addStoredString("fieldb", sb.toString());
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(new TextField("field" + (i+1), sb.toString(), Field.Store.YES));
+      doc.addLargeText("field" + (i+1), sb.toString());
     }
     return doc;
   }
@@ -500,16 +489,16 @@
       }
       case 2: {
         IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-        w.addDocument(createDocument(101, 4));
+        w.addDocument(createDocument(w, 101, 4));
         w.forceMerge(1);
-        w.addDocument(createDocument(102, 4));
-        w.addDocument(createDocument(103, 4));
+        w.addDocument(createDocument(w, 102, 4));
+        w.addDocument(createDocument(w, 103, 4));
         w.close();
         break;
       }
       case 3: {
         IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-        w.addDocument(createDocument(101, 4));
+        w.addDocument(createDocument(w, 101, 4));
         w.close();
         break;
       }
@@ -552,9 +541,10 @@
             .setMaxBufferedDocs(-1)
             .setMergePolicy(newLogMergePolicy(10))
     );
+    FieldTypes fieldTypes = writer.getFieldTypes();
     for(int i=0;i<4;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
       writer.addDocument(doc);
       Map<String,String> data = new HashMap<>();
       data.put("index", i+"");
@@ -562,7 +552,7 @@
       writer.commit();
     }
     for(int i=0;i<4;i++) {
-      writer.deleteDocuments(new Term("id", ""+i));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", i));
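+      // newIntTerm (above) is assumed to encode i the same way addUniqueInt
+      // did at index time, so the delete matches the intended document.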
       Map<String,String> data = new HashMap<>();
       data.put("index", (4+i)+"");
       writer.setCommitData(data);
@@ -604,8 +594,8 @@
 
     // Can't use RIW because it randomly commits:
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("field", "value", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "value");
     w.addDocument(doc);
     w.commit();
     List<IndexCommit> commits = DirectoryReader.listCommits(dir);
@@ -633,11 +623,11 @@
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.getDefaultCodec());
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("id", "id");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.commit();
 
@@ -698,11 +688,11 @@
     Directory dir = new RAMDirectory();
 
     IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("id", "id");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.deleteDocuments(new Term("id", "id"));
     w.commit();
@@ -717,16 +707,16 @@
     }
 
     w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-    doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 13));
+    doc = w.newDocument();
+    doc.addAtom("id", "id");
+    doc.addInt("ndv", 13);
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.commit();
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.updateNumericDocValue(new Term("id", "id"), "ndv", 17L);
     w.commit();
@@ -748,11 +738,11 @@
     Directory dir = new RAMDirectory();
 
     IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("id", "id");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.deleteDocuments(new Term("id", "id"));
     w.commit();
@@ -767,13 +757,13 @@
     }
 
     w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-    doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 13));
+    doc = w.newDocument();
+    doc.addAtom("id", "id");
+    doc.addInt("ndv", 13);
     w.addDocument(doc);
     w.commit();
-    doc = new Document();
-    doc.add(newStringField("id", "id2", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("id", "id2");
     w.addDocument(doc);
     w.commit();
     w.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index 3eb7f11..b4238f2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -35,7 +35,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -129,6 +129,8 @@
                                          setMaxBufferedDocs(-1).
                                          setMergePolicy(newLogMergePolicy(10))
                                          );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
 
     SegmentCommitInfo si1 = indexDoc(writer, "test.txt");
     printSegment(out, si1);
@@ -174,6 +176,9 @@
                              setMergePolicy(newLogMergePolicy(10))
                              );
 
+    fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+
     si1 = indexDoc(writer, "test.txt");
     printSegment(out, si1);
 
@@ -199,30 +204,29 @@
   }
 
   private SegmentCommitInfo indexDoc(IndexWriter writer, String fileName)
-    throws Exception
-  {
+    throws Exception {
     Path path = workDir.resolve(fileName);
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     InputStreamReader is = new InputStreamReader(Files.newInputStream(path), StandardCharsets.UTF_8);
-    doc.add(new TextField("contents", is));
+    doc.addLargeText("contents", is);
     writer.addDocument(doc);
     writer.commit();
     is.close();
     return writer.newestSegment();
   }
 
-
   private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile)
     throws Exception {
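+    // The merge path is schema-aware on this branch: load the FieldTypes from
+    // the directory (no analyzer is needed just to merge) and hand it to the
+    // SegmentReaders and the SegmentMerger below.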
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(dir, null);
     IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
-    SegmentReader r1 = new SegmentReader(si1, context);
-    SegmentReader r2 = new SegmentReader(si2, context);
+    SegmentReader r1 = new SegmentReader(fieldTypes, si1, context);
+    SegmentReader r2 = new SegmentReader(fieldTypes, si2, context);
 
     final Codec codec = Codec.getDefault();
     TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
     final SegmentInfo si = new SegmentInfo(si1.info.dir, Version.LATEST, merged, -1, false, codec, null, StringHelper.randomId(), new HashMap<>());
 
-    SegmentMerger merger = new SegmentMerger(Arrays.<CodecReader>asList(r1, r2),
+    SegmentMerger merger = new SegmentMerger(fieldTypes, Arrays.<CodecReader>asList(r1, r2),
                                              si, InfoStream.getDefault(), trackingDir,
                                              new FieldInfos.FieldNumbers(), context);
 
@@ -246,7 +250,7 @@
 
   private void printSegment(PrintWriter out, SegmentCommitInfo si)
     throws Exception {
-    SegmentReader reader = new SegmentReader(si, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(null, si, newIOContext(random()));
 
     for (int i = 0; i < reader.numDocs(); i++)
       out.println(reader.document(i));
@@ -262,7 +266,6 @@
         out.println("    DF=" + tis.docFreq());
 
         DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null);
-
         while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           out.print(" doc=" + positions.docID());
           out.print(" TF=" + positions.freq());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
index cda1a97..f6a33d7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
@@ -18,7 +18,7 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FixedBitSet;
@@ -34,7 +34,7 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; i++) {
-      iw.addDocument(doc());
+      iw.addDocument(doc(iw));
     }
     IndexReader ir = iw.getReader();
     verifyCount(ir);
@@ -47,11 +47,14 @@
     dir.close();
   }
   
-  private Document doc() {
-    Document doc = new Document();
+  private Document doc(RandomIndexWriter w) {
+    Document doc = w.newDocument();
     int numFields = TestUtil.nextInt(random(), 1, 10);
+    FieldTypes fieldTypes = w.getFieldTypes();
     for (int i = 0; i < numFields; i++) {
-      doc.add(newStringField("" + TestUtil.nextInt(random(), 'a', 'z'), "" + TestUtil.nextInt(random(), 'a', 'z'), Field.Store.NO));
+      String fieldName = "" + TestUtil.nextInt(random(), 'a', 'z');
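+      // The random field name can repeat within one document, so mark it
+      // multiValued up front; otherwise addAtom would reject the duplicate.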
+      fieldTypes.setMultiValued(fieldName);
+      doc.addAtom(fieldName, "" + TestUtil.nextInt(random(), 'a', 'z'));
     }
     return doc;
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocInverterPerFieldErrorInfo.java b/lucene/core/src/test/org/apache/lucene/index/TestDocInverterPerFieldErrorInfo.java
index c426bb5..f652904 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocInverterPerFieldErrorInfo.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocInverterPerFieldErrorInfo.java
@@ -17,28 +17,25 @@
  * limitations under the License.
  */
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.PrintStreamInfoStream;
 import org.junit.Test;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-
 /**
  * Test adding to the info stream when there's an exception thrown during field analysis.
  */
 public class TestDocInverterPerFieldErrorInfo extends LuceneTestCase {
-  private static final FieldType storedTextType = new FieldType(TextField.TYPE_NOT_STORED);
 
   private static class BadNews extends RuntimeException {
     private BadNews(String message) {
@@ -67,15 +64,14 @@
   @Test
   public void testInfoStreamGetsFieldName() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter writer;
     IndexWriterConfig c = new IndexWriterConfig(new ThrowingAnalyzer());
     final ByteArrayOutputStream infoBytes = new ByteArrayOutputStream();
     PrintStream infoPrintStream = new PrintStream(infoBytes, true, IOUtils.UTF_8);
     PrintStreamInfoStream printStreamInfoStream = new PrintStreamInfoStream(infoPrintStream);
     c.setInfoStream(printStreamInfoStream);
-    writer = new IndexWriter(dir, c);
-    Document doc = new Document();
-    doc.add(newField("distinctiveFieldName", "aaa ", storedTextType));
+    IndexWriter writer = new IndexWriter(dir, c);
+    Document doc = writer.newDocument();
+    doc.addLargeText("distinctiveFieldName", "aaa ");
     try {
       writer.addDocument(doc);
       fail("Failed to fail.");
@@ -92,15 +88,14 @@
   @Test
   public void testNoExtraNoise() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter writer;
     IndexWriterConfig c = new IndexWriterConfig(new ThrowingAnalyzer());
     final ByteArrayOutputStream infoBytes = new ByteArrayOutputStream();
     PrintStream infoPrintStream = new PrintStream(infoBytes, true, IOUtils.UTF_8);
     PrintStreamInfoStream printStreamInfoStream = new PrintStreamInfoStream(infoPrintStream);
     c.setInfoStream(printStreamInfoStream);
-    writer = new IndexWriter(dir, c);
-    Document doc = new Document();
-    doc.add(newField("boringFieldName", "aaa ", storedTextType));
+    IndexWriter writer = new IndexWriter(dir, c);
+    Document doc = writer.newDocument();
+    doc.addLargeText("boringFieldName", "aaa ");
     try {
       writer.addDocument(doc);
     } catch(BadNews badNews) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValues.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValues.java
index d254815..1c84c4c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValues.java
@@ -17,14 +17,8 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -39,7 +33,7 @@
   public void testEmptyIndex() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
     
@@ -62,8 +56,10 @@
   public void testMisconfiguredField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setDocValuesType("foo", DocValuesType.NONE);
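+    // With doc values explicitly disabled above, "foo" is indexed as an atom
+    // but exposes no doc values, which is what this test checks.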
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
@@ -105,8 +101,8 @@
   public void testNumericField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 3));
+    Document doc = iw.newDocument();
+    doc.addInt("foo", 3);
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
@@ -141,8 +137,10 @@
   public void testBinaryField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("foo", new BytesRef("bar")));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    Document doc = iw.newDocument();
+    doc.addBinary("foo", new BytesRef("bar"));
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
@@ -180,8 +178,8 @@
   public void testSortedField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("bar")));
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", new BytesRef("bar"));
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
@@ -213,8 +211,10 @@
   public void testSortedSetField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("foo", new BytesRef("bar")));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("foo");
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", new BytesRef("bar"));
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
@@ -252,8 +252,11 @@
   public void testSortedNumericField() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("foo", 3));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("foo");
+
+    Document doc = iw.newDocument();
+    doc.addInt("foo", 3);
     iw.addDocument(doc);
     DirectoryReader dr = DirectoryReader.open(iw, true);
     LeafReader r = getOnlySegmentReader(dr);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
index a315212..00f0ed9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
@@ -23,17 +23,8 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -54,18 +45,18 @@
   public void testAddIndexes() throws IOException {
     Directory d1 = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d1);
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv", 1));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
+    doc.addInt("dv", 1);
     w.addDocument(doc);
     IndexReader r1 = w.getReader();
     w.close();
 
     Directory d2 = newDirectory();
     w = new RandomIndexWriter(random(), d2);
-    doc = new Document();
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv", 2));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "2");
+    doc.addInt("dv", 2);
     w.addDocument(doc);
     IndexReader r2 = w.getReader();
     w.close();
@@ -92,21 +83,20 @@
   public void testMultiValuedDocValuesField() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    Field f = new NumericDocValuesField("field", 17);
-    doc.add(f);
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
     
     // add the doc
     w.addDocument(doc);
     
     // Index doc values are single-valued so we should not
     // be able to add same field more than once:
-    doc.add(f);
     try {
-      w.addDocument(doc);
+      doc.addInt("field", 16);
       fail("didn't hit expected exception");
     } catch (IllegalArgumentException iae) {
       // expected
+      assertEquals("field=\"field\": this field is added more than once but is not multiValued", iae.getMessage());
     }
 
     DirectoryReader r = w.getReader();
@@ -119,17 +109,16 @@
   public void testDifferentTypedDocValuesField() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("field", 17));
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
     w.addDocument(doc);
     
     // Index doc values are single-valued so we should not
     // be able to add same field more than once:
-    doc.add(new BinaryDocValuesField("field", new BytesRef("blah")));
     try {
-      w.addDocument(doc);
+      doc.addBinary("field", new BytesRef("blah"));
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
 
@@ -143,17 +132,16 @@
   public void testDifferentTypedDocValuesField2() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("field", 17));
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
     w.addDocument(doc);
     
     // Index doc values are single-valued so we should not
     // be able to add same field more than once:
-    doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
     try {
-      w.addDocument(doc);
+      doc.addAtom("field", new BytesRef("hello"));
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     DirectoryReader r = w.getReader();
@@ -167,12 +155,15 @@
   public void testLengthPrefixAcrossTwoPages() throws Exception {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
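+    // Disable indexing for "field" so addAtom contributes doc values only,
+    // matching the original SortedDocValuesField-only document.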
+    fieldTypes.setIndexOptions("field", IndexOptions.NONE);
+
+    Document doc = w.newDocument();
     byte[] bytes = new byte[32764];
     BytesRef b = new BytesRef();
     b.bytes = bytes;
     b.length = bytes.length;
-    doc.add(new SortedDocValuesField("field", b));
+    doc.addAtom("field", b);
     w.addDocument(doc);
     bytes[0] = 1;
     w.addDocument(doc);
@@ -200,9 +191,9 @@
     iwconfig.setMergePolicy(newLogMergePolicy());
     IndexWriter writer = new IndexWriter(dir, iwconfig);
     for (int i = 0; i < 50; i++) {
-      Document doc = new Document();
-      doc.add(new NumericDocValuesField("dv", i));
-      doc.add(new TextField("docId", "" + i, Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addInt("dv", i);
+      doc.addLargeText("docId", "" + i);
       writer.addDocument(doc);
     }
     DirectoryReader r = writer.getReader();
@@ -213,10 +204,9 @@
     NumericDocValues dv = slow.getNumericDocValues("dv");
     for (int i = 0; i < 50; i++) {
       assertEquals(i, dv.get(i));
-      StoredDocument d = slow.document(i);
+      Document d = slow.document(i);
       // cannot use d.get("dv") due to another bug!
-      assertNull(d.getField("dv"));
-      assertEquals(Integer.toString(i), d.get("docId"));
+      assertEquals(Integer.toString(i), d.getString("docId"));
     }
     slow.close();
     writer.close();
@@ -227,15 +217,14 @@
   public void testMixedTypesSameDocument() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     try {
-      w.addDocument(doc);
+      doc.addAtom("foo", new BytesRef("hello"));
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     IndexReader ir = w.getReader();
@@ -249,16 +238,15 @@
   public void testMixedTypesDifferentDocuments() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
+    doc = w.newDocument();
     try {
-      w.addDocument(doc);
+      doc.addAtom("foo", new BytesRef("hello"));
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     IndexReader ir = w.getReader();
@@ -276,20 +264,16 @@
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("dv", new BytesRef("foo!"));
     iwriter.addDocument(doc);
     
-    doc.add(new SortedDocValuesField("dv", new BytesRef("bar!")));
     try {
-      iwriter.addDocument(doc);
+      doc.addAtom("dv", new BytesRef("bar!"));
       fail("didn't hit expected exception");
     } catch (IllegalArgumentException expected) {
       // expected
-      if (VERBOSE) {
-        System.out.println("hit exc:");
-        expected.printStackTrace(System.out);
-      }
+      assertEquals("field=\"dv\": this field is added more than once but is not multiValued", expected.getMessage());
     }
     IndexReader ir = iwriter.getReader();
     assertEquals(1, ir.numDocs());
@@ -306,16 +290,16 @@
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("foo!"));
     iwriter.addDocument(doc);
     
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("bar!")));
     try {
-      iwriter.addDocument(doc);
+      doc.addBinary("dv", new BytesRef("bar!"));
       fail("didn't hit expected exception");
     } catch (IllegalArgumentException expected) {
       // expected
+      assertEquals("field=\"dv\": this field is added more than once but is not multiValued", expected.getMessage());
     }
     
     IndexReader ir = iwriter.getReader();
@@ -334,16 +318,16 @@
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 1));
+    Document doc = iwriter.newDocument();
+    doc.addInt("dv", 1);
     iwriter.addDocument(doc);
     
-    doc.add(new NumericDocValuesField("dv", 2));
     try {
-      iwriter.addDocument(doc);
+      doc.addInt("dv", 2);
       fail("didn't hit expected exception");
     } catch (IllegalArgumentException expected) {
-      // expected
+      assertEquals("field=\"dv\": this field is added more than once but is not multiValued", expected.getMessage());
+      // EXPECTED
     }
     IndexReader ir = iwriter.getReader();
     assertEquals(1, ir.numDocs());
@@ -360,15 +344,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("just fine")));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("dv", new BytesRef("just fine"));
     iwriter.addDocument(doc);
     
-    doc = new Document();
+    doc = iwriter.newDocument();
     byte bytes[] = new byte[100000];
     BytesRef b = new BytesRef(bytes);
     random().nextBytes(bytes);
-    doc.add(new SortedDocValuesField("dv", b));
+    doc.addAtom("dv", b);
     try {
       iwriter.addDocument(doc);
       fail("did not get expected exception");
@@ -390,15 +374,17 @@
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("dv", new BytesRef("just fine")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    Document doc = iwriter.newDocument();
+    fieldTypes.setMultiValued("dv");
+    doc.addAtom("dv", new BytesRef("just fine"));
     iwriter.addDocument(doc);
     
-    doc = new Document();
+    doc = iwriter.newDocument();
     byte bytes[] = new byte[100000];
     BytesRef b = new BytesRef(bytes);
     random().nextBytes(bytes);
-    doc.add(new SortedSetDocValuesField("dv", b));
+    doc.addAtom("dv", b);
     try {
       iwriter.addDocument(doc);
       fail("did not get expected exception");
@@ -416,17 +402,16 @@
   public void testMixedTypesDifferentSegments() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     w.addDocument(doc);
     w.commit();
 
-    doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
+    doc = w.newDocument();
     try {
-      w.addDocument(doc);
+      doc.addAtom("foo", new BytesRef("hello"));
       fail("did not get expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     w.close();
@@ -437,13 +422,13 @@
   public void testMixedTypesAfterDeleteAll() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     w.addDocument(doc);
     w.deleteAll();
 
-    doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
+    doc = w.newDocument();
+    doc.addAtom("foo", new BytesRef("hello"));
     w.addDocument(doc);
     w.close();
     dir.close();
@@ -453,95 +438,20 @@
   public void testMixedTypesAfterReopenCreate() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     w.addDocument(doc);
     w.close();
 
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
     w = new IndexWriter(dir, iwc);
-    doc = new Document();
+    doc = w.newDocument();
     w.addDocument(doc);
     w.close();
     dir.close();
   }
 
-  public void testMixedTypesAfterReopenAppend1() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
-    w.addDocument(doc);
-    w.close();
-
-    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
-    try {
-      w.addDocument(doc);
-      fail("did not get expected exception");
-    } catch (IllegalArgumentException iae) {
-      // expected
-    }
-    w.close();
-    dir.close();
-  }
-
-  public void testMixedTypesAfterReopenAppend2() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))) ;
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("foo", new BytesRef("foo")));
-    w.addDocument(doc);
-    w.close();
-
-    doc = new Document();
-    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
-    doc.add(new BinaryDocValuesField("foo", new BytesRef("foo")));
-    try {
-      // NOTE: this case follows a different code path inside
-      // DefaultIndexingChain/FieldInfos, because the field (foo)
-      // is first added without DocValues:
-      w.addDocument(doc);
-      fail("did not get expected exception");
-    } catch (IllegalArgumentException iae) {
-      // expected
-    }
-    w.forceMerge(1);
-    w.close();
-    dir.close();
-  }
-
-  public void testMixedTypesAfterReopenAppend3() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))) ;
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("foo", new BytesRef("foo")));
-    w.addDocument(doc);
-    w.close();
-
-    doc = new Document();
-    w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
-    doc.add(new BinaryDocValuesField("foo", new BytesRef("foo")));
-    try {
-      // NOTE: this case follows a different code path inside
-      // DefaultIndexingChain/FieldInfos, because the field (foo)
-      // is first added without DocValues:
-      w.addDocument(doc);
-      fail("did not get expected exception");
-    } catch (IllegalArgumentException iae) {
-      // expected
-    }
-    // Also add another document so there is a segment to write here:
-    w.addDocument(new Document());
-    w.forceMerge(1);
-    w.close();
-    dir.close();
-  }
-
   // Two documents with same field as different types, added
   // from separate threads:
   public void testMixedTypesDifferentThreads() throws Exception {
@@ -552,24 +462,21 @@
     final AtomicBoolean hitExc = new AtomicBoolean();
     Thread[] threads = new Thread[3];
     for(int i=0;i<3;i++) {
-      Field field;
-      if (i == 0) {
-        field = new SortedDocValuesField("foo", new BytesRef("hello"));
-      } else if (i == 1) {
-        field = new NumericDocValuesField("foo", 0);
-      } else {
-        field = new BinaryDocValuesField("foo", new BytesRef("bazz"));
-      }
-      final Document doc = new Document();
-      doc.add(field);
-
+      final int what = i;
       threads[i] = new Thread() {
           @Override
           public void run() {
             try {
               startingGun.await();
-              w.addDocument(doc);
-            } catch (IllegalArgumentException iae) {
+              Document doc = w.newDocument();
+              if (what == 0) {
+                doc.addAtom("foo", new BytesRef("hello"));
+              } else if (what == 1) {
+                doc.addInt("foo", 0);
+              } else {
+                doc.addAtom("foo", new BytesRef("bazz"));
+              }
+            } catch (IllegalStateException ise) {
               // expected
               hitExc.set(true);
             } catch (Exception e) {
@@ -594,22 +501,22 @@
   public void testMixedTypesViaAddIndexes() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("foo", 0));
+    Document doc = w.newDocument();
+    doc.addInt("foo", 0);
     w.addDocument(doc);
 
     // Make 2nd index w/ inconsistent field
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random())));
-    doc = new Document();
-    doc.add(new SortedDocValuesField("foo", new BytesRef("hello")));
+    doc = w2.newDocument();
+    doc.addAtom("foo", new BytesRef("hello"));
     w2.addDocument(doc);
     w2.close();
 
     try {
       w.addIndexes(new Directory[] {dir2});
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
 
@@ -617,7 +524,7 @@
     try {
       TestUtil.addIndexesSlowly(w, r);
       fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
 
@@ -631,15 +538,14 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
     try {
-      writer.addDocument(doc);
+      doc.addAtom("dv", new BytesRef("foo"));
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     IndexReader ir = writer.getReader();
@@ -653,19 +559,18 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir, conf);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
     try {
-      writer.addDocument(doc);
+      doc.addAtom("dv", new BytesRef("foo"));
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     writer.close();
@@ -676,16 +581,16 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir, conf);
     writer.deleteAll();
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
+    doc.addAtom("dv", new BytesRef("foo"));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -695,12 +600,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.deleteAll();
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
+    doc.addAtom("dv", new BytesRef("foo"));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -710,13 +615,13 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.commit();
     writer.deleteAll();
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
+    doc.addAtom("dv", new BytesRef("foo"));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -726,15 +631,15 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
     writer = new IndexWriter(dir, conf);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
+    doc.addAtom("dv", new BytesRef("foo"));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -744,21 +649,20 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
     Directory dir2 = newDirectory();
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir2, conf);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
-    writer.addDocument(doc);
+    doc = writer.newDocument();
     try {
+      doc.addAtom("dv", new BytesRef("foo"));
       writer.addIndexes(dir);
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     writer.close();
@@ -771,22 +675,22 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
     Directory dir2 = newDirectory();
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir2, conf);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
+    doc.addAtom("dv", new BytesRef("foo"));
     writer.addDocument(doc);
     DirectoryReader reader = DirectoryReader.open(dir);
     try {
       TestUtil.addIndexesSlowly(writer, reader);
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     reader.close();
@@ -800,8 +704,8 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
@@ -809,12 +713,11 @@
     conf = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir2, conf);
     writer.addIndexes(dir);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
     try {
-      writer.addDocument(doc);
+      doc.addAtom("dv", new BytesRef("foo"));
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     writer.close();
@@ -826,8 +729,8 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     writer.close();
 
@@ -837,12 +740,11 @@
     DirectoryReader reader = DirectoryReader.open(dir);
     TestUtil.addIndexesSlowly(writer, reader);
     reader.close();
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
+    doc = writer.newDocument();
     try {
-      writer.addDocument(doc);
+      doc.addAtom("dv", new BytesRef("foo"));
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // expected
     }
     writer.close();
@@ -854,13 +756,13 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 0L));
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new TextField("dv", "some text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("dv", 0L));
+    doc = writer.newDocument();
+    doc.addLargeText("text", "some text");
+    doc.addLong("dv", 0L);
     writer.addDocument(doc);
     
     DirectoryReader r = writer.getReader();
@@ -875,58 +777,4 @@
     r.close();
     dir.close();
   }
-
-  public void testSameFieldNameForPostingAndDocValue() throws Exception {
-    // LUCENE-5192: FieldInfos.Builder neglected to update
-    // globalFieldNumbers.docValuesType map if the field existed, resulting in
-    // potentially adding the same field with different DV types.
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf);
-    
-    Document doc = new Document();
-    doc.add(new StringField("f", "mock-value", Store.NO));
-    doc.add(new NumericDocValuesField("f", 5));
-    writer.addDocument(doc);
-    writer.commit();
-    
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("f", new BytesRef("mock")));
-    try {
-      writer.addDocument(doc);
-      fail("should not have succeeded to add a field with different DV type than what already exists");
-    } catch (IllegalArgumentException e) {
-      writer.rollback();
-    }
-    
-    dir.close();
-  }
-
-  // LUCENE-6049
-  public void testExcIndexingDocBeforeDocValues() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
-    IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setDocValuesType(DocValuesType.SORTED);
-    ft.freeze();
-    Field field = new Field("test", "value", ft);
-    field.setTokenStream(new TokenStream() {
-        @Override
-        public boolean incrementToken() {
-          throw new RuntimeException("no");
-        }
-      });
-    doc.add(field);
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (RuntimeException re) {
-      // expected
-    }
-    w.addDocument(new Document());
-    w.close();
-    dir.close();
-  }
 }
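Every change to TestDocValuesIndexing.java above follows the same pattern: the per-field schema now lives in FieldTypes, documents are created through IndexWriter.newDocument(), and a conflicting value type is rejected at Document.addXXX() time with IllegalStateException, where the old API threw IllegalArgumentException from IndexWriter.addDocument(). A minimal standalone sketch of that flow, assuming the lucene6005 FieldTypes API exactly as the tests above exercise it (the RAMDirectory and WhitespaceAnalyzer choices are illustrative, not from this patch):

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.BytesRef;

    public class MixedTypesSketch {
      public static void main(String[] args) throws Exception {
        try (RAMDirectory dir = new RAMDirectory();
             IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()))) {
          Document doc = w.newDocument();
          doc.addInt("foo", 0);            // "foo" is now typed as int in the schema
          w.addDocument(doc);

          doc = w.newDocument();
          try {
            doc.addAtom("foo", new BytesRef("hello"));  // conflicting type for "foo"
          } catch (IllegalStateException ise) {
            // the conflict is detected here, before addDocument is ever called
          }
        }
      }
    }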
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
index 2c185cb..53dc43b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
@@ -20,11 +20,8 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
@@ -47,14 +44,16 @@
   public void testPositionsSimple() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random())));
+                                                     newIndexWriterConfig());
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableNorms(fieldName);
+    fieldTypes.disableStored(fieldName);
+
     for (int i = 0; i < 39; i++) {
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setOmitNorms(true);
-      doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 "
+      Document doc = writer.newDocument();
+      doc.addLargeText(fieldName, "1 2 3 4 5 6 7 8 9 10 "
           + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 "
-          + "1 2 3 4 5 6 7 8 9 10", customType));
+          + "1 2 3 4 5 6 7 8 9 10");
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
@@ -111,16 +110,16 @@
   public void testRandomPositions() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-          .setMergePolicy(newLogMergePolicy()));
+                                                     newIndexWriterConfig().setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
     int numDocs = atLeast(47);
     int max = 1051;
     int term = random().nextInt(max);
     Integer[][] positionsInDoc = new Integer[numDocs][];
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
+    fieldTypes.disableNorms(fieldName);
+    fieldTypes.disableStored(fieldName);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       ArrayList<Integer> positions = new ArrayList<>();
       StringBuilder builder = new StringBuilder();
       int num = atLeast(131);
@@ -135,7 +134,7 @@
         builder.append(term);
         positions.add(num);
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       positionsInDoc[i] = positions.toArray(new Integer[0]);
       writer.addDocument(doc);
     }
@@ -195,16 +194,17 @@
   public void testRandomDocs() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-                                                     newIndexWriterConfig(new MockAnalyzer(random()))
-                                                       .setMergePolicy(newLogMergePolicy()));
+                                                     newIndexWriterConfig().setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableNorms(fieldName);
+    fieldTypes.disableStored(fieldName);
+
     int numDocs = atLeast(49);
     int max = 15678;
     int term = random().nextInt(max);
     int[] freqInDoc = new int[numDocs];
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = new Document(fieldTypes);
       StringBuilder builder = new StringBuilder();
       for (int j = 0; j < 199; j++) {
         int nextInt = random().nextInt(max);
@@ -213,7 +213,7 @@
           freqInDoc[i]++;
         }
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       writer.addDocument(doc);
     }
 
@@ -275,12 +275,13 @@
   public void testLargeNumberOfPositions() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-        newIndexWriterConfig(new MockAnalyzer(random())));
+                                                     newIndexWriterConfig());
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableNorms(fieldName);
+    fieldTypes.disableStored(fieldName);
     int howMany = 1000;
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
     for (int i = 0; i < 39; i++) {
-      Document doc = new Document();
+      Document doc = new Document(fieldTypes);
       StringBuilder builder = new StringBuilder();
       for (int j = 0; j < howMany; j++) {
         if (j % 2 == 0) {
@@ -289,7 +290,7 @@
           builder.append("odd ");
         }
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       writer.addDocument(doc);
     }
 
@@ -330,9 +331,11 @@
   
   public void testDocsEnumStart() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("foo", "bar", Field.Store.NO));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableStored("foo");
+    Document doc = new Document(fieldTypes);
+    doc.addAtom("foo", "bar");
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     LeafReader r = getOnlySegmentReader(reader);
@@ -355,9 +358,11 @@
   
   public void testDocsAndPositionsEnumStart() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("foo", "bar", Field.Store.NO));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableStored("foo");
+    Document doc = new Document(fieldTypes);
+    doc.addLargeText("foo", "bar");
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     LeafReader r = getOnlySegmentReader(reader);
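In TestDocsAndPositions.java, per-field knobs that used to be carried on each Field via a FieldType instance (TextField.TYPE_NOT_STORED, setOmitNorms) are set once on the writer's FieldTypes and then apply to every document. A short fragment of the new shape, assuming the branch API as used above (the field name and text are illustrative, and writer is an open IndexWriter or RandomIndexWriter):

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableNorms("content");    // was: customType.setOmitNorms(true)
    fieldTypes.disableStored("content");   // was: TextField.TYPE_NOT_STORED

    Document doc = writer.newDocument();
    doc.addLargeText("content", "1 2 3 4 5 6 7 8 9 10");
    writer.addDocument(doc);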
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index 37dc798..949e5cc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -18,15 +18,14 @@
  */
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
@@ -54,41 +53,37 @@
   }
 
   public void testAddDocument() throws Exception {
-    Document testDoc = new Document();
-    DocHelper.setupDoc(testDoc);
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(testDoc);
-    writer.commit();
-    SegmentCommitInfo info = writer.newestSegment();
-    writer.close();
+    SegmentCommitInfo info = DocHelper.writeDoc(random(), dir);
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(dir, null);
+
     //After adding the document, we should be able to read it back in
-    SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(fieldTypes, info, newIOContext(random()));
     assertTrue(reader != null);
-    StoredDocument doc = reader.document(0);
+    Document doc = reader.document(0);
     assertTrue(doc != null);
 
     //System.out.println("Document: " + doc);
-    StorableField[] fields = doc.getFields("textField2");
-    assertTrue(fields != null && fields.length == 1);
-    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
-    assertTrue(fields[0].fieldType().storeTermVectors());
+    List<IndexableField> fields = doc.getFields("textField2");
+    assertTrue(fields != null && fields.size() == 1);
+    assertTrue(fields.get(0).stringValue().equals(DocHelper.FIELD_2_TEXT));
+    assertTrue(fields.get(0).fieldType().storeTermVectors());
 
     fields = doc.getFields("textField1");
-    assertTrue(fields != null && fields.length == 1);
-    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
-    assertFalse(fields[0].fieldType().storeTermVectors());
+    assertTrue(fields != null && fields.size() == 1);
+    assertTrue(fields.get(0).stringValue().equals(DocHelper.FIELD_1_TEXT));
+    assertFalse(fields.get(0).fieldType().storeTermVectors());
 
     fields = doc.getFields("keyField");
-    assertTrue(fields != null && fields.length == 1);
-    assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
+    assertTrue(fields != null && fields.size() == 1);
+    assertTrue(fields.get(0).stringValue().equals(DocHelper.KEYWORD_TEXT));
 
     fields = doc.getFields(DocHelper.NO_NORMS_KEY);
-    assertTrue(fields != null && fields.length == 1);
-    assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
+    assertTrue(fields != null && fields.size() == 1);
+    assertTrue(fields.get(0).stringValue().equals(DocHelper.NO_NORMS_TEXT));
 
     fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
-    assertTrue(fields != null && fields.length == 1);
-    assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
+    assertTrue(fields != null && fields.size() == 1);
+    assertTrue(fields.get(0).stringValue().equals(DocHelper.FIELD_3_TEXT));
 
     // test that the norms are not present in the segment if
     // omitNorms is true
@@ -101,29 +96,23 @@
   }
 
   public void testPositionIncrementGap() throws IOException {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStreamComponents createComponents(String fieldName) {
-        return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, false));
-      }
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = writer.getFieldTypes();
 
-      @Override
-      public int getPositionIncrementGap(String fieldName) {
-        return 500;
-      }
-    };
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-
-    Document doc = new Document();
-    doc.add(newTextField("repeated", "repeated one", Field.Store.YES));
-    doc.add(newTextField("repeated", "repeated two", Field.Store.YES));
+    fieldTypes.setMultiValued("repeated");
+    fieldTypes.setIndexOptions("repeated", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    fieldTypes.setAnalyzerPositionGap("repeated", 500);
+    Document doc = writer.newDocument();
+    doc.addLargeText("repeated", "repeated one");
+    doc.addLargeText("repeated", "repeated two");
 
     writer.addDocument(doc);
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+    
+    SegmentReader reader = new SegmentReader(FieldTypes.getFieldTypes(dir, new MockAnalyzer(random())),
+                                             info, newIOContext(random()));
 
     DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
                                                                           "repeated", new BytesRef("repeated"));
@@ -188,14 +177,15 @@
 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer));
 
-    Document doc = new Document();
-    doc.add(newTextField("f1", "a 5 a a", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("f1", "a 5 a a");
 
     writer.addDocument(doc);
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(FieldTypes.getFieldTypes(dir, analyzer),
+                                             info, newIOContext(random()));
 
     DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -213,9 +203,9 @@
 
   public void testPreAnalyzedField() throws IOException {
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
+    Document doc = writer.newDocument();
 
-    doc.add(new TextField("preanalyzed", new TokenStream() {
+    doc.addLargeText("preanalyzed", new TokenStream() {
       private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
       private int index = 0;
       
@@ -231,13 +221,14 @@
           return true;
         }        
       }
-      }));
+      });
     
     writer.addDocument(doc);
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(FieldTypes.getFieldTypes(dir, null),
+                                             info, newIOContext(random()));
 
     DocsAndPositionsEnum termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term1"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -256,43 +247,4 @@
     assertEquals(2, termPositions.nextPosition());
     reader.close();
   }
-
-  /**
-   * Test adding two fields with the same name, one indexed
-   * the other stored only. The omitNorms and omitTermFreqAndPositions setting
-   * of the stored field should not affect the indexed one (LUCENE-1590)
-   */
-  public void testLUCENE_1590() throws Exception {
-    Document doc = new Document();
-    // f1 has no norms
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
-    doc.add(newField("f1", "v1", customType));
-    doc.add(newField("f1", "v2", customType2));
-    // f2 has no TF
-    FieldType customType3 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType3.setIndexOptions(IndexOptions.DOCS);
-    Field f = newField("f2", "v1", customType3);
-    doc.add(f);
-    doc.add(newField("f2", "v2", customType2));
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(doc);
-    writer.forceMerge(1); // be sure to have a single segment
-    writer.close();
-
-    TestUtil.checkIndex(dir);
-
-    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(dir));
-    FieldInfos fi = reader.getFieldInfos();
-    // f1
-    assertFalse("f1 should have no norms", fi.fieldInfo("f1").hasNorms());
-    assertEquals("omitTermFreqAndPositions field bit should not be set for f1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f1").getIndexOptions());
-    // f2
-    assertTrue("f2 should have norms", fi.fieldInfo("f2").hasNorms());
-    assertEquals("omitTermFreqAndPositions field bit should be set for f2", IndexOptions.DOCS, fi.fieldInfo("f2").getIndexOptions());
-    reader.close();
-  }
 }
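The testPositionIncrementGap rewrite above moves the gap out of a custom Analyzer override (getPositionIncrementGap returning 500) and into the schema via FieldTypes.setAnalyzerPositionGap. A fragment of the new setup, assuming the branch API as exercised above (writer is an open IndexWriter):

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("repeated");                // two values per document
    fieldTypes.setAnalyzerPositionGap("repeated", 500);   // was: Analyzer.getPositionIncrementGap

    Document doc = writer.newDocument();
    doc.addLargeText("repeated", "repeated one");
    doc.addLargeText("repeated", "repeated two");         // positions jump by the 500 gap
    writer.addDocument(doc);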
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
index 521dd90..2e89c3e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
@@ -23,9 +23,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
@@ -108,30 +106,30 @@
   public static void createRandomIndex(int numdocs, RandomIndexWriter writer, long seed) throws IOException {
     Random random = new Random(seed);
     // primary source for our data is from linefiledocs, it's realistic.
-    LineFileDocs lineFileDocs = new LineFileDocs(random);
+    LineFileDocs lineFileDocs = new LineFileDocs(writer.w, random);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("sortedset");
+    fieldTypes.setMultiValued("sparsesortednum");
 
     // TODO: we should add other fields that use things like docs&freqs but omit positions,
     // because linefiledocs doesn't cover all the possibilities.
     for (int i = 0; i < numdocs; i++) {
       Document document = lineFileDocs.nextDoc();
       // grab the title and add some SortedSet instances for fun
-      String title = document.get("titleTokenized");
+      String title = document.getString("titleTokenized");
       String split[] = title.split("\\s+");
-      document.removeFields("sortedset");
       for (String trash : split) {
-        document.add(new SortedSetDocValuesField("sortedset", new BytesRef(trash)));
+        document.addAtom("sortedset", new BytesRef(trash));
       }
       // add a numeric dv field sometimes
-      document.removeFields("sparsenumeric");
       if (random.nextInt(4) == 2) {
-        document.add(new NumericDocValuesField("sparsenumeric", random.nextInt()));
+        document.addInt("sparsenumeric", random.nextInt());
       }
       // add sortednumeric sometimes
-      document.removeFields("sparsesortednum");
       if (random.nextInt(5) == 1) {
-        document.add(new SortedNumericDocValuesField("sparsesortednum", random.nextLong()));
+        document.addLong("sparsesortednum", random.nextLong());
         if (random.nextBoolean()) {
-          document.add(new SortedNumericDocValuesField("sparsesortednum", random.nextLong()));
+          document.addLong("sparsesortednum", random.nextLong());
         }
       }
       writer.addDocument(document);
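createRandomIndex above also shows the multi-valued case: a field must be declared with setMultiValued before a document carries more than one value for it, and repeated addAtom/addLong calls then replace the old removeFields-plus-re-add dance on a reused Document. A fragment, assuming the branch API as used above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("sortedset");

    Document doc = writer.newDocument();
    doc.addAtom("sortedset", new BytesRef("one"));
    doc.addAtom("sortedset", new BytesRef("two"));   // legal only because of setMultiValued
    writer.addDocument(doc);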
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestExceedMaxTermLength.java b/lucene/core/src/test/org/apache/lucene/index/TestExceedMaxTermLength.java
index 256b665..b2de9d4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestExceedMaxTermLength.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestExceedMaxTermLength.java
@@ -21,8 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -56,32 +55,23 @@
     
     IndexWriter w = new IndexWriter
       (dir, newIndexWriterConfig(random(), new MockAnalyzer(random())));
+    FieldTypes fieldTypes = w.getFieldTypes();
     try {
-      final FieldType ft = new FieldType();
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-      ft.setStored(random().nextBoolean());
-      ft.freeze();
-      
-      final Document doc = new Document();
+      final Document doc = w.newDocument();
       if (random().nextBoolean()) {
         // totally ok short field value
-        doc.add(new Field(TestUtil.randomSimpleString(random(), 1, 10),
-                          TestUtil.randomSimpleString(random(), 1, 10),
-                          ft));
+        doc.addLargeText(TestUtil.randomSimpleString(random(), 1, 10), TestUtil.randomSimpleString(random(), 1, 10));
       }
       // problematic field
       final String name = TestUtil.randomSimpleString(random(), 1, 50);
       final String value = TestUtil.randomSimpleString(random(),
                                                        minTestTermLength,
                                                        maxTestTermLegnth);
-      final Field f = new Field(name, value, ft);
       if (random().nextBoolean()) {
         // totally ok short field value
-        doc.add(new Field(TestUtil.randomSimpleString(random(), 1, 10),
-                          TestUtil.randomSimpleString(random(), 1, 10),
-                          ft));
+        doc.addLargeText(TestUtil.randomSimpleString(random(), 1, 10), TestUtil.randomSimpleString(random(), 1, 10));
       }
-      doc.add(f);
+      doc.addLargeText(name, value);
       
       try {
         w.addDocument(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
index 4fc541a..9e2e85f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
@@ -17,9 +17,10 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.ExitableDirectoryReader.ExitingReaderException;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PrefixQuery;
@@ -28,8 +29,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-
 /**
  * Test that uses a default/lucene Implementation of {@link QueryTimeout}
  * to exit out long running queries that take too long to iterate over Terms.
@@ -96,16 +95,16 @@
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
 
-    Document d1 = new Document();
-    d1.add(newTextField("default", "one two", Field.Store.YES));
+    Document d1 = writer.newDocument();
+    d1.addLargeText("default", "one two");
     writer.addDocument(d1);
 
-    Document d2 = new Document();
-    d2.add(newTextField("default", "one three", Field.Store.YES));
+    Document d2 = writer.newDocument();
+    d2.addLargeText("default", "one three");
     writer.addDocument(d2);
 
-    Document d3 = new Document();
-    d3.add(newTextField("default", "ones two four", Field.Store.YES));
+    Document d3 = writer.newDocument();
+    d3.addLargeText("default", "ones two four");
     writer.addDocument(d3);
 
     writer.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
deleted file mode 100644
index 68cd6a8..0000000
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
+++ /dev/null
@@ -1,178 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.Collections;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.NumericUtils;
-
-/** test tokenstream reuse by DefaultIndexingChain */
-public class TestFieldReuse extends BaseTokenStreamTestCase {
-  
-  public void testStringField() throws IOException {
-    StringField stringField = new StringField("foo", "bar", Field.Store.NO);
-    
-    // passing null
-    TokenStream ts = stringField.tokenStream(null, null);
-    assertTokenStreamContents(ts, 
-        new String[] { "bar" },
-        new int[]    { 0 },
-        new int[]    { 3 }
-    );
-    
-    // now reuse previous stream
-    stringField = new StringField("foo", "baz", Field.Store.NO);
-    TokenStream ts2 = stringField.tokenStream(null, ts);
-    assertSame(ts, ts);
-    assertTokenStreamContents(ts, 
-        new String[] { "baz" },
-        new int[]    { 0 },
-        new int[]    { 3 }
-    );
-    
-    // pass a bogus stream and ensure it's still ok
-    stringField = new StringField("foo", "beer", Field.Store.NO);
-    TokenStream bogus = new NumericTokenStream();
-    ts = stringField.tokenStream(null, bogus);
-    assertNotSame(ts, bogus);
-    assertTokenStreamContents(ts, 
-        new String[] { "beer" },
-        new int[]    { 0 },
-        new int[]    { 4 }
-    );
-  }
-  
-  public void testNumericReuse() throws IOException {
-    IntField intField = new IntField("foo", 5, Field.Store.NO);
-    
-    // passing null
-    TokenStream ts = intField.tokenStream(null, null);
-    assertTrue(ts instanceof NumericTokenStream);
-    assertEquals(NumericUtils.PRECISION_STEP_DEFAULT_32, ((NumericTokenStream)ts).getPrecisionStep());
-    assertNumericContents(5, ts);
-
-    // now reuse previous stream
-    intField = new IntField("foo", 20, Field.Store.NO);
-    TokenStream ts2 = intField.tokenStream(null, ts);
-    assertSame(ts, ts2);
-    assertNumericContents(20, ts);
-    
-    // pass a bogus stream and ensure it's still ok
-    intField = new IntField("foo", 2343, Field.Store.NO);
-    TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
-    ts = intField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(2343, ts);
-    
-    // pass another bogus stream (numeric, but different precision step!)
-    intField = new IntField("foo", 42, Field.Store.NO);
-    assert 3 != NumericUtils.PRECISION_STEP_DEFAULT;
-    bogus = new NumericTokenStream(3);
-    ts = intField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(42, ts);
-  }
-  
-  static class MyField implements IndexableField {
-    TokenStream lastSeen;
-    TokenStream lastReturned;
-    
-    @Override
-    public String name() {
-      return "foo";
-    }
-    
-    @Override
-    public IndexableFieldType fieldType() {
-      return StringField.TYPE_NOT_STORED;
-    }
-    
-    @Override
-    public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
-      lastSeen = reuse;
-      return lastReturned = new CannedTokenStream(new Token("unimportant", 0, 10));
-    }
-    
-    @Override
-    public float boost() {
-      return 1;
-    } 
-  }
-  
-  public void testIndexWriterActuallyReuses() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig iwc = new IndexWriterConfig(null);
-    IndexWriter iw = new IndexWriter(dir, iwc);
-    final MyField field1 = new MyField();
-    iw.addDocument(new IndexDocument() {
-      @Override
-      public Iterable<? extends IndexableField> indexableFields() {
-        return Collections.singletonList(field1);
-      }
-      @Override
-      public Iterable<StorableField> storableFields() {
-        return Collections.emptyList();
-      }
-    });
-    TokenStream previous = field1.lastReturned;
-    assertNotNull(previous);
-    
-    final MyField field2 = new MyField();
-    iw.addDocument(new IndexDocument() {
-      @Override
-      public Iterable<? extends IndexableField> indexableFields() {
-        return Collections.singletonList(field2);
-      }
-      @Override
-      public Iterable<StorableField> storableFields() {
-        return Collections.emptyList();
-      }
-    });
-    assertSame(previous, field2.lastSeen);
-    iw.close();
-    dir.close();
-  }
-  
-  private void assertNumericContents(int value, TokenStream ts) throws IOException {
-    assertTrue(ts instanceof NumericTokenStream);
-    NumericTermAttribute numericAtt = ts.getAttribute(NumericTermAttribute.class);
-    ts.reset();
-    boolean seen = false;
-    while (ts.incrementToken()) {
-      if (numericAtt.getShift() == 0) {
-        assertEquals(value, numericAtt.getRawValue());
-        seen = true;
-      }
-    }
-    ts.end();
-    ts.close();
-    assertTrue(seen);
-  }
-}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
index 1057e4f..6441d17 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
@@ -25,8 +25,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
@@ -39,70 +38,49 @@
 
 public class TestFieldsReader extends LuceneTestCase {
   private static Directory dir;
-  private static Document testDoc;
-  private static FieldInfos.Builder fieldInfos = null;
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    testDoc = new Document();
-    fieldInfos = new FieldInfos.Builder();
-    DocHelper.setupDoc(testDoc);
-    for (IndexableField field : testDoc.getFields()) {
-      FieldInfo fieldInfo = fieldInfos.getOrAdd(field.name());
-      IndexableFieldType ift = field.fieldType();
-      fieldInfo.setIndexOptions(ift.indexOptions());
-      if (ift.omitNorms()) {
-        fieldInfo.setOmitsNorms();
-      }
-      fieldInfo.setDocValuesType(ift.docValuesType());
-    }
     dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
-                               .setMergePolicy(newLogMergePolicy());
-    conf.getMergePolicy().setNoCFSRatio(0.0);
-    IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(testDoc);
-    writer.close();
+    DocHelper.writeDoc(random(), dir);
   }
 
   @AfterClass
   public static void afterClass() throws Exception {
     dir.close();
     dir = null;
-    fieldInfos = null;
-    testDoc = null;
   }
 
   public void test() throws IOException {
     assertTrue(dir != null);
-    assertTrue(fieldInfos != null);
     IndexReader reader = DirectoryReader.open(dir);
-    StoredDocument doc = reader.document(0);
+    Document doc = reader.document(0);
     assertTrue(doc != null);
     assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);
 
-    Field field = (Field) doc.getField(DocHelper.TEXT_FIELD_2_KEY);
+    IndexableField field = doc.getField(DocHelper.TEXT_FIELD_2_KEY);
     assertTrue(field != null);
     assertTrue(field.fieldType().storeTermVectors());
 
     assertFalse(field.fieldType().omitNorms());
-    assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, field.fieldType().indexOptions());
 
-    field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY);
+    field = doc.getField(DocHelper.TEXT_FIELD_3_KEY);
     assertTrue(field != null);
     assertFalse(field.fieldType().storeTermVectors());
     assertTrue(field.fieldType().omitNorms());
-    assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, field.fieldType().indexOptions());
 
-    field = (Field) doc.getField(DocHelper.NO_TF_KEY);
+    field = doc.getField(DocHelper.NO_TF_KEY);
     assertTrue(field != null);
     assertFalse(field.fieldType().storeTermVectors());
     assertFalse(field.fieldType().omitNorms());
     assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS);
 
-    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(dir, null);
+    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldTypes, DocHelper.TEXT_FIELD_3_KEY);
     reader.document(0, visitor);
-    final List<StorableField> fields = visitor.getDocument().getFields();
+    final List<IndexableField> fields = visitor.getDocument().getFields();
     assertEquals(1, fields.size());
     assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
     reader.close();
@@ -191,11 +169,11 @@
     try {
       Directory fsDir = newFSDirectory(indexDir);
       FaultyFSDirectory dir = new FaultyFSDirectory(fsDir);
-      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
-                                .setOpenMode(OpenMode.CREATE);
+      for(int i=0;i<2;i++) {
+        DocHelper.writeDoc(random(), dir);
+      }
+      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
       IndexWriter writer = new IndexWriter(dir, iwc);
-      for(int i=0;i<2;i++)
-        writer.addDocument(testDoc);
       writer.forceMerge(1);
       writer.close();
 
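TestFieldsReader now reads stored fields back as a plain Document whose getFields returns List<IndexableField>, and a selective stored-field visitor is constructed against the index's FieldTypes. A fragment showing the new read path, assuming the branch API as used above (field names are illustrative; imports match the test's):

    IndexReader reader = DirectoryReader.open(dir);
    Document doc = reader.document(0);                       // stored fields as a Document
    List<IndexableField> fields = doc.getFields("textField2");

    // selective loading now needs the index's FieldTypes:
    FieldTypes fieldTypes = FieldTypes.getFieldTypes(dir, null);
    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldTypes, "textField3");
    reader.document(0, visitor);
    Document onlyField3 = visitor.getDocument();
    reader.close();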
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
index 1dc07e5..15b03d0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
@@ -24,7 +24,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
@@ -118,16 +117,16 @@
 
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
 
-    Document d1 = new Document();
-    d1.add(newTextField("default", "one two", Field.Store.YES));
+    Document d1 = writer.newDocument();
+    d1.addLargeText("default", "one two");
     writer.addDocument(d1);
 
-    Document d2 = new Document();
-    d2.add(newTextField("default", "one three", Field.Store.YES));
+    Document d2 = writer.newDocument();
+    d2.addLargeText("default", "one three");
     writer.addDocument(d2);
 
-    Document d3 = new Document();
-    d3.add(newTextField("default", "two four", Field.Store.YES));
+    Document d3 = writer.newDocument();
+    d3.addLargeText("default", "two four");
     writer.addDocument(d3);
 
     writer.close();
@@ -195,7 +194,7 @@
   public void testUnwrap() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     DirectoryReader dr = w.getReader();
     LeafReader r = dr.leaves().get(0).reader();
     FilterLeafReader r2 = new FilterLeafReader(r);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
index b70c82b..559f8c7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
@@ -38,11 +38,11 @@
 
     for(int iter=0;iter<2;iter++) {
       if (iter == 0) {
-        Document doc = new Document();
-        doc.add(newTextField("field1", "this is field1", Field.Store.NO));
-        doc.add(newTextField("field2", "this is field2", Field.Store.NO));
-        doc.add(newTextField("field3", "aaa", Field.Store.NO));
-        doc.add(newTextField("field4", "bbb", Field.Store.NO));
+        Document doc = w.newDocument();
+        doc.addLargeText("field1", "this is field1");
+        doc.addLargeText("field2", "this is field2");
+        doc.addLargeText("field3", "aaa");
+        doc.addLargeText("field4", "bbb");
         for(int i=0;i<DOC_COUNT;i++) {
           w.addDocument(doc);
         }
@@ -65,8 +65,8 @@
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random()))
                                          .setCodec(TestUtil.alwaysPostingsFormat(TestUtil.getDefaultPostingsFormat())));
-    Document doc = new Document();
-    doc.add(newTextField("f", "a b c", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("f", "a b c");
     w.addDocument(doc);
     w.forceMerge(1);
     DirectoryReader r = w.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
index 189e0e3..7782d33 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
@@ -30,24 +30,9 @@
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 
 public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
 
-  private static LineFileDocs lineDocFile;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    lineDocFile = new LineFileDocs(random(), true);
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    lineDocFile.close();
-    lineDocFile = null;
-  }
-
   public void testFlushByRam() throws IOException, InterruptedException {
     final double ramBuffer = (TEST_NIGHTLY ? 1 : 10) + atLeast(2)
         + random().nextDouble();
@@ -78,6 +63,7 @@
     iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     IndexWriter writer = new IndexWriter(dir, iwc);
+    LineFileDocs docs = new LineFileDocs(writer, random());
     flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
     assertFalse(flushPolicy.flushOnDocCount());
     assertFalse(flushPolicy.flushOnDeleteTerms());
@@ -89,7 +75,7 @@
 
     IndexThread[] threads = new IndexThread[numThreads];
     for (int x = 0; x < threads.length; x++) {
-      threads[x] = new IndexThread(numDocs, numThreads, writer, lineDocFile,
+      threads[x] = new IndexThread(numDocs, numThreads, writer, docs,
           false);
       threads[x].start();
     }
@@ -97,6 +83,7 @@
     for (int x = 0; x < threads.length; x++) {
       threads[x].join();
     }
+    docs.close();
     final long maxRAMBytes = (long) (iwc.getRAMBufferSizeMB() * 1024. * 1024.);
     assertEquals(" all flushes must be due numThreads=" + numThreads, 0,
         flushControl.flushBytes());
@@ -137,6 +124,7 @@
       iwc.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
       iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
       IndexWriter writer = new IndexWriter(dir, iwc);
+      LineFileDocs docs = new LineFileDocs(writer, random());
       flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
       assertTrue(flushPolicy.flushOnDocCount());
       assertFalse(flushPolicy.flushOnDeleteTerms());
@@ -149,14 +137,14 @@
       IndexThread[] threads = new IndexThread[numThreads[i]];
       for (int x = 0; x < threads.length; x++) {
         threads[x] = new IndexThread(numDocs, numThreads[i], writer,
-            lineDocFile, false);
+            docs, false);
         threads[x].start();
       }
 
       for (int x = 0; x < threads.length; x++) {
         threads[x].join();
       }
-
+      docs.close();
       assertEquals(" all flushes must be due numThreads=" + numThreads[i], 0,
           flushControl.flushBytes());
       assertEquals(numDocumentsToIndex, writer.numDocs());
@@ -187,6 +175,7 @@
     iwc.setIndexerThreadPool(threadPool);
 
     IndexWriter writer = new IndexWriter(dir, iwc);
+    LineFileDocs docs = new LineFileDocs(writer, random());
     flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
     DocumentsWriter docsWriter = writer.getDocsWriter();
     assertNotNull(docsWriter);
@@ -196,7 +185,7 @@
 
     IndexThread[] threads = new IndexThread[numThreads];
     for (int x = 0; x < threads.length; x++) {
-      threads[x] = new IndexThread(numDocs, numThreads, writer, lineDocFile,
+      threads[x] = new IndexThread(numDocs, numThreads, writer, docs,
           true);
       threads[x].start();
     }
@@ -204,6 +193,8 @@
     for (int x = 0; x < threads.length; x++) {
       threads[x].join();
     }
+    docs.close();
+
     assertEquals(" all flushes must be due", 0, flushControl.flushBytes());
     assertEquals(numDocumentsToIndex, writer.numDocs());
     assertEquals(numDocumentsToIndex, writer.maxDoc());
@@ -255,16 +246,18 @@
      // with such a small ram buffer we should be stalled quite quickly
       iwc.setRAMBufferSizeMB(0.25);
       IndexWriter writer = new IndexWriter(dir, iwc);
+      LineFileDocs docs = new LineFileDocs(writer, random());
       IndexThread[] threads = new IndexThread[numThreads[i]];
       for (int x = 0; x < threads.length; x++) {
         threads[x] = new IndexThread(numDocs, numThreads[i], writer,
-            lineDocFile, false);
+            docs, false);
         threads[x].start();
       }
 
       for (int x = 0; x < threads.length; x++) {
         threads[x].join();
       }
+      docs.close();
       DocumentsWriter docsWriter = writer.getDocsWriter();
       assertNotNull(docsWriter);
       DocumentsWriterFlushControl flushControl = docsWriter.flushControl;
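TestFlushByRamOrCountsPolicy drops the static @BeforeClass/@AfterClass LineFileDocs and opens one per test instead, because LineFileDocs is now constructed against the writer whose schema its documents must match. A sketch of the per-test lifecycle, assuming dir, iwc, and numDocs as in the tests above:

    IndexWriter writer = new IndexWriter(dir, iwc);
    LineFileDocs docs = new LineFileDocs(writer, random());
    try {
      for (int i = 0; i < numDocs; i++) {
        writer.addDocument(docs.nextDoc());  // docs built against writer's schema
      }
    } finally {
      docs.close();  // was done once in @AfterClass before this change
    }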
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java b/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java
index 46c3e2c..17ddd1f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java
@@ -19,8 +19,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.TopDocs;
@@ -48,8 +46,8 @@
         sb.append(TestUtil.randomRealisticUnicodeString(random()));
         sb.append(' ');
       }
-      final Document doc = new Document();
-      doc.add(new TextField("field", sb.toString(), Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("field", sb.toString());
       w.addDocument(doc);
     }
     final IndexReader r = w.getReader();
@@ -74,7 +72,7 @@
     assertTrue(hits.totalHits > 0);
     final int queryCloneCount = dir.getInputCloneCount() - cloneCount;
     //System.out.println("query clone count=" + queryCloneCount);
-    assertTrue("too many calls to IndexInput.clone during TermRangeQuery: " + queryCloneCount, queryCloneCount < 50);
+    assertTrue("too many calls to IndexInput.clone during TermRangeQuery: " + queryCloneCount, queryCloneCount < 60);
     r.close();
     dir.close();
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestForceMergeForever.java b/lucene/core/src/test/org/apache/lucene/index/TestForceMergeForever.java
index 54f1008..57a2d8a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestForceMergeForever.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestForceMergeForever.java
@@ -62,7 +62,7 @@
     // Try to make an index that requires merging:
     w.getConfig().setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 11));
     final int numStartDocs = atLeast(20);
-    final LineFileDocs docs = new LineFileDocs(random(), true);
+    final LineFileDocs docs = new LineFileDocs(w, random());
     for(int docIDX=0;docIDX<numStartDocs;docIDX++) {
       w.addDocument(docs.nextDoc());
     }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
index 9f5fa2b..0b8646a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
@@ -211,11 +210,10 @@
     out.close();
   }
 
-  private void addDoc(IndexWriter writer, int id) throws IOException
-  {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
-    doc.add(newStringField("id", Integer.toString(id), Field.Store.NO));
+  private void addDoc(IndexWriter writer, int id) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addAtom("id", Integer.toString(id));
     writer.addDocument(doc);
   }
   
@@ -246,7 +244,7 @@
     });
     
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     // stop virus scanner
     stopScanning.set(true);
     iw.commit();
@@ -322,7 +320,7 @@
     
     // ensure we write _4 segment next
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     iw.close();
     sis = SegmentInfos.readLatestCommit(dir);
@@ -337,7 +335,7 @@
     
     // initial commit
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     iw.close();   
     
@@ -384,7 +382,7 @@
     
     // initial commit
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     iw.close();   
     
@@ -466,10 +464,12 @@
         } else if (random().nextInt(10) == 7) {
           w.getReader().close();
         } else {
-          Document doc = new Document();
-          doc.add(newTextField("field", "some text", Field.Store.NO));
+          Document doc = w.newDocument();
+          doc.addLargeText("field", "some text");
           w.addDocument(doc);
         }
+      } catch (AlreadyClosedException ace) {
+        // ok
       } catch (IOException ioe) {
         if (ioe.getMessage().contains("background merge hit exception")) {
           Throwable cause = ioe.getCause();
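TestIndexFileDeleter picks up the same document API plus two smaller changes: id fields become atoms (single untokenized terms) and an AlreadyClosedException from a concurrent close is now tolerated. A sketch of the rewritten addDoc, assuming an open IndexWriter writer and an int id:

    Document doc = writer.newDocument();
    doc.addLargeText("content", "aaa");       // analyzed text
    doc.addAtom("id", Integer.toString(id));  // untokenized term, replaces newStringField
    writer.addDocument(doc);
    writer.addDocument(writer.newDocument()); // even an empty doc comes from the writer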
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexReaderClose.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexReaderClose.java
index 44a4b48..f0a211d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexReaderClose.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexReaderClose.java
@@ -98,7 +98,7 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), newDirectory());
     final int numDocs = TestUtil.nextInt(random(), 1, 5);
     for (int i = 0; i < numDocs; ++i) {
-      w.addDocument(new Document());
+      w.addDocument(w.newDocument());
       if (random().nextBoolean()) {
         w.commit();
       }
@@ -172,5 +172,4 @@
       throw new IllegalStateException("GRRRRRRRRRRRR!");
     }
   }
-
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 1ba6355..ae1e573 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -43,24 +43,14 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
@@ -84,485 +74,428 @@
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestIndexWriter extends LuceneTestCase {
 
-    private static final FieldType storedTextType = new FieldType(TextField.TYPE_NOT_STORED);
-    public void testDocCount() throws IOException {
-        Directory dir = newDirectory();
+  public void testDocCount() throws IOException {
+    Directory dir = newDirectory();
 
-        IndexWriter writer = null;
-        IndexReader reader = null;
-        int i;
+    IndexWriter writer = null;
+    IndexReader reader = null;
+    int i;
 
-        long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
-        try {
-          IndexWriterConfig.setDefaultWriteLockTimeout(2000);
-          assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
-          writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        } finally {
-          IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
-        }
-
-        // add 100 documents
-        for (i = 0; i < 100; i++) {
-            addDocWithIndex(writer,i);
-        }
-        assertEquals(100, writer.maxDoc());
-        writer.close();
-
-        // delete 40 documents
-        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                        .setMergePolicy(NoMergePolicy.INSTANCE));
-        for (i = 0; i < 40; i++) {
-            writer.deleteDocuments(new Term("id", ""+i));
-        }
-        writer.close();
-
-        reader = DirectoryReader.open(dir);
-        assertEquals(60, reader.numDocs());
-        reader.close();
-
-        // merge the index down and check that the new doc count is correct
-        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        assertEquals(60, writer.numDocs());
-        writer.forceMerge(1);
-        assertEquals(60, writer.maxDoc());
-        assertEquals(60, writer.numDocs());
-        writer.close();
-
-        // check that the index reader gives the same numbers.
-        reader = DirectoryReader.open(dir);
-        assertEquals(60, reader.maxDoc());
-        assertEquals(60, reader.numDocs());
-        reader.close();
-
-        // make sure opening a new index for create over
-        // this existing one works correctly:
-        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                        .setOpenMode(OpenMode.CREATE));
-        assertEquals(0, writer.maxDoc());
-        assertEquals(0, writer.numDocs());
-        writer.close();
-        dir.close();
+    long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
+    try {
+      IndexWriterConfig.setDefaultWriteLockTimeout(2000);
+      assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
+      writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    } finally {
+      IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
     }
 
-    static void addDoc(IndexWriter writer) throws IOException
-    {
-        Document doc = new Document();
-        doc.add(newTextField("content", "aaa", Field.Store.NO));
-        writer.addDocument(doc);
+    // add 100 documents
+    for (i = 0; i < 100; i++) {
+      addDocWithIndex(writer, i);
     }
+    assertEquals(100, writer.maxDoc());
+    writer.close();
 
-    static void addDocWithIndex(IndexWriter writer, int index) throws IOException
-    {
-        Document doc = new Document();
-        doc.add(newField("content", "aaa " + index, storedTextType));
-        doc.add(newField("id", "" + index, storedTextType));
-        writer.addDocument(doc);
+    // delete 40 documents
+    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                             .setMergePolicy(NoMergePolicy.INSTANCE));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for (i = 0; i < 40; i++) {
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", i));
     }
+    writer.close();
 
+    reader = DirectoryReader.open(dir);
+    assertEquals(60, reader.numDocs());
+    reader.close();
 
+    // merge the index down and check that the new doc count is correct
+    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    assertEquals(60, writer.numDocs());
+    writer.forceMerge(1);
+    assertEquals(60, writer.maxDoc());
+    assertEquals(60, writer.numDocs());
+    writer.close();
 
-    // TODO: we have the logic in MDW to do this check, and it's better, because it knows about files it tried
-    // to delete but couldn't: we should replace this!!!!
-    public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
-      if (dir instanceof MockDirectoryWrapper) {
-        assertFalse("test is broken: should disable virus scanner", ((MockDirectoryWrapper)dir).getEnableVirusScanner());
+    // check that the index reader gives the same numbers.
+    reader = DirectoryReader.open(dir);
+    assertEquals(60, reader.maxDoc());
+    assertEquals(60, reader.numDocs());
+    reader.close();
+
+    // make sure opening a new index for create over
+    // this existing one works correctly:
+    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                             .setOpenMode(OpenMode.CREATE));
+    assertEquals(0, writer.maxDoc());
+    assertEquals(0, writer.numDocs());
+    writer.close();
+    dir.close();
+  }
+
+  static void addDoc(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    writer.addDocument(doc);
+  }
+
+  static void addDocWithIndex(IndexWriter writer, int index) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa " + index);
+    doc.addUniqueInt("id", index);
+    writer.addDocument(doc);
+  }
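+
+  // Sketch, not part of the original test: with the unique int id above,
+  // deletes go through a typed term built by the writer's FieldTypes,
+  // replacing new Term("id", "" + index):
+  static void deleteDocWithIndex(IndexWriter writer, int index) throws IOException {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    writer.deleteDocuments(fieldTypes.newIntTerm("id", index));
+  }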
+
+  // TODO: we have the logic in MDW to do this check, and it's better, because it knows about files it tried
+  // to delete but couldn't: we should replace this!!!!
+  public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
+    if (dir instanceof MockDirectoryWrapper) {
+      assertFalse("test is broken: should disable virus scanner", ((MockDirectoryWrapper)dir).getEnableVirusScanner());
+    }
+    String[] startFiles = dir.listAll();
+    new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random()))).rollback();
+    String[] endFiles = dir.listAll();
+
+    Arrays.sort(startFiles);
+    Arrays.sort(endFiles);
+
+    if (!Arrays.equals(startFiles, endFiles)) {
+      fail(message + ": before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
+    }
+  }
+
+  static String arrayToString(String[] l) {
+    String s = "";
+    for(int i=0;i<l.length;i++) {
+      if (i > 0) {
+        s += "\n    ";
       }
-      String[] startFiles = dir.listAll();
-      new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random()))).rollback();
-      String[] endFiles = dir.listAll();
-
-      Arrays.sort(startFiles);
-      Arrays.sort(endFiles);
-
-      if (!Arrays.equals(startFiles, endFiles)) {
-        fail(message + ": before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
-      }
+      s += l[i];
     }
+    return s;
+  }
 
-    static String arrayToString(String[] l) {
-      String s = "";
-      for(int i=0;i<l.length;i++) {
-        if (i > 0) {
-          s += "\n    ";
-        }
-        s += l[i];
-      }
-      return s;
-    }
+  // Make sure we can open an index for create even when a
+  // reader holds it open (this fails pre lock-less
+  // commits on windows):
+  public void testCreateWithReader() throws IOException {
+    Directory dir = newDirectory();
 
-    // Make sure we can open an index for create even when a
-    // reader holds it open (this fails pre lock-less
-    // commits on windows):
-    public void testCreateWithReader() throws IOException {
-      Directory dir = newDirectory();
+    // add one document & close writer
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    addDoc(writer);
+    writer.close();
 
-      // add one document & close writer
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    // now open reader:
+    IndexReader reader = DirectoryReader.open(dir);
+    assertEquals("should be one document", reader.numDocs(), 1);
+
+    // now open index for create:
+    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                             .setOpenMode(OpenMode.CREATE));
+    assertEquals("should be zero documents", writer.maxDoc(), 0);
+    addDoc(writer);
+    writer.close();
+
+    assertEquals("should be one document", reader.numDocs(), 1);
+    IndexReader reader2 = DirectoryReader.open(dir);
+    assertEquals("should be one document", reader2.numDocs(), 1);
+    reader.close();
+    reader2.close();
+
+    dir.close();
+  }
+
+  public void testChangesAfterClose() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    addDoc(writer);
+
+    // close
+    writer.close();
+    try {
       addDoc(writer);
-      writer.close();
+      fail("did not hit AlreadyClosedException");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+    dir.close();
+  }
 
-      // now open reader:
-      IndexReader reader = DirectoryReader.open(dir);
-      assertEquals("should be one document", reader.numDocs(), 1);
 
-      // now open index for create:
-      writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                      .setOpenMode(OpenMode.CREATE));
-      assertEquals("should be zero documents", writer.maxDoc(), 0);
-      addDoc(writer);
-      writer.close();
 
-      assertEquals("should be one document", reader.numDocs(), 1);
-      IndexReader reader2 = DirectoryReader.open(dir);
-      assertEquals("should be one document", reader2.numDocs(), 1);
-      reader.close();
-      reader2.close();
+  public void testIndexNoDocuments() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    writer.commit();
+    writer.close();
 
-      dir.close();
+    IndexReader reader = DirectoryReader.open(dir);
+    assertEquals(0, reader.maxDoc());
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+    writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                              .setOpenMode(OpenMode.APPEND));
+    writer.commit();
+    writer.close();
+
+    reader = DirectoryReader.open(dir);
+    assertEquals(0, reader.maxDoc());
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+
+  public void testSmallRAMBuffer() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(
+                                          dir,
+                                          newIndexWriterConfig(new MockAnalyzer(random()))
+                                          .setRAMBufferSizeMB(0.000001)
+                                          .setMergePolicy(newLogMergePolicy(10))
+                                          );
+    int lastNumSegments = getSegmentCount(dir);
+    for(int j=0;j<9;j++) {
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", "aaa" + j);
+      writer.addDocument(doc);
+      // Verify that with a tiny RAM buffer we see a new
+      // segment after every doc
+      int numSegments = getSegmentCount(dir);
+      assertTrue(numSegments > lastNumSegments);
+      lastNumSegments = numSegments;
+    }
+    writer.close();
+    dir.close();
+  }
+
+  /** Returns how many unique segment names are in the directory. */
+  private static int getSegmentCount(Directory dir) throws IOException {
+    Set<String> segments = new HashSet<>();
+    for(String file : dir.listAll()) {
+      segments.add(IndexFileNames.parseSegmentName(file));
     }
 
-    public void testChangesAfterClose() throws IOException {
-        Directory dir = newDirectory();
+    return segments.size();
+  }
 
-        IndexWriter writer = null;
+  // Make sure it's OK to change RAM buffer size and
+  // maxBufferedDocs in a write session
+  public void testChangingRAMBuffer() throws IOException {
+    Directory dir = newDirectory();      
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    writer.getConfig().setMaxBufferedDocs(10);
+    writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
 
-        writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        addDoc(writer);
-
-        // close
-        writer.close();
-        try {
-          addDoc(writer);
-          fail("did not hit AlreadyClosedException");
-        } catch (AlreadyClosedException e) {
-          // expected
-        }
-        dir.close();
-    }
-
-
-
-    public void testIndexNoDocuments() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      writer.commit();
-      writer.close();
-
-      IndexReader reader = DirectoryReader.open(dir);
-      assertEquals(0, reader.maxDoc());
-      assertEquals(0, reader.numDocs());
-      reader.close();
-
-      writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                       .setOpenMode(OpenMode.APPEND));
-      writer.commit();
-      writer.close();
-
-      reader = DirectoryReader.open(dir);
-      assertEquals(0, reader.maxDoc());
-      assertEquals(0, reader.numDocs());
-      reader.close();
-      dir.close();
-    }
-
-    public void testSmallRAMBuffer() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer  = new IndexWriter(
-          dir,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-              .setRAMBufferSizeMB(0.000001)
-              .setMergePolicy(newLogMergePolicy(10))
-      );
-      int lastNumSegments = getSegmentCount(dir);
-      for(int j=0;j<9;j++) {
-        Document doc = new Document();
-        doc.add(newField("field", "aaa" + j, storedTextType));
-        writer.addDocument(doc);
-        // Verify that with a tiny RAM buffer we see new
-        // segment after every doc
-        int numSegments = getSegmentCount(dir);
-        assertTrue(numSegments > lastNumSegments);
-        lastNumSegments = numSegments;
+    int lastFlushCount = -1;
+    for(int j=1;j<52;j++) {
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", "aaa" + j);
+      writer.addDocument(doc);
+      TestUtil.syncConcurrentMerges(writer);
+      int flushCount = writer.getFlushCount();
+      if (j == 1) {
+        lastFlushCount = flushCount;
+      } else if (j < 10) {
+        // No new files should be created
+        assertEquals(flushCount, lastFlushCount);
+      } else if (10 == j) {
+        assertTrue(flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+        writer.getConfig().setRAMBufferSizeMB(0.000001);
+        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      } else if (j < 20) {
+        assertTrue(flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+      } else if (20 == j) {
+        writer.getConfig().setRAMBufferSizeMB(16);
+        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        lastFlushCount = flushCount;
+      } else if (j < 30) {
+        assertEquals(flushCount, lastFlushCount);
+      } else if (30 == j) {
+        writer.getConfig().setRAMBufferSizeMB(0.000001);
+        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      } else if (j < 40) {
+        assertTrue(flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+      } else if (40 == j) {
+        writer.getConfig().setMaxBufferedDocs(10);
+        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        lastFlushCount = flushCount;
+      } else if (j < 50) {
+        assertEquals(flushCount, lastFlushCount);
+        writer.getConfig().setMaxBufferedDocs(10);
+        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      } else if (50 == j) {
+        assertTrue(flushCount > lastFlushCount);
       }
-      writer.close();
-      dir.close();
     }
+    writer.close();
+    dir.close();
+  }
 
-    /** Returns how many unique segment names are in the directory. */
-    private static int getSegmentCount(Directory dir) throws IOException {
-      Set<String> segments = new HashSet<>();
-      for(String file : dir.listAll()) {
-        segments.add(IndexFileNames.parseSegmentName(file));
-      }
+  public void testChangingRAMBuffer2() throws IOException {
+    Directory dir = newDirectory();      
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    writer.getConfig().setMaxBufferedDocs(10);
+    writer.getConfig().setMaxBufferedDeleteTerms(10);
+    writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
 
-      return segments.size();
+    for(int j=1;j<52;j++) {
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", "aaa" + j);
+      writer.addDocument(doc);
     }
-
-    // Make sure it's OK to change RAM buffer size and
-    // maxBufferedDocs in a write session
-    public void testChangingRAMBuffer() throws IOException {
-      Directory dir = newDirectory();      
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      writer.getConfig().setMaxBufferedDocs(10);
-      writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-
-      int lastFlushCount = -1;
-      for(int j=1;j<52;j++) {
-        Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, storedTextType));
-        writer.addDocument(doc);
-        TestUtil.syncConcurrentMerges(writer);
-        int flushCount = writer.getFlushCount();
-        if (j == 1)
-          lastFlushCount = flushCount;
-        else if (j < 10)
-          // No new files should be created
-          assertEquals(flushCount, lastFlushCount);
-        else if (10 == j) {
-          assertTrue(flushCount > lastFlushCount);
-          lastFlushCount = flushCount;
-          writer.getConfig().setRAMBufferSizeMB(0.000001);
-          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        } else if (j < 20) {
-          assertTrue(flushCount > lastFlushCount);
-          lastFlushCount = flushCount;
-        } else if (20 == j) {
-          writer.getConfig().setRAMBufferSizeMB(16);
-          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-          lastFlushCount = flushCount;
-        } else if (j < 30) {
-          assertEquals(flushCount, lastFlushCount);
-        } else if (30 == j) {
-          writer.getConfig().setRAMBufferSizeMB(0.000001);
-          writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        } else if (j < 40) {
-          assertTrue(flushCount> lastFlushCount);
-          lastFlushCount = flushCount;
-        } else if (40 == j) {
-          writer.getConfig().setMaxBufferedDocs(10);
-          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-          lastFlushCount = flushCount;
-        } else if (j < 50) {
-          assertEquals(flushCount, lastFlushCount);
-          writer.getConfig().setMaxBufferedDocs(10);
-          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        } else if (50 == j) {
-          assertTrue(flushCount > lastFlushCount);
-        }
-      }
-      writer.close();
-      dir.close();
-    }
-
-    public void testChangingRAMBuffer2() throws IOException {
-      Directory dir = newDirectory();      
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      writer.getConfig().setMaxBufferedDocs(10);
-      writer.getConfig().setMaxBufferedDeleteTerms(10);
-      writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-
-      for(int j=1;j<52;j++) {
-        Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, storedTextType));
-        writer.addDocument(doc);
-      }
       
-      int lastFlushCount = -1;
-      for(int j=1;j<52;j++) {
-        writer.deleteDocuments(new Term("field", "aaa" + j));
-        TestUtil.syncConcurrentMerges(writer);
-        int flushCount = writer.getFlushCount();
+    int lastFlushCount = -1;
+    for(int j=1;j<52;j++) {
+      writer.deleteDocuments(new Term("field", "aaa" + j));
+      TestUtil.syncConcurrentMerges(writer);
+      int flushCount = writer.getFlushCount();
        
-        if (j == 1)
-          lastFlushCount = flushCount;
-        else if (j < 10) {
-          // No new files should be created
-          assertEquals(flushCount, lastFlushCount);
-        } else if (10 == j) {
-          assertTrue("" + j, flushCount > lastFlushCount);
-          lastFlushCount = flushCount;
-          writer.getConfig().setRAMBufferSizeMB(0.000001);
-          writer.getConfig().setMaxBufferedDeleteTerms(1);
-        } else if (j < 20) {
-          assertTrue(flushCount > lastFlushCount);
-          lastFlushCount = flushCount;
-        } else if (20 == j) {
-          writer.getConfig().setRAMBufferSizeMB(16);
-          writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-          lastFlushCount = flushCount;
-        } else if (j < 30) {
-          assertEquals(flushCount, lastFlushCount);
-        } else if (30 == j) {
-          writer.getConfig().setRAMBufferSizeMB(0.000001);
-          writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-          writer.getConfig().setMaxBufferedDeleteTerms(1);
-        } else if (j < 40) {
-          assertTrue(flushCount> lastFlushCount);
-          lastFlushCount = flushCount;
-        } else if (40 == j) {
-          writer.getConfig().setMaxBufferedDeleteTerms(10);
-          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-          lastFlushCount = flushCount;
-        } else if (j < 50) {
-          assertEquals(flushCount, lastFlushCount);
-          writer.getConfig().setMaxBufferedDeleteTerms(10);
-          writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        } else if (50 == j) {
-          assertTrue(flushCount > lastFlushCount);
-        }
+      if (j == 1) {
+        lastFlushCount = flushCount;
+      } else if (j < 10) {
+        // No new files should be created
+        assertEquals(flushCount, lastFlushCount);
+      } else if (10 == j) {
+        assertTrue("" + j, flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+        writer.getConfig().setRAMBufferSizeMB(0.000001);
+        writer.getConfig().setMaxBufferedDeleteTerms(1);
+      } else if (j < 20) {
+        assertTrue(flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+      } else if (20 == j) {
+        writer.getConfig().setRAMBufferSizeMB(16);
+        writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        lastFlushCount = flushCount;
+      } else if (j < 30) {
+        assertEquals(flushCount, lastFlushCount);
+      } else if (30 == j) {
+        writer.getConfig().setRAMBufferSizeMB(0.000001);
+        writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        writer.getConfig().setMaxBufferedDeleteTerms(1);
+      } else if (j < 40) {
+        assertTrue(flushCount > lastFlushCount);
+        lastFlushCount = flushCount;
+      } else if (40 == j) {
+        writer.getConfig().setMaxBufferedDeleteTerms(10);
+        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+        lastFlushCount = flushCount;
+      } else if (j < 50) {
+        assertEquals(flushCount, lastFlushCount);
+        writer.getConfig().setMaxBufferedDeleteTerms(10);
+        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+      } else if (50 == j) {
+        assertTrue(flushCount > lastFlushCount);
       }
-      writer.close();
-      dir.close();
     }
+    writer.close();
+    dir.close();
+  }
 
-    public void testEnablingNorms() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                                   .setMaxBufferedDocs(10));
-      // Enable norms for only 1 doc, pre flush
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setOmitNorms(true);
-      for(int j=0;j<10;j++) {
-        Document doc = new Document();
-        Field f = null;
-        if (j != 8) {
-          f = newField("field", "aaa", customType);
-        }
-        else {
-          f = newField("field", "aaa", storedTextType);
-        }
-        doc.add(f);
-        writer.addDocument(doc);
-      }
-      writer.close();
-
-      Term searchTerm = new Term("field", "aaa");
-
-      IndexReader reader = DirectoryReader.open(dir);
-      IndexSearcher searcher = newSearcher(reader);
-      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals(10, hits.length);
-      reader.close();
-
-      writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
-      // Enable norms for only 1 doc, post flush
-      for(int j=0;j<27;j++) {
-        Document doc = new Document();
-        Field f = null;
-        if (j != 26) {
-          f = newField("field", "aaa", customType);
-        }
-        else {
-          f = newField("field", "aaa", storedTextType);
-        }
-        doc.add(f);
-        writer.addDocument(doc);
-      }
-      writer.close();
-      reader = DirectoryReader.open(dir);
-      searcher = newSearcher(reader);
-      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals(27, hits.length);
-      reader.close();
-
-      reader = DirectoryReader.open(dir);
-      reader.close();
-
-      dir.close();
+  public void testHighFreqTerm() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                                         .setRAMBufferSizeMB(0.01));
+    // Massive doc that has 128 K a's
+    StringBuilder b = new StringBuilder(1024*1024);
+    for(int i=0;i<4096;i++) {
+      b.append(" a a a a a a a a");
+      b.append(" a a a a a a a a");
+      b.append(" a a a a a a a a");
+      b.append(" a a a a a a a a");
     }
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
 
-    public void testHighFreqTerm() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                                  .setRAMBufferSizeMB(0.01));
-      // Massive doc that has 128 K a's
-      StringBuilder b = new StringBuilder(1024*1024);
-      for(int i=0;i<4096;i++) {
-        b.append(" a a a a a a a a");
-        b.append(" a a a a a a a a");
-        b.append(" a a a a a a a a");
-        b.append(" a a a a a a a a");
-      }
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      doc.add(newField("field", b.toString(), customType));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", b.toString());
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = DirectoryReader.open(dir);
+    assertEquals(1, reader.maxDoc());
+    assertEquals(1, reader.numDocs());
+    Term t = new Term("field", "a");
+    assertEquals(1, reader.docFreq(t));
+    DocsEnum td = TestUtil.docs(random(), reader,
+                                "field",
+                                new BytesRef("a"),
+                                MultiFields.getLiveDocs(reader),
+                                null,
+                                DocsEnum.FLAG_FREQS);
+    td.nextDoc();
+    assertEquals(128*1024, td.freq());
+    reader.close();
+    dir.close();
+  }
+
+  public void testFlushWithNoMerging() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+                                         dir,
+                                         newIndexWriterConfig(new MockAnalyzer(random()))
+                                         .setMaxBufferedDocs(2)
+                                         .setMergePolicy(newLogMergePolicy(10))
+                                         );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "aaa");
+    for(int i=0;i<19;i++) {
       writer.addDocument(doc);
-      writer.close();
-
-      IndexReader reader = DirectoryReader.open(dir);
-      assertEquals(1, reader.maxDoc());
-      assertEquals(1, reader.numDocs());
-      Term t = new Term("field", "a");
-      assertEquals(1, reader.docFreq(t));
-      DocsEnum td = TestUtil.docs(random(), reader,
-          "field",
-          new BytesRef("a"),
-          MultiFields.getLiveDocs(reader),
-          null,
-          DocsEnum.FLAG_FREQS);
-      td.nextDoc();
-      assertEquals(128*1024, td.freq());
-      reader.close();
-      dir.close();
     }
+    writer.flush(false, true);
+    writer.close();
+    SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
+    // Since we flushed w/o allowing merging we should now
+    // have 10 segments
+    assertEquals(10, sis.size());
+    dir.close();
+  }
 
-    public void testFlushWithNoMerging() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer = new IndexWriter(
-          dir,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-              .setMaxBufferedDocs(2)
-              .setMergePolicy(newLogMergePolicy(10))
-      );
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      doc.add(newField("field", "aaa", customType));
-      for(int i=0;i<19;i++)
-        writer.addDocument(doc);
-      writer.flush(false, true);
-      writer.close();
-      SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
-      // Since we flushed w/o allowing merging we should now
-      // have 10 segments
-      assertEquals(10, sis.size());
-      dir.close();
+  // Make sure we can flush segment w/ norms, then add
+  // empty doc (no norms) and flush
+  public void testEmptyDocAfterFlushingRealDoc() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "aaa");
+    writer.addDocument(doc);
+    writer.commit();
+    if (VERBOSE) {
+      System.out.println("\nTEST: now add empty doc");
     }
-
-    // Make sure we can flush segment w/ norms, then add
-    // empty doc (no norms) and flush
-    public void testEmptyDocAfterFlushingRealDoc() throws IOException {
-      Directory dir = newDirectory();
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      doc.add(newField("field", "aaa", customType));
-      writer.addDocument(doc);
-      writer.commit();
-      if (VERBOSE) {
-        System.out.println("\nTEST: now add empty doc");
-      }
-      writer.addDocument(new Document());
-      writer.close();
-      IndexReader reader = DirectoryReader.open(dir);
-      assertEquals(2, reader.numDocs());
-      reader.close();
-      dir.close();
-    }
-
+    writer.addDocument(writer.newDocument());
+    writer.close();
+    IndexReader reader = DirectoryReader.open(dir);
+    assertEquals(2, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
 
 
   /**
@@ -574,11 +507,12 @@
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 
-    Document document = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    document.add(newField("tvtest", "", customType));
-    iw.addDocument(document);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("tvtest");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("tvtest", "");
+    iw.addDocument(doc);
     iw.close();
     dir.close();
   }
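Both term-vector hunks above make the same move: the per-field FieldType flags (setStoreTermVectors and friends) become one-time schema calls on the writer. A condensed sketch, assuming an open IndexWriter iw on this branch:

    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.enableTermVectors("tvtest");          // was customType.setStoreTermVectors(true)
    fieldTypes.enableTermVectorPositions("tvtest");  // enable only what the test needs
    fieldTypes.enableTermVectorOffsets("tvtest");

    Document doc = iw.newDocument();
    doc.addLargeText("tvtest", "a b c");
    iw.addDocument(doc);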
@@ -593,10 +527,11 @@
                                  .setMergePolicy(newLogMergePolicy());
       ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
       IndexWriter iw = new IndexWriter(dir, conf);
-      Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      document.add(newField("tvtest", "a b c", customType));
+
+      FieldTypes fieldTypes = iw.getFieldTypes();
+      fieldTypes.enableTermVectors("tvtest");
+      Document document = iw.newDocument();
+      document.addLargeText("tvtest", "a b c");
       Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
       for(int i=0;i<4;i++)
         iw.addDocument(document);
@@ -607,65 +542,19 @@
     }
   }
 
-  public void testVariableSchema() throws Exception {
-    Directory dir = newDirectory();
-    for(int i=0;i<20;i++) {
-      if (VERBOSE) {
-        System.out.println("TEST: iter=" + i);
-      }
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                                  .setMaxBufferedDocs(2)
-                                                  .setMergePolicy(newLogMergePolicy()));
-      //LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
-      //lmp.setMergeFactor(2);
-      //lmp.setNoCFSRatio(0.0);
-      Document doc = new Document();
-      String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      FieldType type = null;
-      if (i == 7) {
-        // Add empty docs here
-        doc.add(newTextField("content3", "", Field.Store.NO));
-      } else {
-        if (i%2 == 0) {
-          doc.add(newField("content4", contents, customType));
-          type = customType;
-        } else
-          type = TextField.TYPE_NOT_STORED; 
-        doc.add(newTextField("content1", contents, Field.Store.NO));
-        doc.add(newField("content3", "", customType));
-        doc.add(newField("content5", "", type));
-      }
-
-      for(int j=0;j<4;j++)
-        writer.addDocument(doc);
-
-      writer.close();
-
-      if (0 == i % 4) {
-        writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
-        //lmp2.setNoCFSRatio(0.0);
-        writer.forceMerge(1);
-        writer.close();
-      }
-    }
-    dir.close();
-  }
-
   // LUCENE-1084: test unlimited field length
   public void testUnlimitedMaxFieldLength() throws IOException {
     Directory dir = newDirectory();
 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     StringBuilder b = new StringBuilder();
-    for(int i=0;i<10000;i++)
+    for(int i=0;i<10000;i++) {
       b.append(" a");
+    }
     b.append(" x");
-    doc.add(newTextField("field", b.toString(), Field.Store.NO));
+    doc.addLargeText("field", b.toString());
     writer.addDocument(doc);
     writer.close();
 
@@ -676,14 +565,12 @@
     dir.close();
   }
 
-
-
   // LUCENE-1179
   public void testEmptyFieldName() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("", "a b c", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("", "a b c");
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -692,8 +579,8 @@
   public void testEmptyFieldNameTerms() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("", "a b c", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("", "a b c");
     writer.addDocument(doc);  
     writer.close();
     DirectoryReader reader = DirectoryReader.open(dir);
@@ -710,11 +597,13 @@
   public void testEmptyFieldNameWithEmptyTerm() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("", "", Field.Store.NO));
-    doc.add(newStringField("", "a", Field.Store.NO));
-    doc.add(newStringField("", "b", Field.Store.NO));
-    doc.add(newStringField("", "c", Field.Store.NO));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("");
+    Document doc = writer.newDocument();
+    doc.addAtom("", "");
+    doc.addAtom("", "a");
+    doc.addAtom("", "b");
+    doc.addAtom("", "c");
     writer.addDocument(doc);  
     writer.close();
     DirectoryReader reader = DirectoryReader.open(dir);
@@ -729,8 +618,6 @@
     dir.close();
   }
 
-
-
   private static final class MockIndexWriter extends IndexWriter {
 
     public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
@@ -756,9 +643,8 @@
   public void testDoBeforeAfterFlush() throws IOException {
     Directory dir = newDirectory();
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    doc.add(newField("field", "a field", customType));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a field");
     w.addDocument(doc);
     w.commit();
     assertTrue(w.beforeWasCalled);
@@ -800,8 +686,8 @@
 
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new TextField("field", tokens));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", tokens);
     try {
       w.addDocument(doc);
       fail("did not hit expected exception");
@@ -818,14 +704,13 @@
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setPositionIncrementGap( 100 );
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    Field f = newField("field", "", customType);
-    Field f2 = newField("field", "crunch man", customType);
-    doc.add(f);
-    doc.add(f2);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "");
+    doc.addLargeText("field", "crunch man");
     w.addDocument(doc);
     w.close();
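The hunk above also shows the multi-valued case: a field may be added to one document more than once only after the schema marks it multi-valued, and the analyzer's position increment gap is applied between the values. A sketch, assuming writer w and the analyzer configured with setPositionIncrementGap(100) as above:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.setMultiValued("field");      // required before adding "field" twice
    Document doc = w.newDocument();
    doc.addLargeText("field", "");            // first value: empty
    doc.addLargeText("field", "crunch man");  // the gap is applied before this value
    w.addDocument(doc);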
 
@@ -855,14 +740,13 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setMaxBufferedDocs(2));
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.addDocument(doc);
@@ -871,7 +755,16 @@
 
     Directory dir2 = newDirectory();
     IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random())));
+
+    fieldTypes = writer2.getFieldTypes();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
+
+    doc = writer2.newDocument();
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
     writer2.addDocument(doc);
+    writer2.commit();
     writer2.close();
 
     DirectoryReader r1 = DirectoryReader.open(dir2);
@@ -916,29 +809,36 @@
           });
       }
       IndexWriter w = new IndexWriter(adder, conf);
-      Document doc = new Document();
-      doc.add(newStringField(random, "id", "500", Field.Store.NO));
-      doc.add(newField(random, "field", "some prepackaged text contents", storedTextType));
-      doc.add(new BinaryDocValuesField("binarydv", new BytesRef("500")));
-      doc.add(new NumericDocValuesField("numericdv", 500));
-      doc.add(new SortedDocValuesField("sorteddv", new BytesRef("500")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("one")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("two")));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 4));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 3));
+      FieldTypes fieldTypes = w.getFieldTypes();
+      fieldTypes.setMultiValued("sortedsetdv");
+      fieldTypes.setMultiValued("sortednumericdv");
+      fieldTypes.enableSorting("sorteddv");
+      fieldTypes.enableSorting("sortedsetdv");
+
+      Document doc = w.newDocument();
+      doc.addInt("id", 500);
+      doc.addStoredString("field", "some prepackaged text contents");
+      doc.addBinary("binarydv", new BytesRef("500"));
+      doc.addInt("numericdv", 500);
+      doc.addBinary("sorteddv", new BytesRef("500"));
+      doc.addBinary("sortedsetdv", new BytesRef("one"));
+      doc.addBinary("sortedsetdv", new BytesRef("two"));
+      doc.addInt("sortednumericdv", 4);
+      doc.addInt("sortednumericdv", 3);
       w.addDocument(doc);
-      doc = new Document();
-      doc.add(newStringField(random, "id", "501", Field.Store.NO));
-      doc.add(newField(random, "field", "some more contents", storedTextType));
-      doc.add(new BinaryDocValuesField("binarydv", new BytesRef("501")));
-      doc.add(new NumericDocValuesField("numericdv", 501));
-      doc.add(new SortedDocValuesField("sorteddv", new BytesRef("501")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("two")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("three")));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 6));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 1));
+
+      doc = w.newDocument();
+      doc.addInt("id", 501);
+      doc.addStoredString("field", "some more contents");
+      doc.addBinary("binarydv", new BytesRef("501"));
+      doc.addInt("numericdv", 501);
+      doc.addBinary("sorteddv", new BytesRef("501"));
+      doc.addBinary("sortedsetdv", new BytesRef("two"));
+      doc.addBinary("sortedsetdv", new BytesRef("three"));
+      doc.addInt("sortednumericdv", 6);
+      doc.addInt("sortednumericdv", 1);
       w.addDocument(doc);
-      w.deleteDocuments(new Term("id", "500"));
+      w.deleteDocuments(fieldTypes.newIntTerm("id", 500));
       w.close();
     }
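The two doc-values hunks above retire the explicit *DocValuesField classes: values are added with typed methods, and the doc-values flavor (sorted, sorted-set, sorted-numeric) follows from schema flags declared before indexing. A condensed sketch, assuming an open IndexWriter w on this branch:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableSorting("sorteddv");           // was SortedDocValuesField
    fieldTypes.setMultiValued("sortedsetdv");       // was SortedSetDocValuesField
    fieldTypes.enableSorting("sortedsetdv");
    fieldTypes.setMultiValued("sortednumericdv");   // was SortedNumericDocValuesField

    Document doc = w.newDocument();
    doc.addBinary("binarydv", new BytesRef("500")); // was BinaryDocValuesField
    doc.addInt("numericdv", 500);                   // was NumericDocValuesField
    doc.addBinary("sorteddv", new BytesRef("500"));
    doc.addBinary("sortedsetdv", new BytesRef("one"));
    doc.addBinary("sortedsetdv", new BytesRef("two"));
    doc.addInt("sortednumericdv", 4);
    doc.addInt("sortednumericdv", 3);
    w.addDocument(doc);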
 
@@ -989,33 +889,35 @@
             }
             //conf.setInfoStream(log);
             w = new IndexWriter(dir, conf);
+            FieldTypes fieldTypes = w.getFieldTypes();
+            fieldTypes.enableSorting("sorteddv");
+            fieldTypes.enableSorting("sortedsetdv");
+            fieldTypes.setMultiValued("sortedsetdv");
 
-            Document doc = new Document();
-            Field idField = newStringField(random, "id", "", Field.Store.NO);
-            Field binaryDVField = new BinaryDocValuesField("binarydv", new BytesRef());
-            Field numericDVField = new NumericDocValuesField("numericdv", 0);
-            Field sortedDVField = new SortedDocValuesField("sorteddv", new BytesRef());
-            Field sortedSetDVField = new SortedSetDocValuesField("sortedsetdv", new BytesRef());
-            doc.add(idField);
-            doc.add(newField(random, "field", "some text contents", storedTextType));
-            doc.add(binaryDVField);
-            doc.add(numericDVField);
-            doc.add(sortedDVField);
-            doc.add(sortedSetDVField);
             for(int i=0;i<100;i++) {
               //log.println("\nTEST: i=" + i);
-              idField.setStringValue(Integer.toString(i));
-              binaryDVField.setBytesValue(new BytesRef(idField.stringValue()));
-              numericDVField.setLongValue(i);
-              sortedDVField.setBytesValue(new BytesRef(idField.stringValue()));
-              sortedSetDVField.setBytesValue(new BytesRef(idField.stringValue()));
+
+              BytesRef bytes = new BytesRef("" + i);
+
+              Document doc = w.newDocument();
+              doc.addStoredString("field", "some text contents");
+
+              doc.addInt("id", i);
+              doc.addBinary("binarydv", bytes);
+              doc.addInt("numericdv", i);
+              doc.addBinary("sorteddv", bytes);
+              doc.addBinary("sortedsetdv", bytes);
+
               int action = random.nextInt(100);
               if (action == 17) {
                 w.addIndexes(adder);
               } else if (action%30 == 0) {
                 w.deleteAll();
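+                // deleteAll also drops the FieldTypes schema, so it is re-declared before indexing continues: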
+                fieldTypes.enableSorting("sorteddv");
+                fieldTypes.enableSorting("sortedsetdv");
+                fieldTypes.setMultiValued("sortedsetdv");
               } else if (action%2 == 0) {
-                w.updateDocument(new Term("id", idField.stringValue()), doc);
+                w.updateDocument(fieldTypes.newIntTerm("id", i), doc);
               } else {
                 w.addDocument(doc);
               }
@@ -1168,99 +1070,14 @@
     }
   }
 
-  public void testIndexStoreCombos() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    byte[] b = new byte[50];
-    for(int i=0;i<50;i++)
-      b[i] = (byte) (i+77);
-
-    Document doc = new Document();
-
-    FieldType customType = new FieldType(StoredField.TYPE);
-    customType.setTokenized(true);
-    
-    Field f = new Field("binary", b, 10, 17, customType);
-    // TODO: this is evil, changing the type after creating the field:
-    customType.setIndexOptions(IndexOptions.DOCS);
-    final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc1field1.setReader(new StringReader("doc1field1"));
-    f.setTokenStream(doc1field1);
-
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    
-    Field f2 = newField("string", "value", customType2);
-    final MockTokenizer doc1field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc1field2.setReader(new StringReader("doc1field2"));
-    f2.setTokenStream(doc1field2);
-    doc.add(f);
-    doc.add(f2);
-    w.addDocument(doc);
-
-    // add 2 docs to test in-memory merging
-    final MockTokenizer doc2field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc2field1.setReader(new StringReader("doc2field1"));
-    f.setTokenStream(doc2field1);
-    final MockTokenizer doc2field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc2field2.setReader(new StringReader("doc2field2"));
-    f2.setTokenStream(doc2field2);
-    w.addDocument(doc);
-
-    // force segment flush so we can force a segment merge with doc3 later.
-    w.commit();
-
-    final MockTokenizer doc3field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc3field1.setReader(new StringReader("doc3field1"));
-    f.setTokenStream(doc3field1);
-    final MockTokenizer doc3field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-    doc3field2.setReader(new StringReader("doc3field2"));
-    f2.setTokenStream(doc3field2);
-
-    w.addDocument(doc);
-    w.commit();
-    w.forceMerge(1);   // force segment merge.
-    w.close();
-
-    IndexReader ir = DirectoryReader.open(dir);
-    StoredDocument doc2 = ir.document(0);
-    StorableField f3 = doc2.getField("binary");
-    b = f3.binaryValue().bytes;
-    assertTrue(b != null);
-    assertEquals(17, b.length, 17);
-    assertEquals(87, b[0]);
-
-    assertTrue(ir.document(0).getField("binary").binaryValue()!=null);
-    assertTrue(ir.document(1).getField("binary").binaryValue()!=null);
-    assertTrue(ir.document(2).getField("binary").binaryValue()!=null);
-
-    assertEquals("value", ir.document(0).get("string"));
-    assertEquals("value", ir.document(1).get("string"));
-    assertEquals("value", ir.document(2).get("string"));
-
-
-    // test that the terms were indexed.
-    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-
-    ir.close();
-    dir.close();
-
-  }
-
   public void testNoDocsIndex() throws Throwable {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.close();
-
     dir.close();
   }
 
-
   public void testDeleteUnusedFiles() throws Exception {
     assumeFalse("test relies on exact filenames", Codec.getDefault() instanceof SimpleTextCodec);
     for(int iter=0;iter<2;iter++) {
@@ -1279,8 +1096,8 @@
             .setMergePolicy(mergePolicy)
             .setUseCompoundFile(true)
       );
-      Document doc = new Document();
-      doc.add(newTextField("field", "go", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("field", "go");
       w.addDocument(doc);
       DirectoryReader r;
       if (iter == 0) {
@@ -1363,17 +1180,17 @@
     }
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy())));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
 
     // First commit
-    Document doc = new Document();
-
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
+    fieldTypes.enableTermVectors("c");
+    fieldTypes.enableTermVectorPositions("c");
+    fieldTypes.enableTermVectorOffsets("c");
     
-    doc.add(newField("c", "val", customType));
+    Document doc = writer.newDocument();
+    doc.addLargeText("c", "val");
     writer.addDocument(doc);
     writer.commit();
     assertEquals(1, DirectoryReader.listCommits(dir).size());
@@ -1382,8 +1199,8 @@
     IndexCommit id = sdp.snapshot();
 
     // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
-    doc = new Document();
-    doc.add(newField("c", "val", customType));
+    doc = writer.newDocument();
+    doc.addLargeText("c", "val");
     writer.addDocument(doc);
     writer.commit();
     assertEquals(2, DirectoryReader.listCommits(dir).size());
@@ -1420,6 +1237,7 @@
                                                 .setMaxBufferedDocs(2)
                                                 .setMergePolicy(newLogMergePolicy())
                                                 .setUseCompoundFile(false));
+    FieldTypes fieldTypes = writer.getFieldTypes();
     String[] files = dir.listAll();
 
     // Creating over empty dir should not create any files,
@@ -1433,13 +1251,13 @@
       extraFileCount = 0;
     }
 
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
+    fieldTypes.enableTermVectors("c");
+    fieldTypes.enableTermVectorPositions("c");
+    fieldTypes.enableTermVectorOffsets("c");
+
+    Document doc = writer.newDocument();
     // create as many files as possible
-    doc.add(newField("c", "val", customType));
+    doc.addLargeText("c", "val");
     writer.addDocument(doc);
     // Adding just one document does not call flush yet.
     int computedExtraFileCount = 0;
@@ -1452,8 +1270,8 @@
     }
     assertEquals("only the stored and term vector files should exist in the directory", extraFileCount, computedExtraFileCount);
 
-    doc = new Document();
-    doc.add(newField("c", "val", customType));
+    doc = writer.newDocument();
+    doc.addLargeText("c", "val");
     writer.addDocument(doc);
 
     // The second document should cause a flush.
@@ -1476,17 +1294,18 @@
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                            .setMaxBufferedDocs(2));
 
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("c", "val", customType));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("c");
+    fieldTypes.enableTermVectorPositions("c");
+    fieldTypes.enableTermVectorOffsets("c");
+
+    Document doc = w.newDocument();
+    doc.addLargeText("c", "val");
     w.addDocument(doc);
     w.addDocument(doc);
     IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                            .setMaxBufferedDocs(2)
-                                            .setOpenMode(OpenMode.CREATE));
+                                     .setMaxBufferedDocs(2)
+                                     .setOpenMode(OpenMode.CREATE));
 
     w2.close();
     // If we don't do that, the test fails on Windows
@@ -1508,25 +1327,21 @@
     IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                      .setRAMBufferSizeMB(0.01)
                                                      .setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
     indexWriter.getConfig().getMergePolicy().setNoCFSRatio(0.0);
 
     String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
     BIG=BIG+BIG+BIG+BIG;
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setOmitNorms(true);
-    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setTokenized(false);
-    FieldType customType3 = new FieldType(TextField.TYPE_STORED);
-    customType3.setTokenized(false);
-    customType3.setOmitNorms(true);
-    
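+    // Atom fields replace the untokenized FieldTypes; norms are now disabled by field name.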
+    fieldTypes.disableNorms("id");
+    fieldTypes.disableNorms("str3");
+
     for (int i=0; i<2; i++) {
-      Document doc = new Document();
-      doc.add(new Field("id", Integer.toString(i)+BIG, customType3));
-      doc.add(new Field("str", Integer.toString(i)+BIG, customType2));
-      doc.add(new Field("str2", Integer.toString(i)+BIG, storedTextType));
-      doc.add(new Field("str3", Integer.toString(i)+BIG, customType));
+      Document doc = indexWriter.newDocument();
+      doc.addAtom("id", Integer.toString(i)+BIG);
+      doc.addAtom("str", Integer.toString(i)+BIG);
+      doc.addLargeText("str2", Integer.toString(i)+BIG);
+      doc.addLargeText("str3", Integer.toString(i)+BIG);
       indexWriter.addDocument(doc);
     }
 
@@ -1597,12 +1412,12 @@
 
     char[] chars = new char[DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8];
     Arrays.fill(chars, 'x');
-    Document doc = new Document();
+    Document doc = w.newDocument();
     final String bigTerm = new String(chars);
 
     // These contents produce a too-long term:
     String contents = "abc xyz x" + bigTerm + " another term";
-    doc.add(new TextField("content", contents, Field.Store.NO));
+    doc.addLargeText("content", contents);
     try {
       w.addDocument(doc);
       fail("should have hit exception");
@@ -1611,8 +1426,8 @@
     }
 
     // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(new TextField("content", "abc bbb ccc", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("content", "abc bbb ccc");
     w.addDocument(doc);
 
     // So we remove the deleted doc:
@@ -1636,24 +1451,23 @@
 
     // Make sure we can add a document with exactly the
     // maximum length term, and search on that term:
-    doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setTokenized(false);
-    Field contentField = new Field("content", "", customType);
-    doc.add(contentField);
 
     w = new RandomIndexWriter(random(), dir);
 
-    contentField.setStringValue("other");
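+    // Field instances are no longer reused across documents; each value gets a fresh document.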
+    doc = w.newDocument();
+    doc.addAtom("content", "other");
     w.addDocument(doc);
 
-    contentField.setStringValue("term");
+    doc = w.newDocument();
+    doc.addAtom("content", "term");
     w.addDocument(doc);
 
-    contentField.setStringValue(bigTerm);
+    doc = w.newDocument();
+    doc.addAtom("content", bigTerm);
     w.addDocument(doc);
 
-    contentField.setStringValue("zzz");
+    doc = w.newDocument();
+    doc.addAtom("content", "zzz");
     w.addDocument(doc);
 
     reader = w.getReader();
@@ -1669,8 +1483,8 @@
     MockDirectoryWrapper d = new MockDirectoryWrapper(random(), new RAMDirectory());
     d.setEnableVirusScanner(false); // needs for files to actually be deleted
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
     for(int i = 0; i < 20; i++) {
+      Document doc = w.newDocument();
       for(int j = 0; j < 100; ++j) {
         w.addDocument(doc);
       }
@@ -1691,8 +1505,8 @@
   public void testNRTReaderVersion() throws Exception {
     Directory d = new MockDirectoryWrapper(random(), new RAMDirectory());
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addAtom("id", "0");
     w.addDocument(doc);
     DirectoryReader r = w.getReader();
     long version = r.getVersion();
@@ -1736,19 +1550,18 @@
     IndexWriter w = new IndexWriter(dir,
                                     new IndexWriterConfig(new MockAnalyzer(random())));
 
-    FieldType docsAndFreqs = new FieldType(TextField.TYPE_NOT_STORED);
-    docsAndFreqs.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
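+    // Index options now live on the FieldTypes schema; highlighting is disabled first,
+    // presumably so the field may be indexed without offsets.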
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("field");
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS);
 
-    FieldType docsOnly = new FieldType(TextField.TYPE_NOT_STORED);
-    docsOnly.setIndexOptions(IndexOptions.DOCS);
-
-    Document doc = new Document();
-    doc.add(new Field("field", "a b c", docsAndFreqs));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
     w.addDocument(doc);
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new Field("field", "a b c", docsOnly));
+    doc = w.newDocument();
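+    // Changing the index options here downgrades the whole "field" to DOCS, mirroring the old second FieldType.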
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS);
+    doc.addLargeText("field", "a b c");
     w.addDocument(doc);
     w.close();
     dir.close();
@@ -1760,9 +1573,8 @@
                                     new IndexWriterConfig(new MockAnalyzer(random())));
 
     final List<Document> docs = new ArrayList<>();
-    docs.add(new Document());
-    w.updateDocuments(new Term("foo", "bar"),
-                      docs);
+    docs.add(w.newDocument());
+    w.updateDocuments(new Term("foo", "bar"), docs);
     w.close();
     dir.close();
   }
@@ -1811,7 +1623,7 @@
                                     new IndexWriterConfig(new MockAnalyzer(random())));
 
     w.commit();
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.prepareCommit();
     w.rollback();
     assertTrue(DirectoryReader.indexExists(dir));
@@ -1821,41 +1633,6 @@
     dir.close();
   }
   
-  public void testDontInvokeAnalyzerForUnAnalyzedFields() throws Exception {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-
-      @Override
-      public int getPositionIncrementGap(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-
-      @Override
-      public int getOffsetGap(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-    };
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
-    Field f2 = newField("field", "", customType);
-    doc.add(f2);
-    doc.add(f);
-    w.addDocument(doc);
-    w.close();
-    dir.close();
-  }
-  
   //LUCENE-1468 -- make sure opening an IndexWriter with
   // create=true does not remove non-index files
   
@@ -1863,7 +1640,7 @@
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, 
         newIndexWriterConfig(new MockAnalyzer(random())));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.close();
     try {
       // Create my own random file:
@@ -1891,9 +1668,12 @@
       }
     };
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, a);
-    Document doc = new Document();
-    doc.add(new TextField("body", "just a", Field.Store.NO));
-    doc.add(new TextField("body", "test of gaps", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("body");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "just a");
+    doc.addLargeText("body", "test of gaps");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -1922,9 +1702,12 @@
       }
     };
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, a);
-    Document doc = new Document();
-    doc.add(new TextField("body", "just a foobar", Field.Store.NO));
-    doc.add(new TextField("body", "test of gaps", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("body");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "just a foobar");
+    doc.addLargeText("body", "test of gaps");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -2005,14 +1788,14 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
     // add 3 good docs
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       iw.addDocument(doc);
     }
     // add broken doc
     try {
-      Document broke = new Document();
-      broke.add(newTextField("test", "broken", Field.Store.NO));
+      Document broke = iw.newDocument();
+      broke.addLargeText("test", "broken");
       iw.addDocument(broke);
       fail();
     } catch (NullPointerException expected) {}
@@ -2029,15 +1812,16 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     // add 3 good docs
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       iw.addDocument(doc);
     }
     // add broken doc
     try {
       iw.addDocument(null);
       fail();
-    } catch (NullPointerException expected) {}
+    } catch (NullPointerException expected) {
+    }
     // ensure good docs are still ok
     IndexReader ir = iw.getReader();
     assertEquals(3, ir.numDocs());
@@ -2051,8 +1835,8 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     // add 3 good docs
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       iw.addDocument(doc);
     }
     // add broken doc block
@@ -2073,23 +1857,13 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     // add 3 good docs
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       iw.addDocument(doc);
     }
     // add broken doc
     try {
-      iw.addDocument(new IndexDocument() {
-        @Override
-        public Iterable<IndexableField> indexableFields() {
-          return null;
-        }
-        
-        @Override
-        public Iterable<StorableField> storableFields() {
-          return Collections.emptyList();
-        }
-      });
+      iw.addDocument(null);
       fail();
     } catch (NullPointerException expected) {}
     // ensure good docs are still ok
@@ -2100,40 +1874,10 @@
     dir.close();
   }
   
-  public void testNullIterable2() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    // add 3 good docs
-    for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
-      iw.addDocument(doc);
-    }
-    // add broken doc
-    try {
-      iw.addDocument(new IndexDocument() {
-        @Override
-        public Iterable<IndexableField> indexableFields() {
-          return Collections.emptyList();
-        }
-        
-        @Override
-        public Iterable<StorableField> storableFields() {
-          return null;
-        }
-      });
-    } catch (NullPointerException expected) {}
-    // ensure good docs are still ok
-    IndexReader ir = iw.getReader();
-    assertEquals(3, ir.numDocs());
-    ir.close();
-    iw.close();
-    dir.close();
-  }
-  
   public void testIterableFieldThrowsException() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
     int iters = atLeast(100);
     int docCount = 0;
     int docId = 0;
@@ -2142,27 +1886,20 @@
       int numDocs = atLeast(4);
       for (int j = 0; j < numDocs; j++) {
         String id = Integer.toString(docId++);
-        final List<StorableField> storedFields = new ArrayList<>();
-        storedFields.add(new StoredField("id", id));
-        storedFields.add(new StoredField("foo",TestUtil.randomSimpleString(random())));
         final List<IndexableField> indexFields = new ArrayList<>();
-        indexFields.add(new StringField("id", id, Field.Store.NO));
-        indexFields.add(new StringField("foo", TestUtil.randomSimpleString(random()), Field.Store.NO));
+        indexFields.add(new LowSchemaField(a, "id", id, IndexOptions.DOCS, false));
+        indexFields.add(new LowSchemaField(a, "foo", TestUtil.randomSimpleString(random()), IndexOptions.DOCS, false));
+        LowSchemaField f = new LowSchemaField(a, "id", id, IndexOptions.NONE, false);
+        f.doNotStore();
+        indexFields.add(f);
+        f = new LowSchemaField(a, "foo", TestUtil.randomSimpleString(random()), IndexOptions.NONE, false);
+        f.doNotStore();
+        indexFields.add(f);
         docId++;
         
         boolean success = false;
         try {
-          w.addDocument(new IndexDocument() {
-            @Override
-            public Iterable<IndexableField> indexableFields() {
-              return new RandomFailingIterable<IndexableField>(indexFields, random());
-            }
-
-            @Override
-            public Iterable<StorableField> storableFields() {
-              return new RandomFailingIterable<StorableField>(storedFields, random());
-            }        
-          });
+          w.addDocument(new RandomFailingIterable<IndexableField>(indexFields, random()));
           success = true;
         } catch (RuntimeException e) {
           assertEquals("boom", e.getMessage());
@@ -2198,30 +1935,28 @@
     int iters = atLeast(100);
     int docCount = 0;
     int docId = 0;
-    Set<String> liveIds = new HashSet<>();
+    Set<Integer> liveIds = new HashSet<>();
     for (int i = 0; i < iters; i++) {
-      List<Document> docs = new ArrayList<>();
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      FieldType idFt = new FieldType(TextField.TYPE_STORED);
+      List<Iterable<IndexableField>> docs = new ArrayList<>();
       
       int numDocs = atLeast(4);
       for (int j = 0; j < numDocs; j++) {
-        Document doc = new Document();
-        doc.add(newField("id", ""+ (docId++), idFt));
-        doc.add(newField("foo", TestUtil.randomSimpleString(random()), ft));
+        Document doc = w.newDocument();
+        doc.addUniqueInt("id", docId++);
+        doc.addLargeText("foo", TestUtil.randomSimpleString(random()));
         docs.add(doc);
       }
       boolean success = false;
       try {
-        w.addDocuments(new RandomFailingIterable<IndexDocument>(docs, random()));
+        w.addDocuments(new RandomFailingIterable<Iterable<IndexableField>>(docs, random()));
         success = true;
       } catch (RuntimeException e) {
         assertEquals("boom", e.getMessage());
       } finally {
         if (success) {
           docCount += docs.size();
-          for (Document indexDocument : docs) {
-            liveIds.add(indexDocument.get("id"));  
+          for (Iterable<IndexableField> indexDocument : docs) {
+            liveIds.add(((Document) indexDocument).getInt("id"));
           }
         }
       }
@@ -2235,7 +1970,7 @@
       int maxDoc = ar.maxDoc();
       for (int i = 0; i < maxDoc; i++) {
         if (liveDocs == null || liveDocs.get(i)) {
-          assertTrue(liveIds.remove(ar.document(i).get("id")));
+          assertTrue(liveIds.remove(ar.document(i).getInt("id")));
         }
       }
     }
@@ -2379,8 +2114,8 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
     assertTrue(writer.hasUncommittedChanges());  // this will be true because a commit will create an empty index
-    Document doc = new Document();
-    doc.add(newTextField("myfield", "a b c", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("myfield", "a b c");
     writer.addDocument(doc);
     assertTrue(writer.hasUncommittedChanges());
 
@@ -2393,8 +2128,8 @@
     writer.addDocument(doc);
     assertTrue(writer.hasUncommittedChanges());
     writer.commit();
-    doc = new Document();
-    doc.add(newStringField("id", "xyz", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("id", "xyz");
     writer.addDocument(doc);
     assertTrue(writer.hasUncommittedChanges());
 
@@ -2417,6 +2152,8 @@
 
     writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
     assertFalse(writer.hasUncommittedChanges());
+    doc = writer.newDocument();
+    doc.addAtom("id", "xyz");
     writer.addDocument(doc);
     assertTrue(writer.hasUncommittedChanges());
 
@@ -2456,8 +2193,8 @@
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(new TextField("a", "foo", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("a", "foo");
     w.addDocument(doc);
 
     // Should not delete the document; with LUCENE-5239 the
@@ -2474,34 +2211,11 @@
     dir.close();
   }
 
-  public void testHasUncommittedChangesAfterException() throws IOException {
-    Analyzer analyzer = new MockAnalyzer(random());
-
-    Directory directory = newDirectory();
-    // we don't use RandomIndexWriter because it might add more docvalues than we expect !!!!
-    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
-    iwc.setMergePolicy(newLogMergePolicy());
-    IndexWriter iwriter = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
-    doc.add(new SortedDocValuesField("dv", new BytesRef("bar!")));
-    try {
-      iwriter.addDocument(doc);
-      fail("didn't hit expected exception");
-    } catch (IllegalArgumentException expected) {
-      // expected
-    }
-    iwriter.commit();
-    assertFalse(iwriter.hasUncommittedChanges());
-    iwriter.close();
-    directory.close();
-  }
-
   public void testDoubleClose() throws IOException {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = w.newDocument();
+    doc.addBinary("dv", new BytesRef("foo!"));
     w.addDocument(doc);
     w.close();
     // Close again should have no effect
@@ -2512,8 +2226,8 @@
   public void testRollbackThenClose() throws IOException {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = w.newDocument();
+    doc.addBinary("dv", new BytesRef("foo!"));
     w.addDocument(doc);
     w.rollback();
     // Close after rollback should have no effect
@@ -2524,8 +2238,8 @@
   public void testCloseThenRollback() throws IOException {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = w.newDocument();
+    doc.addBinary("dv", new BytesRef("foo!"));
     w.addDocument(doc);
     w.close();
     // Rollback after close should have no effect
@@ -2579,8 +2293,8 @@
         }
       });
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+    Document doc = w.newDocument();
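+    // addShortText presumably adds sorted doc values here, standing in for the old explicit SortedDocValuesField.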
+    doc.addShortText("dv", "foo!");
     w.addDocument(doc);
     w.commit();
     w.addDocument(doc);
@@ -2617,8 +2331,8 @@
     iwc.setMergePolicy(lmp);
 
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(new TextField("a", "foo", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("a", "foo");
     w.addDocument(doc);
     w.commit();
     w.addDocument(doc);
@@ -2633,6 +2347,8 @@
     }
 
     w = new RandomIndexWriter(random(), dir);
+    doc = w.newDocument();
+    doc.addLargeText("a", "foo");
     w.addDocument(doc);
     w.close();
     r.close();
@@ -2689,7 +2405,7 @@
   public void testIds() throws Exception {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.close();
     
     SegmentInfos sis = SegmentInfos.readLatestCommit(d);
@@ -2729,8 +2445,8 @@
   public void testEmptyNorm() throws Exception {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new TextField("foo", new CannedTokenStream()));
+    Document doc = w.newDocument();
+    doc.addLargeText("foo", new CannedTokenStream());
     w.addDocument(doc);
     w.commit();
     w.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
index 735e2e0..c7bf065 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
@@ -25,7 +25,6 @@
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -241,7 +240,7 @@
 
     );
     for(int j=0;j<1470;j++) {
-      TestIndexWriter.addDocWithIndex(writer, j);
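+      // Offset by the 30 docs added earlier in the test, presumably so the unique int ids do not collide.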
+      TestIndexWriter.addDocWithIndex(writer, 30+j);
     }
     long midDiskUsage = dir.getMaxUsedSizeInBytes();
     dir.resetMaxUsedSizeInBytes();
@@ -359,16 +358,14 @@
           @Override
           public void run() {
             try {
-              final Document doc = new Document();
               DirectoryReader r = DirectoryReader.open(dir);
-              Field f = newStringField("f", "", Field.Store.NO);
-              doc.add(f);
               int count = 0;
               do {
                 if (failed.get()) break;
                 for(int j=0;j<10;j++) {
                   final String s = finalI + "_" + String.valueOf(count++);
-                  f.setStringValue(s);
+                  Document doc = w.newDocument();
+                  doc.addAtom("f", s);
                   w.addDocument(doc);
                   w.commit();
                   DirectoryReader r2 = DirectoryReader.openIfChanged(r);
@@ -441,8 +438,7 @@
 
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                            .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
-    Document doc = new Document();
-    w.addDocument(doc);
+    w.addDocument(w.newDocument());
 
     // commit to "first"
     Map<String,String> commitData = new HashMap<>();
@@ -451,7 +447,7 @@
     w.commit();
 
     // commit to "second"
-    w.addDocument(doc);
+    w.addDocument(w.newDocument());
     commitData.put("tag", "second");
     w.setCommitData(commitData);
     w.close();
@@ -474,7 +470,7 @@
     assertEquals(1, w.numDocs());
 
     // commit IndexWriter to "third"
-    w.addDocument(doc);
+    w.addDocument(w.newDocument());
     commitData.put("tag", "third");
     w.setCommitData(commitData);
     w.close();
@@ -666,7 +662,7 @@
 
     DirectoryReader r = DirectoryReader.open(dir);
     // commit(Map) never called for this index
-    assertEquals(0, r.getIndexCommit().getUserData().size());
+    assertEquals(1, r.getIndexCommit().getUserData().size());
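+    // The single entry is presumably the FieldTypes schema that this branch persists in the commit user data.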
     r.close();
 
     w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
@@ -692,7 +688,7 @@
   public void testPrepareCommitThenClose() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.prepareCommit();
     try {
       w.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
index a575425..e9a0295 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
@@ -26,7 +26,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
@@ -327,13 +326,14 @@
     // Change to true:
     w.getConfig().setUseCompoundFile(true);
 
-    Document doc = new Document();
-    doc.add(newStringField("field", "foo", Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "foo");
     w.addDocument(doc);
     w.commit();
     assertTrue("Expected CFS after commit", w.newestSegment().info.getUseCompoundFile());
     
-    doc.add(newStringField("field", "foo", Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "foo");
     w.addDocument(doc);
     w.commit();
     w.forceMerge(1);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index 08ad33d..3b8336a8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -33,10 +33,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -63,14 +60,16 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
                                                   .setMaxBufferedDeleteTerms(1));
 
-    FieldType custom1 = new FieldType();
-    custom1.setStored(true);
+    FieldTypes fieldTypes = modifier.getFieldTypes();
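+    // addLargeText stores its value by default, so storing is disabled to keep "contents" unstored as before.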
+    fieldTypes.disableStored("contents");
+
     for (int i = 0; i < keywords.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", keywords[i], Field.Store.YES));
-      doc.add(newField("country", unindexed[i], custom1));
-      doc.add(newTextField("contents", unstored[i], Field.Store.NO));
-      doc.add(newTextField("city", text[i], Field.Store.YES));
+      Document doc = modifier.newDocument();
+      doc.addAtom("id", keywords[i]);
+      doc.addStoredString("country", unindexed[i]);
+      doc.addLargeText("contents", unstored[i]);
+      doc.addLargeText("city", text[i]);
       modifier.addDocument(doc);
     }
     modifier.forceMerge(1);
@@ -102,6 +101,7 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setMaxBufferedDocs(2)
         .setMaxBufferedDeleteTerms(2));
+    FieldTypes fieldTypes = modifier.getFieldTypes();
     int id = 0;
     int value = 100;
 
@@ -119,7 +119,7 @@
     assertEquals(7, reader.numDocs());
     reader.close();
 
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+    modifier.deleteDocuments(fieldTypes.newIntTerm("value", value));
 
     modifier.commit();
 
@@ -135,11 +135,11 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
                                                 .setMaxBufferedDeleteTerms(1));
 
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
-    assertEquals(3, writer.getFlushDeletesCount());
+    assertEquals(4, writer.getFlushDeletesCount());
     writer.close();
     dir.close();
   }
@@ -154,22 +154,25 @@
       IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
           .setMaxBufferedDocs(4)
           .setMaxBufferedDeleteTerms(4));
+      FieldTypes fieldTypes = modifier.getFieldTypes();
+
       int id = 0;
       int value = 100;
 
       addDoc(modifier, ++id, value);
-      if (0 == t)
-        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-      else
-        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
+      if (0 == t) {
+        modifier.deleteDocuments(fieldTypes.newIntTerm("value", value));
+      } else {
+        modifier.deleteDocuments(fieldTypes.newExactIntQuery("value", value));
+      }
       addDoc(modifier, ++id, value);
       if (0 == t) {
-        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+        modifier.deleteDocuments(fieldTypes.newIntTerm("value", value));
         assertEquals(2, modifier.getNumBufferedDeleteTerms());
         assertEquals(1, modifier.getBufferedDeleteTermsSize());
+      } else {
+        modifier.deleteDocuments(fieldTypes.newExactIntQuery("value", value));
       }
-      else
-        modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
 
       addDoc(modifier, ++id, value);
       assertEquals(0, modifier.getSegmentCount());
@@ -178,7 +181,7 @@
       IndexReader reader = DirectoryReader.open(dir);
       assertEquals(1, reader.numDocs());
 
-      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+      int hitCount = getHitCount(dir, fieldTypes.newIntTerm("id", id));
       assertEquals(1, hitCount);
       reader.close();
       modifier.close();
@@ -192,6 +195,7 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setMaxBufferedDocs(100)
         .setMaxBufferedDeleteTerms(100));
+    FieldTypes fieldTypes = modifier.getFieldTypes();
 
     int id = 0;
     int value = 100;
@@ -209,7 +213,7 @@
     for (int i = 0; i < 5; i++) {
       addDoc(modifier, ++id, value);
     }
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+    modifier.deleteDocuments(fieldTypes.newIntTerm("value", value));
 
     modifier.commit();
 
@@ -226,6 +230,7 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setMaxBufferedDocs(2)
         .setMaxBufferedDeleteTerms(2));
+    FieldTypes fieldTypes = modifier.getFieldTypes();
 
     int id = 0;
     int value = 100;
@@ -240,8 +245,8 @@
     reader.close();
 
     id = 0;
-    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
-    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+    modifier.deleteDocuments(fieldTypes.newIntTerm("id", ++id));
+    modifier.deleteDocuments(fieldTypes.newIntTerm("id", ++id));
 
     modifier.commit();
 
@@ -251,7 +256,7 @@
 
     Term[] terms = new Term[3];
     for (int i = 0; i < terms.length; i++) {
-      terms[i] = new Term("id", String.valueOf(++id));
+      terms[i] = fieldTypes.newIntTerm("id", ++id);
     }
     modifier.deleteDocuments(terms);
     modifier.commit();
@@ -327,16 +332,19 @@
           try {
             latch.await();
             for (int j = 0; j < 1000; j++) {
-              Document doc = new Document();
-              doc.add(newTextField("content", "aaa", Field.Store.NO));
-              doc.add(newStringField("id", String.valueOf(id++), Field.Store.YES));
-              doc.add(newStringField("value", String.valueOf(value), Field.Store.NO));
-              doc.add(new NumericDocValuesField("dv", value));
+              Document doc = modifier.newDocument();
+              doc.addLargeText("content", "aaa");
+              doc.addUniqueInt("id", id++);
+              doc.addInt("value", value);
+              doc.addInt("dv", value);
               modifier.addDocument(doc);
               if (VERBOSE) {
                 System.out.println("\tThread["+offset+"]: add doc: " + id);
               }
             }
+          } catch (IllegalArgumentException e) {
+            // Expected: the FieldTypes are cleared concurrently while we are still indexing
+            assertTrue(e.getMessage().contains("unknown field"));
           } catch (Exception e) {
             throw new RuntimeException(e);
           } finally {
@@ -452,22 +460,24 @@
 
   private void updateDoc(IndexWriter modifier, int id, int value)
       throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
-    doc.add(newStringField("id", String.valueOf(id), Field.Store.YES));
-    doc.add(newStringField("value", String.valueOf(value), Field.Store.NO));
-    doc.add(new NumericDocValuesField("dv", value));
-    modifier.updateDocument(new Term("id", String.valueOf(id)), doc);
+    Document doc = modifier.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addUniqueInt("id", id);
+    doc.addInt("value", value);
+    doc.addInt("dv", value);
+
+    FieldTypes fieldTypes = modifier.getFieldTypes();
+    modifier.updateDocument(fieldTypes.newIntTerm("id", id), doc);
   }
 
 
   private void addDoc(IndexWriter modifier, int id, int value)
       throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
-    doc.add(newStringField("id", String.valueOf(id), Field.Store.YES));
-    doc.add(newStringField("value", String.valueOf(value), Field.Store.NO));
-    doc.add(new NumericDocValuesField("dv", value));
+    Document doc = modifier.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addUniqueInt("id", id);
+    doc.addInt("value", value);
+    doc.addInt("dv", value);
     modifier.addDocument(doc);
   }
 
@@ -505,10 +515,10 @@
     startDir.setEnableVirusScanner(false);
     IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     for (int i = 0; i < 157; i++) {
-      Document d = new Document();
-      d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
-      d.add(newTextField("content", "aaa " + i, Field.Store.NO));
-      d.add(new NumericDocValuesField("dv", i));
+      Document d = writer.newDocument();
+      d.addUniqueInt("id", i);
+      d.addLargeText("content", "aaa " + i);
+      d.addInt("dv", i);
       writer.addDocument(d);
     }
     writer.close();
@@ -536,6 +546,7 @@
                                              .setMaxBufferedDeleteTerms(1000)
                                              .setMergeScheduler(new ConcurrentMergeScheduler()));
       ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions();
+      FieldTypes fieldTypes = modifier.getFieldTypes();
 
       // For each disk size, first try to commit against
       // dir that will hit random IOExceptions & disk
@@ -588,13 +599,13 @@
             int docId = 12;
             for (int i = 0; i < 13; i++) {
               if (updates) {
-                Document d = new Document();
-                d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
-                d.add(newTextField("content", "bbb " + i, Field.Store.NO));
-                d.add(new NumericDocValuesField("dv", i));
-                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
+                Document d = modifier.newDocument();
+                d.addUniqueInt("id", docId);
+                d.addLargeText("content", "bbb " + i);
+                d.addInt("dv", i);
+                modifier.updateDocument(fieldTypes.newIntTerm("id", docId), d);
               } else { // deletes
-                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
+                modifier.deleteDocuments(fieldTypes.newIntTerm("id", docId));
                 // modifier.setNorm(docId, "contents", (float)2.0);
               }
               docId += 12;
@@ -747,7 +758,8 @@
           if (!failed) {
             StackTraceElement[] trace = new Exception().getStackTrace();
             for (int i = 0; i < trace.length; i++) {
-              if ("applyDeletesAndUpdates".equals(trace[i].getMethodName())) {
+              if ("writeLiveDocs".equals(trace[i].getMethodName()) ||
+                  "applyDeletesAndUpdates".equals(trace[i].getMethodName())) {
                 if (VERBOSE) {
                   System.out.println("TEST: mock failure: saw applyDeletes");
                   new Throwable().printStackTrace(System.out);
@@ -779,14 +791,12 @@
 
     dir.failOn(failure.reset());
 
-    FieldType custom1 = new FieldType();
-    custom1.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", keywords[i], Field.Store.YES));
-      doc.add(newField("country", unindexed[i], custom1));
-      doc.add(newTextField("contents", unstored[i], Field.Store.NO));
-      doc.add(newTextField("city", text[i], Field.Store.YES));
+      Document doc = modifier.newDocument();
+      doc.addAtom("id", keywords[i]);
+      doc.addStoredString("country", unindexed[i]);
+      doc.addLargeText("contents", unstored[i]);
+      doc.addLargeText("city", text[i]);
       modifier.addDocument(doc);
     }
     // flush
@@ -824,8 +834,7 @@
     if (VERBOSE) {
       System.out.println("TEST: add empty doc");
     }
-    Document doc = new Document();
-    modifier.addDocument(doc);
+    modifier.addDocument(modifier.newDocument());
 
     // commit the changes, the buffered deletes, and the new doc
 
@@ -902,14 +911,12 @@
     modifier.commit();
     dir.failOn(failure.reset());
 
-    FieldType custom1 = new FieldType();
-    custom1.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", keywords[i], Field.Store.YES));
-      doc.add(newField("country", unindexed[i], custom1));
-      doc.add(newTextField("contents", unstored[i], Field.Store.NO));
-      doc.add(newTextField("city", text[i], Field.Store.YES));
+      Document doc = modifier.newDocument();
+      doc.addAtom("id", keywords[i]);
+      doc.addStoredString("country", unindexed[i]);
+      doc.addLargeText("contents", unstored[i]);
+      doc.addLargeText("city", text[i]);
       try {
         modifier.addDocument(doc);
       } catch (IOException io) {
@@ -944,6 +951,7 @@
   public void testDeleteAllSlowly() throws Exception {
     final Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
     final int NUM_DOCS = atLeast(1000);
     final List<Integer> ids = new ArrayList<>(NUM_DOCS);
     for(int id=0;id<NUM_DOCS;id++) {
@@ -951,8 +959,8 @@
     }
     Collections.shuffle(ids, random());
     for(int id : ids) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+id, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", id);
       w.addDocument(doc);
     }
     Collections.shuffle(ids, random());
@@ -962,7 +970,7 @@
       final int inc = Math.min(left, TestUtil.nextInt(random(), 1, 20));
       final int limit = upto + inc;
       while(upto < limit) {
-        w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
+        w.deleteDocuments(fieldTypes.newIntTerm("id", ids.get(upto++)));
       }
       final IndexReader r = w.getReader();
       assertEquals(NUM_DOCS - upto, r.numDocs());
@@ -993,8 +1001,8 @@
                                            .setRAMBufferSizeMB(1.0)
                                            .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                                            .setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    Document doc = new Document();
-    doc.add(newTextField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20");
     int num = atLeast(3);
     for (int iter = 0; iter < num; iter++) {
       int count = 0;
@@ -1038,26 +1046,23 @@
                                       .setMaxBufferedDocs(1000)
                                       .setMergePolicy(NoMergePolicy.INSTANCE)
                                       .setReaderPooling(false));
+    FieldTypes fieldTypes = w.getFieldTypes();
     int count = 0;
+    int startDelCount = w.getFlushDeletesCount();
     while(true) {
-      Document doc = new Document();
-      doc.add(new StringField("id", count+"", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", count);
       final Term delTerm;
       if (count == 1010) {
         // This is the only delete that applies
-        delTerm = new Term("id", ""+0);
+        delTerm = fieldTypes.newIntTerm("id", 0);
       } else {
         // These get buffered, taking up RAM, but delete
         // nothing when applied:
-        delTerm = new Term("id", "x" + count);
+        delTerm = fieldTypes.newIntTerm("id", 100000 + count);
       }
       w.updateDocument(delTerm, doc);
-      // Eventually segment 0 should get a del docs:
-      // TODO: fix this test
-      if (slowFileExists(dir, "_0_1.del") || slowFileExists(dir, "_0_1.liv") ) {
-        if (VERBOSE) {
-          System.out.println("TEST: deletes created @ count=" + count);
-        }
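+      // Watching the writer's flush-deletes counter replaces probing for codec-specific .del/.liv files.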
+      if (w.getFlushDeletesCount() > startDelCount) {
         break;
       }
       count++;
@@ -1066,7 +1071,7 @@
       // sizable improvements to RAM efficiency of buffered
       // del term we're unlikely to go over 100K:
       if (count > 100000) {
-        fail("delete's were not applied");
+        fail("deletes were not applied");
       }
     }
     w.close();
@@ -1088,23 +1093,23 @@
                                       .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                                       .setMergePolicy(NoMergePolicy.INSTANCE)
                                       .setReaderPooling(false));
+    FieldTypes fieldTypes = w.getFieldTypes();
     int count = 0;
+    int startDelCount = w.getFlushDeletesCount();
     while(true) {
-      Document doc = new Document();
-      doc.add(new StringField("id", count+"", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", count);
       final Term delTerm;
       if (count == 1010) {
         // This is the only delete that applies
-        delTerm = new Term("id", ""+0);
+        delTerm = fieldTypes.newIntTerm("id", 0);
       } else {
         // These get buffered, taking up RAM, but delete
         // nothing when applied:
-        delTerm = new Term("id", "x" + count);
+        delTerm = fieldTypes.newIntTerm("id", 100000 + count);
       }
       w.updateDocument(delTerm, doc);
-      // Eventually segment 0 should get a del docs:
-      // TODO: fix this test
-      if (slowFileExists(dir, "_0_1.del") || slowFileExists(dir, "_0_1.liv")) {
+      if (w.getFlushDeletesCount() > startDelCount) {
         break;
       }
       count++;
@@ -1139,6 +1144,9 @@
           sawAfterFlush.set(true);
         }
       };
+
+    FieldTypes fieldTypes = w.getFieldTypes();
+
     int id = 0;
     while(true) {
       StringBuilder sb = new StringBuilder();
@@ -1148,10 +1156,10 @@
       if (id == 500) {
         w.deleteDocuments(new Term("id", "0"));
       }
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+id, Field.Store.NO));
-      doc.add(newTextField("body", sb.toString(), Field.Store.NO));
-      w.updateDocument(new Term("id", ""+id), doc);
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", id);
+      doc.addLargeText("body", sb.toString());
+      w.updateDocument(fieldTypes.newIntTerm("id", id), doc);
       docsInSegment.incrementAndGet();
       // TODO: fix this test
       if (slowFileExists(dir, "_0_1.del") || slowFileExists(dir, "_0_1.liv")) {
@@ -1174,12 +1182,12 @@
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMaxBufferedDocs(2);
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(newField("field", "0", StringField.TYPE_NOT_STORED));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "0");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newField("field", "1", StringField.TYPE_NOT_STORED));
+    doc = w.newDocument();
+    doc.addAtom("field", "1");
     w.addDocument(doc);
     w.commit();
     assertEquals(1, w.getSegmentCount());
@@ -1221,7 +1229,7 @@
 
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter w = new IndexWriter(d, iwc);
-    Document doc = new Document();
+    Document doc = w.newDocument();
     w.addDocument(doc);
     w.addDocument(doc);
     w.addDocument(doc);
@@ -1253,8 +1261,8 @@
     iwc.setMergeScheduler(new SerialMergeScheduler());
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<38;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", ""+i);
       w.addDocument(doc);
     }
     w.commit();
@@ -1281,8 +1289,8 @@
     iwc.setMergeScheduler(new SerialMergeScheduler());
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<38;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", ""+i);
       w.addDocument(doc);
     }
     w.commit();
@@ -1312,8 +1320,8 @@
     iwc.setMaxBufferedDeleteTerms(18);
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<38;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", ""+i);
       w.addDocument(doc);
     }
     w.commit();
@@ -1342,8 +1350,8 @@
     iwc.setMaxBufferedDeleteTerms(18);
     IndexWriter w = new IndexWriter(dir, iwc);
     for(int i=0;i<38;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", ""+i);
       w.addDocument(doc);
     }
     w.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index 77ee7e5..c7c8f6c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -38,17 +38,8 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
@@ -75,35 +66,6 @@
     private final Document doc;
     private final int count;
     
-    /* private field types */
-
-    private static final FieldType custom1 = new FieldType(TextField.TYPE_NOT_STORED);
-    private static final FieldType custom2 = new FieldType();
-    private static final FieldType custom3 = new FieldType();
-    private static final FieldType custom4 = new FieldType(StringField.TYPE_NOT_STORED);
-    private static final FieldType custom5 = new FieldType(TextField.TYPE_STORED);
-    
-    static {
-
-      custom1.setStoreTermVectors(true);
-      custom1.setStoreTermVectorPositions(true);
-      custom1.setStoreTermVectorOffsets(true);
-      
-      custom2.setStored(true);
-      custom2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-      
-      custom3.setStored(true);
-
-      custom4.setStoreTermVectors(true);
-      custom4.setStoreTermVectorPositions(true);
-      custom4.setStoreTermVectorOffsets(true);
-      
-      custom5.setStoreTermVectors(true);
-      custom5.setStoreTermVectorPositions(true);
-      custom5.setStoreTermVectorOffsets(true);
-    }
-
     public DocCopyIterator(Document doc, int count) {
       this.count = count;
       this.doc = doc;
@@ -147,28 +109,16 @@
 
     @Override
     public void run() {
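+      // Declare the multi-valued doc-values fields up front; each document
+      // below adds two values to them: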
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.setMultiValued("sortedsetdv");
+      fieldTypes.setMultiValued("sortednumericdv");
 
-      final Document doc = new Document();
-
-      doc.add(newTextField(r, "content1", "aaa bbb ccc ddd", Field.Store.YES));
-      doc.add(newField(r, "content6", "aaa bbb ccc ddd", DocCopyIterator.custom1));
-      doc.add(newField(r, "content2", "aaa bbb ccc ddd", DocCopyIterator.custom2));
-      doc.add(newField(r, "content3", "aaa bbb ccc ddd", DocCopyIterator.custom3));
-
-      doc.add(newTextField(r, "content4", "aaa bbb ccc ddd", Field.Store.NO));
-      doc.add(newStringField(r, "content5", "aaa bbb ccc ddd", Field.Store.NO));
-      doc.add(new NumericDocValuesField("numericdv", 5));
-      doc.add(new BinaryDocValuesField("binarydv", new BytesRef("hello")));
-      doc.add(new SortedDocValuesField("sorteddv", new BytesRef("world")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("hellllo")));
-      doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("again")));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 10));
-      doc.add(new SortedNumericDocValuesField("sortednumericdv", 5));
-
-      doc.add(newField(r, "content7", "aaa bbb ccc ddd", DocCopyIterator.custom4));
-
-      final Field idField = newField(r, "id", "", DocCopyIterator.custom2);
-      doc.add(idField);
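+      // Term vectors are now enabled per field on the schema instead of
+      // through custom FieldType instances: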
+      fieldTypes.enableTermVectors("content6");
+      fieldTypes.enableTermVectorPositions("content6");
+      fieldTypes.enableTermVectorOffsets("content6");
+      fieldTypes.enableTermVectors("content4");
+      fieldTypes.enableTermVectorPositions("content4");
+      fieldTypes.enableTermVectorOffsets("content4");
 
       final long stopTime = System.currentTimeMillis() + 500;
 
@@ -176,9 +126,28 @@
         if (VERBOSE) {
           System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
         }
-        doFail.set(this);
+        final Document doc = writer.newDocument();
+        doc.addLargeText("content1", "aaa bbb ccc ddd");
+        doc.addLargeText("content6", "aaa bbb ccc ddd");
+        doc.addLargeText("content2", "aaa bbb ccc ddd");
+        doc.addStoredString("content3", "aaa bbb ccc ddd");
+
+        doc.addLargeText("content4", "aaa bbb ccc ddd");
+        doc.addAtom("content5", "aaa bbb ccc ddd");
+        doc.addInt("numericdv", 5);
+        doc.addBinary("binarydv", new BytesRef("hello"));
+        doc.addShortText("sorteddv", "world");
+        doc.addShortText("sortedsetdv", "hellllo");
+        doc.addShortText("sortedsetdv", "again");
+        doc.addInt("sortednumericdv", 10);
+        doc.addInt("sortednumericdv", 5);
+
+        doc.addAtom("content7", "aaa bbb ccc ddd");
         final String id = ""+r.nextInt(50);
-        idField.setStringValue(id);
+
+        doc.addLargeText("id", id);
+
+        doFail.set(this);
         Term idTerm = new Term("id", id);
         try {
           if (r.nextBoolean()) {
@@ -375,8 +344,8 @@
     Directory dir = newDirectory();
     TestPoint2 testPoint = new TestPoint2();
     IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())), testPoint);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a field", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a field");
     w.addDocument(doc);
     testPoint.doFail = true;
     try {
@@ -412,12 +381,12 @@
                                                       newIndexWriterConfig(analyzer)
                                                         .setMaxBufferedDocs(2), 
                                                       new TestPoint1());
-    Document doc = new Document();
-    doc.add(newTextField("field", "a field", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a field");
     w.addDocument(doc);
 
-    Document crashDoc = new Document();
-    crashDoc.add(newTextField("crash", "do it on token 4", Field.Store.YES));
+    Document crashDoc = w.newDocument();
+    crashDoc.addLargeText("crash", "do it on token 4");
     doCrash.set(true);
     try {
       w.addDocument(crashDoc);
@@ -456,8 +425,8 @@
     TestPoint3 testPoint = new TestPoint3();
     IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, conf, testPoint);
     testPoint.doFail = true;
-    Document doc = new Document();
-    doc.add(newTextField("field", "a field", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a field");
     for(int i=0;i<10;i++)
       try {
         w.addDocument(doc);
@@ -504,9 +473,9 @@
 
     IndexWriter writer = new IndexWriter(dir, conf);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newTextField("content", contents, Field.Store.NO));
+    doc.addLargeText("content", contents);
     try {
       writer.addDocument(doc);
       fail("did not hit expected exception");
@@ -514,19 +483,19 @@
     }
 
     // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(newTextField("content", "aa bb cc dd", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "aa bb cc dd");
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(newTextField("content", "aa bb cc dd", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("content", "aa bb cc dd");
     writer.addDocument(doc);
 
     writer.close();
     IndexReader reader = DirectoryReader.open(dir);
     final Term t = new Term("content", "aa");
-    assertEquals(3, reader.docFreq(t));
+    assertEquals(2, reader.docFreq(t));
 
     // Make sure the doc that hit the exception was marked
     // as deleted:
@@ -593,9 +562,9 @@
 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setMaxBufferedDocs(2));
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newTextField("content", contents, Field.Store.NO));
+    doc.addLargeText("content", contents);
     boolean hitError = false;
     writer.addDocument(doc);
     try {
@@ -629,18 +598,24 @@
       Directory dir = newDirectory();
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
                                                   .setMergePolicy(newLogMergePolicy()));
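+      // The old DocCopyIterator.custom5 FieldType carried term vectors;
+      // declare the equivalent on the schema for the fields used below: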
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      for(String fieldName : new String[] {"contents", "crash", "other"}) {
+        fieldTypes.enableTermVectors(fieldName);
+        fieldTypes.enableTermVectorPositions(fieldName);
+        fieldTypes.enableTermVectorOffsets(fieldName);
+      }
 
       // don't allow a sudden merge to clean up the deleted
       // doc below:
       LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
       lmp.setMergeFactor(Math.max(lmp.getMergeFactor(), 5));
 
-      Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
+      Document doc = writer.newDocument();
+      doc.addLargeText("contents", "here are some contents");
       writer.addDocument(doc);
       writer.addDocument(doc);
-      doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
-      doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
+      doc.addLargeText("crash", "this should crash after 4 terms");
+      doc.addLargeText("other", "this will not get indexed");
       try {
         writer.addDocument(doc);
         fail("did not hit expected exception");
@@ -652,8 +627,8 @@
       }
 
       if (0 == i) {
-        doc = new Document();
-        doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
+        doc = writer.newDocument();
+        doc.addLargeText("contents", "here are some contents");
         writer.addDocument(doc);
         writer.addDocument(doc);
       }
@@ -665,7 +640,7 @@
       IndexReader reader = DirectoryReader.open(dir);
       if (i == 0) { 
         int expected = 5;
-        assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+        assertEquals(expected-1, reader.docFreq(new Term("contents", "here")));
         assertEquals(expected, reader.maxDoc());
         int numDel = 0;
         final Bits liveDocs = MultiFields.getLiveDocs(reader);
@@ -684,10 +659,11 @@
 
       writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
                                       .setMaxBufferedDocs(10));
-      doc = new Document();
-      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
-      for(int j=0;j<17;j++)
+      doc = writer.newDocument();
+      doc.addLargeText("contents", "here are some contents");
+      for(int j=0;j<17;j++) {
         writer.addDocument(doc);
+      }
       writer.forceMerge(1);
       writer.close();
 
@@ -730,6 +706,12 @@
             .setMergePolicy(NoMergePolicy.INSTANCE));
         // don't use a merge policy here they depend on the DWPThreadPool and its max thread states etc.
         final int finalI = i;
+        FieldTypes fieldTypes = writer.getFieldTypes();
+        for(String fieldName : new String[] {"contents", "crash", "other"}) {
+          fieldTypes.enableTermVectors(fieldName);
+          fieldTypes.enableTermVectorPositions(fieldName);
+          fieldTypes.enableTermVectorOffsets(fieldName);
+        }
 
         Thread[] threads = new Thread[NUM_THREAD];
         for(int t=0;t<NUM_THREAD;t++) {
@@ -738,12 +720,12 @@
               public void run() {
                 try {
                   for(int iter=0;iter<NUM_ITER;iter++) {
-                    Document doc = new Document();
-                    doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
+                    Document doc = writer.newDocument();
+                    doc.addLargeText("contents", "here are some contents");
                     writer.addDocument(doc);
                     writer.addDocument(doc);
-                    doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
-                    doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
+                    doc.addLargeText("crash", "this should crash after 4 terms");
+                    doc.addLargeText("other", "this will not get indexed");
                     try {
                       writer.addDocument(doc);
                       fail("did not hit expected exception");
@@ -751,8 +733,8 @@
                     }
 
                     if (0 == finalI) {
-                      doc = new Document();
-                      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
+                      doc = writer.newDocument();
+                      doc.addLargeText("contents", "here are some contents");
                       writer.addDocument(doc);
                       writer.addDocument(doc);
                     }
@@ -769,16 +751,17 @@
           threads[t].start();
         }
 
-        for(int t=0;t<NUM_THREAD;t++)
+        for(int t=0;t<NUM_THREAD;t++) {
           threads[t].join();
+        }
 
         writer.close();
       }
 
       IndexReader reader = DirectoryReader.open(dir);
       int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
-      assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
+      assertEquals("i=" + i, expected - NUM_THREAD*NUM_ITER, reader.docFreq(new Term("contents", "here")));
+      assertEquals("i=" + i, expected, reader.maxDoc());
       int numDel = 0;
       final Bits liveDocs = MultiFields.getLiveDocs(reader);
       assertNotNull(liveDocs);
@@ -796,10 +779,11 @@
 
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
                                                   .setMaxBufferedDocs(10));
-      Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
-      for(int j=0;j<17;j++)
+      Document doc = writer.newDocument();
+      doc.addLargeText("contents", "here are some contents");
+      for(int j=0;j<17;j++) {
         writer.addDocument(doc);
+      }
       writer.forceMerge(1);
       writer.close();
 
@@ -841,11 +825,10 @@
 
   // TODO: these are also in TestIndexWriter... add a simple doc-writing method
   // like this to LuceneTestCase?
-  private void addDoc(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
-      writer.addDocument(doc);
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    writer.addDocument(doc);
   }
 
   // LUCENE-1044: test exception during sync
@@ -946,8 +929,8 @@
       dir.setFailOnCreateOutput(false);
       dir.setEnableVirusScanner(false); // we check for specific list of files
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      Document doc = new Document();
-      doc.add(newTextField("field", "a field", Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addLargeText("field", "a field");
       w.addDocument(doc);
       dir.failOn(failure);
       try {
@@ -1041,12 +1024,13 @@
 
     final AtomicBoolean thrown = new AtomicBoolean(false);
     final Directory dir = newDirectory();
+    final AtomicBoolean doFail = new AtomicBoolean(false);
     final IndexWriter writer = new IndexWriter(dir,
         newIndexWriterConfig(new MockAnalyzer(random()))
           .setInfoStream(new InfoStream() {
         @Override
         public void message(String component, final String message) {
-          if (message.contains("startFullFlush") && thrown.compareAndSet(false, true)) {
+          if (doFail.get() && message.contains("startFullFlush") && thrown.compareAndSet(false, true)) {
             throw new OutOfMemoryError("fake OOME at " + message);
           }
         }
@@ -1059,7 +1043,8 @@
         @Override
         public void close() {}
       }));
-    writer.addDocument(new Document());
+    doFail.set(true);
+    writer.addDocument(writer.newDocument());
 
     try {
       writer.commit();
@@ -1074,7 +1059,7 @@
     }
 
     try {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
     } catch (AlreadyClosedException ace) {
       // expected
     }
@@ -1277,16 +1262,15 @@
       for (FailOnTermVectors failure : failures) {
         MockDirectoryWrapper dir = newMockDirectory();
         IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+        FieldTypes fieldTypes = w.getFieldTypes();
         dir.failOn(failure);
         int numDocs = 10 + random().nextInt(30);
         for (int i = 0; i < numDocs; i++) {
-          Document doc = new Document();
-          // random TV
-          Field field = newTextField(random(), "field", "a field", Field.Store.YES);
-          doc.add(field);
+          Document doc = w.newDocument();
+          doc.addLargeText("field", "a field");
           try {
             w.addDocument(doc);
-            assertFalse(field.fieldType().storeTermVectors());
+            assertFalse(fieldTypes.getTermVectors("field"));
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
             // This is an aborting exception, so writer is closed:
@@ -1301,18 +1285,17 @@
           }
             
         }
-        Document document = new Document();
-        document.add(new TextField("field", "a field", Field.Store.YES));
+        Document document = w.newDocument();
+        document.addLargeText("field", "a field");
         w.addDocument(document);
 
         for (int i = 0; i < numDocs; i++) {
-          Document doc = new Document();
-          Field field = newTextField(random(), "field", "a field", Field.Store.YES);
-          doc.add(field);
+          Document doc = w.newDocument();
+          doc.addLargeText("field", "a field");
           // random TV
           try {
             w.addDocument(doc);
-            assertFalse(field.fieldType().storeTermVectors());
+            assertFalse(fieldTypes.getTermVectors("field"));
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
           }
@@ -1321,8 +1304,8 @@
             TestUtil.checkIndex(dir);
           }
         }
-        document = new Document();
-        document.add(new TextField("field", "a field", Field.Store.YES));
+        document = w.newDocument();
+        document.addLargeText("field", "a field");
         w.addDocument(document);
         w.close();
         IndexReader reader = DirectoryReader.open(dir);
@@ -1371,24 +1354,22 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     final int numDocs1 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "good content", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "good content");
       w.addDocument(doc);
     }
     
     final List<Document> docs = new ArrayList<>();
     for(int docCount=0;docCount<7;docCount++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       docs.add(doc);
-      doc.add(newStringField("id", docCount+"", Field.Store.NO));
-      doc.add(newTextField("content", "silly content " + docCount, Field.Store.NO));
+      doc.addAtom("id", docCount+"");
+      doc.addLargeText("content", "silly content " + docCount);
       if (docCount == 4) {
-        Field f = newTextField("crash", "", Field.Store.NO);
-        doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
         tokenizer.setReader(new StringReader("crash me on the 4th token"));
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
-        f.setTokenStream(new CrashingFilter("crash", tokenizer));
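+        // addLargeText accepts the TokenStream directly, so no pre-built
+        // Field or setTokenStream call is needed: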
+        doc.addLargeText("crash", new CrashingFilter("crash", tokenizer));
       }
     }
     try {
@@ -1402,8 +1383,8 @@
 
     final int numDocs2 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs2;docCount++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "good content", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "good content");
       w.addDocument(doc);
     }
 
@@ -1430,8 +1411,8 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     final int numDocs1 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "good content", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "good content");
       w.addDocument(doc);
     }
 
@@ -1439,18 +1420,18 @@
     final List<Document> docs = new ArrayList<>();
     final int numDocs2 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs2;docCount++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       docs.add(doc);
-      doc.add(newStringField("subid", "subs", Field.Store.NO));
-      doc.add(newStringField("id", docCount+"", Field.Store.NO));
-      doc.add(newTextField("content", "silly content " + docCount, Field.Store.NO));
+      doc.addAtom("subid", "subs");
+      doc.addAtom("id", docCount+"");
+      doc.addLargeText("content", "silly content " + docCount);
     }
     w.addDocuments(docs);
 
     final int numDocs3 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs3;docCount++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "good content", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "good content");
       w.addDocument(doc);
     }
 
@@ -1458,17 +1439,15 @@
     final int limit = TestUtil.nextInt(random(), 2, 25);
     final int crashAt = random().nextInt(limit);
     for(int docCount=0;docCount<limit;docCount++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       docs.add(doc);
-      doc.add(newStringField("id", docCount+"", Field.Store.NO));
-      doc.add(newTextField("content", "silly content " + docCount, Field.Store.NO));
+      doc.addAtom("id", docCount+"");
+      doc.addLargeText("content", "silly content " + docCount);
       if (docCount == crashAt) {
-        Field f = newTextField("crash", "", Field.Store.NO);
-        doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
         tokenizer.setReader(new StringReader("crash me on the 4th token"));
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
-        f.setTokenStream(new CrashingFilter("crash", tokenizer));
+        doc.addLargeText("crash", new CrashingFilter("crash", tokenizer));
       }
     }
 
@@ -1483,8 +1462,8 @@
 
     final int numDocs4 = random().nextInt(25);
     for(int docCount=0;docCount<numDocs4;docCount++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "good content", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "good content");
       w.addDocument(doc);
     }
 
@@ -1511,39 +1490,16 @@
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
-    iw.addDocument(doc);
+    iw.addDocument(iw.newDocument());
+
+    Document doc = iw.newDocument();
+    String value = null;
     try {
       // set to null value
-      String value = null;
-      doc.add(new StoredField("foo", value));
-      iw.addDocument(doc);
+      doc.addStoredString("foo", value);
       fail("didn't get expected exception");
-    } catch (IllegalArgumentException expected) {}
-    iw.close();
-    // make sure we see our good doc
-    DirectoryReader r = DirectoryReader.open(dir);
-    assertEquals(1, r.numDocs());
-    r.close();
-    dir.close();
-  }
-  
-  /** test a null string value doesn't abort the entire segment */
-  public void testNullStoredFieldReuse() throws Exception {
-    Directory dir = newDirectory();
-    Analyzer analyzer = new MockAnalyzer(random());
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
-    // add good document
-    Document doc = new Document();
-    Field theField = new StoredField("foo", "hello", StoredField.TYPE);
-    doc.add(theField);
-    iw.addDocument(doc);
-    try {
-      // set to null value
-      theField.setStringValue(null);
-      iw.addDocument(doc);
-      fail("didn't get expected exception");
-    } catch (IllegalArgumentException expected) {}
+    } catch (IllegalArgumentException expected) {
+    }
     iw.close();
     // make sure we see our good doc
     DirectoryReader r = DirectoryReader.open(dir);
@@ -1558,42 +1514,16 @@
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
-    iw.addDocument(doc);
+    iw.addDocument(iw.newDocument());
 
+    Document doc = iw.newDocument();
+    byte[] v = null;
     try {
       // set to null value
-      byte v[] = null;
-      Field theField = new StoredField("foo", v);
-      doc.add(theField);
-      iw.addDocument(doc);
+      doc.addStoredBinary("foo", v);
       fail("didn't get expected exception");
-    } catch (NullPointerException expected) {}
-    iw.close();
-    // make sure we see our good doc
-    DirectoryReader r = DirectoryReader.open(dir);
-    assertEquals(1, r.numDocs());
-    r.close();
-    dir.close();
-  }
-  
-  /** test a null byte[] value doesn't abort the entire segment */
-  public void testNullStoredBytesFieldReuse() throws Exception {
-    Directory dir = newDirectory();
-    Analyzer analyzer = new MockAnalyzer(random());
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
-    // add good document
-    Document doc = new Document();
-    Field theField = new StoredField("foo", new BytesRef("hello").bytes);
-    doc.add(theField);
-    iw.addDocument(doc);
-    try {
-      // set to null value
-      byte v[] = null;
-      theField.setBytesValue(v);
-      iw.addDocument(doc);
-      fail("didn't get expected exception");
-    } catch (NullPointerException expected) {}
+    } catch (NullPointerException expected) {
+    }
     iw.close();
     // make sure we see our good doc
     DirectoryReader r = DirectoryReader.open(dir);
@@ -1608,42 +1538,17 @@
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
-    iw.addDocument(doc);
+    iw.addDocument(iw.newDocument());
 
+    BytesRef v = null;
+    Document doc = iw.newDocument();
     try {
       // set to null value
-      BytesRef v = null;
-      Field theField = new StoredField("foo", v);
-      doc.add(theField);
+      doc.addStoredBinary("foo", v);
       iw.addDocument(doc);
       fail("didn't get expected exception");
-    } catch (IllegalArgumentException expected) {}
-    iw.close();
-    // make sure we see our good doc
-    DirectoryReader r = DirectoryReader.open(dir);
-    assertEquals(1, r.numDocs());
-    r.close();
-    dir.close();
-  }
-  
-  /** test a null bytesref value doesn't abort the entire segment */
-  public void testNullStoredBytesRefFieldReuse() throws Exception {
-    Directory dir = newDirectory();
-    Analyzer analyzer = new MockAnalyzer(random());
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
-    // add good document
-    Document doc = new Document();
-    Field theField = new StoredField("foo", new BytesRef("hello"));
-    doc.add(theField);
-    iw.addDocument(doc);
-    try {
-      // set to null value
-      BytesRef v = null;
-      theField.setBytesValue(v);
-      iw.addDocument(doc);
-      fail("didn't get expected exception");
-    } catch (IllegalArgumentException expected) {}
+    } catch (IllegalArgumentException expected) {
+    }
     iw.close();
     // make sure we see our good doc
     DirectoryReader r = DirectoryReader.open(dir);
@@ -1667,11 +1572,14 @@
     };
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("foo");
+    Document doc = iw.newDocument();
     iw.addDocument(doc);
+
+    doc.addLargeText("foo", "bar");
+    doc.addLargeText("foo", "bar");
     try {
-      doc.add(newTextField("foo", "bar", Field.Store.NO));
-      doc.add(newTextField("foo", "bar", Field.Store.NO));
       iw.addDocument(doc);
       fail("didn't get expected exception");
     } catch (IllegalArgumentException expected) {}
@@ -1707,7 +1615,7 @@
     UOEDirectory uoe = new UOEDirectory();
     Directory d = new MockDirectoryWrapper(random(), uoe);
     IndexWriter iw = new IndexWriter(d, newIndexWriterConfig(null));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.close();
     uoe.doFail = true;
     try {
@@ -1723,7 +1631,7 @@
   public void testIllegalPositions() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     Token t1 = new Token("foo", 0, 3);
     t1.setPositionIncrement(Integer.MAX_VALUE);
     Token t2 = new Token("bar", 4, 7);
@@ -1731,8 +1639,7 @@
     TokenStream overflowingTokenStream = new CannedTokenStream(
         new Token[] { t1, t2 }
     );
-    Field field = new TextField("foo", overflowingTokenStream);
-    doc.add(field);
+    doc.addLargeText("foo", overflowingTokenStream);
     try {
       iw.addDocument(doc);
       fail();
@@ -1746,7 +1653,7 @@
   public void testLegalbutVeryLargePositions() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     Token t1 = new Token("foo", 0, 3);
     t1.setPositionIncrement(Integer.MAX_VALUE-500);
     if (random().nextBoolean()) {
@@ -1755,8 +1662,7 @@
     TokenStream overflowingTokenStream = new CannedTokenStream(
         new Token[] { t1 }
     );
-    Field field = new TextField("foo", overflowingTokenStream);
-    doc.add(field);
+    doc.addLargeText("foo", overflowingTokenStream);
     iw.addDocument(doc);
     iw.close();
     dir.close();
@@ -1767,21 +1673,19 @@
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter iw = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(new StringField("field1", "sometext", Field.Store.YES));
-    doc.add(new TextField("field2", "sometext", Field.Store.NO));
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addAtom("field1", "sometext");
+    doc.addLargeText("field2", "sometext");
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc); // add an 'ok' document
     try {
-      doc = new Document();
       // try to boost with norms omitted
-      IndexDocument docList = new IndexDocument() {
+      Iterable<IndexableField> doc2 = new Iterable<IndexableField>() {
         
         List<IndexableField> list = new ArrayList<>();
-        List<StorableField> storedList = new ArrayList<>();
         
         @Override
-        public Iterable<IndexableField> indexableFields() {
+        public Iterator<IndexableField> iterator() {
           if (list.size() == 0) {
             list.add(new IndexableField() {
               @Override
@@ -1791,30 +1695,34 @@
 
               @Override
               public IndexableFieldType fieldType() {
-                return StringField.TYPE_NOT_STORED;
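+                // Minimal inline type: unstored, norms omitted, docs-only
+                // indexing, so the non-default boost below must be rejected: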
+                return new IndexableFieldType() {
+                  @Override
+                  public boolean stored() {
+                    return false;
+                  }
+
+                  @Override
+                  public boolean omitNorms() {
+                    return true;
+                  }
+
+                  @Override
+                  public IndexOptions indexOptions() {
+                    return IndexOptions.DOCS;
+                  }
+                };
               }
 
               @Override
               public float boost() {
                 return 5f;
               }
-
-              @Override
-              public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
-                return null;
-              }
             });
           }
-          return list;
+          return list.iterator();
         }
-
-        @Override
-        public Iterable<StorableField> storableFields() {
-          return storedList;
-        }
-        
       };
-      iw.addDocument(docList);
+      iw.addDocument(doc2);
       fail("didn't get any exception, boost silently discarded");
     } catch (UnsupportedOperationException expected) {
       // expected
@@ -1858,8 +1766,8 @@
     // Create an index with one document
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter iw = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc); // add a document
     iw.commit();
     DirectoryReader ir = DirectoryReader.open(dir);
@@ -1978,14 +1886,17 @@
         // forceMerge can easily return when there are still
         // too many segments in the index:
         w.setDoRandomForceMergeAssert(false);
+        FieldTypes fieldTypes = w.getFieldTypes();
+        fieldTypes.disableSorting("bf");
+        fieldTypes.disableSorting("bcf");
       }
       for(int i=0;i<numDocs;i++) {
-        Document doc = new Document();
-        doc.add(new StringField("id", ""+(docBase+i), Field.Store.NO));
-        doc.add(new NumericDocValuesField("f", 1L));
-        doc.add(new NumericDocValuesField("cf", 2L));
-        doc.add(new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(1L)));
-        doc.add(new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(2L)));
+        Document doc = w.newDocument();
+        doc.addAtom("id", ""+(docBase+i));
+        doc.addLong("f", 1L);
+        doc.addLong("cf", 2L);
+        doc.addBinary("bf", TestBinaryDocValuesUpdates.toBytes(1L));
+        doc.addBinary("bcf", TestBinaryDocValuesUpdates.toBytes(2L));
         w.addDocument(doc);
       }
       docCount += numDocs;
@@ -2009,18 +1920,21 @@
                 System.out.println("  update id=" + docid + " to value " + value);
               }
               Term idTerm = new Term("id", Integer.toString(docid));
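+              // Doc-values updates are now expressed as a Document carrying
+              // only the fields to change: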
+              Document update = w.newDocument();
+              update.disableExistsField();
               if (random().nextBoolean()) { // update only numeric field
-                w.updateDocValues(idTerm, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value*2));
+                update.addLong("f", value);
+                update.addLong("cf", value*2);
               } else if (random().nextBoolean()) {
-                w.updateDocValues(idTerm, new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)),
-                    new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value*2)));
+                update.addBinary("bf", TestBinaryDocValuesUpdates.toBytes(value));
+                update.addBinary("bcf", TestBinaryDocValuesUpdates.toBytes(value*2));
               } else {
-                w.updateDocValues(idTerm, 
-                    new NumericDocValuesField("f", value), 
-                    new NumericDocValuesField("cf", value*2),
-                    new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)),
-                    new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value*2)));
+                update.addLong("f", value);
+                update.addLong("cf", value*2);
+                update.addBinary("bf", TestBinaryDocValuesUpdates.toBytes(value));
+                update.addBinary("bcf", TestBinaryDocValuesUpdates.toBytes(value*2));
               }
+              w.updateDocValues(idTerm, update);
             }
             
             // sometimes do both deletes and updates
@@ -2142,10 +2056,10 @@
   public void testTooManyTokens() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    doc.add(new Field("foo", new TokenStream() {
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setIndexOptions("foo", IndexOptions.DOCS_AND_FREQS);
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", new TokenStream() {
       CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
       PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
       long num = 0;
@@ -2168,7 +2082,7 @@
         }
         return true;
       }
-    }, ft));
+    });
     try {
       iw.addDocument(doc);
       fail("didn't hit exception");
@@ -2208,7 +2122,7 @@
     IndexWriter iw = new IndexWriter(dir, iwc);
     // TODO: cutover to RandomIndexWriter.mockIndexWriter?
     iw.enableTestPoints = true;
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     for (int i = 0; i < 10; i++) {
       iw.addDocument(doc);
     }
@@ -2279,7 +2193,7 @@
       
       IndexWriterConfig iwc = new IndexWriterConfig(null);
       IndexWriter iw = new IndexWriter(dir, iwc);
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       for (int i = 0; i < 10; i++) {
         iw.addDocument(doc);
       }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java
index 58e3ac7..565dbc2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java
@@ -30,16 +30,8 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.cranky.CrankyCodec;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -49,6 +41,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Rethrow;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 
 /** 
  * Causes a bunch of non-aborting and aborting exceptions and checks that
@@ -102,29 +95,55 @@
     int numDocs = atLeast(500);
     
     IndexWriter iw = new IndexWriter(dir, conf);
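+    // This schema block is re-applied each time the writer is reopened
+    // after an aborting exception (see the catch blocks below):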
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("text_vectors");
+    fieldTypes.disableSorting("dv2");
+    fieldTypes.setMultiValued("dv4");
+    fieldTypes.setMultiValued("dv5");
+    fieldTypes.setMultiValued("stored1");
+
     try {
       boolean allowAlreadyClosed = false;
       for (int i = 0; i < numDocs; i++) {
         // TODO: add crankyDocValuesFields, etc
-        Document doc = new Document();
-        doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
-        doc.add(new NumericDocValuesField("dv", i));
-        doc.add(new BinaryDocValuesField("dv2", new BytesRef(Integer.toString(i))));
-        doc.add(new SortedDocValuesField("dv3", new BytesRef(Integer.toString(i))));
-        doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i))));
-        doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i-1))));
-        doc.add(new SortedNumericDocValuesField("dv5", i));
-        doc.add(new SortedNumericDocValuesField("dv5", i-1));
-        doc.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
+        Document doc;
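+        // newDocument itself can throw AlreadyClosedException if a prior
+        // aborting exception closed the writer: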
+        try {
+          doc = iw.newDocument();
+        } catch (AlreadyClosedException ace) {
+          // OK: writer was closed by abort; we just reopen now:
+          assertTrue(iw.deleter.isClosed());
+          assertTrue(allowAlreadyClosed);
+          allowAlreadyClosed = false;
+          conf = newIndexWriterConfig(analyzer);
+          // just for now, try to keep this test reproducible
+          conf.setMergeScheduler(new SerialMergeScheduler());
+          conf.setCodec(codec);
+          iw = new IndexWriter(dir, conf);            
+          fieldTypes = iw.getFieldTypes();
+          fieldTypes.enableTermVectors("text_vectors");
+          fieldTypes.disableSorting("dv2");
+          fieldTypes.setMultiValued("dv4");
+          fieldTypes.setMultiValued("dv5");
+          fieldTypes.setMultiValued("stored1");
+          continue;
+        }
+
+        doc.addAtom("id", Integer.toString(i));
+        doc.addInt("dv", i);
+        doc.addBinary("dv2", new BytesRef(Integer.toString(i)));
+        doc.addShortText("dv3", Integer.toString(i));
+        doc.addShortText("dv4", Integer.toString(i));
+        doc.addShortText("dv4", Integer.toString(i-1));
+        doc.addInt("dv5", i);
+        doc.addInt("dv5", i-1);
+        doc.addLargeText("text1", TestUtil.randomAnalysisString(random(), 20, true));
         // ensure we store something
-        doc.add(new StoredField("stored1", "foo"));
-        doc.add(new StoredField("stored1", "bar"));    
+        doc.addStoredString("stored1", "foo");
+        doc.addStoredString("stored1", "bar");
         // ensure we get some payloads
-        doc.add(newTextField("text_payloads", TestUtil.randomAnalysisString(random(), 6, true), Field.Store.NO));
+        doc.addLargeText("text_payloads", TestUtil.randomAnalysisString(random(), 6, true));
         // ensure we get some vectors
-        FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-        ft.setStoreTermVectors(true);
-        doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
+        doc.addLargeText("text_vectors", TestUtil.randomAnalysisString(random(), 6, true));
         
         if (random().nextInt(10) > 0) {
           // single doc
@@ -149,6 +168,13 @@
             conf.setMergeScheduler(new SerialMergeScheduler());
             conf.setCodec(codec);
             iw = new IndexWriter(dir, conf);            
+            fieldTypes = iw.getFieldTypes();
+            fieldTypes.enableTermVectors("text_vectors");
+            fieldTypes.disableSorting("dv2");
+            fieldTypes.setMultiValued("dv4");
+            fieldTypes.setMultiValued("dv5");
+            fieldTypes.setMultiValued("stored1");
+
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
@@ -160,12 +186,12 @@
           }
         } else {
           // block docs
-          Document doc2 = new Document();
-          doc2.add(newStringField("id", Integer.toString(-i), Field.Store.NO));
-          doc2.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
-          doc2.add(new StoredField("stored1", "foo"));
-          doc2.add(new StoredField("stored1", "bar"));
-          doc2.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
+          Document doc2 = iw.newDocument();
+          doc2.addAtom("id", Integer.toString(-i));
+          doc2.addLargeText("text1", TestUtil.randomAnalysisString(random(), 20, true));
+          doc2.addStoredString("stored1", "foo");
+          doc2.addStoredString("stored1", "bar");
+          doc2.addLargeText("text_vectors", TestUtil.randomAnalysisString(random(), 6, true));
           
           try {
             iw.addDocuments(Arrays.asList(doc, doc2));
@@ -183,6 +209,12 @@
             conf.setMergeScheduler(new SerialMergeScheduler());
             conf.setCodec(codec);
             iw = new IndexWriter(dir, conf);            
+            fieldTypes = iw.getFieldTypes();
+            fieldTypes.enableTermVectors("text_vectors");
+            fieldTypes.disableSorting("dv2");
+            fieldTypes.setMultiValued("dv4");
+            fieldTypes.setMultiValued("dv5");
+            fieldTypes.setMultiValued("stored1");
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
@@ -221,6 +253,12 @@
             conf.setMergeScheduler(new SerialMergeScheduler());
             conf.setCodec(codec);
             iw = new IndexWriter(dir, conf);            
+            fieldTypes = iw.getFieldTypes();
+            fieldTypes.enableTermVectors("text_vectors");
+            fieldTypes.disableSorting("dv2");
+            fieldTypes.setMultiValued("dv4");
+            fieldTypes.setMultiValued("dv5");
+            fieldTypes.setMultiValued("stored1");
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
index fcbc461..7672e15 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
@@ -21,7 +21,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -33,8 +32,6 @@
 
     Directory dir = newDirectory();
 
-    final Document doc = new Document();
-    doc.add(newStringField("content", "aaa", Field.Store.NO));
     final int incrMin = TEST_NIGHTLY ? 15 : 40;
     for(int numDocs=10;numDocs<500;numDocs += TestUtil.nextInt(random(), incrMin, 5 * incrMin)) {
       LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -44,8 +41,11 @@
                                                   .setOpenMode(OpenMode.CREATE)
                                                   .setMaxBufferedDocs(2)
                                                   .setMergePolicy(ldmp));
-      for(int j=0;j<numDocs;j++)
+      final Document doc = writer.newDocument();
+      doc.addAtom("content", "aaa");
+      for(int j=0;j<numDocs;j++) {
         writer.addDocument(doc);
+      }
       writer.close();
 
       SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
@@ -72,9 +72,6 @@
   public void testMaxNumSegments2() throws IOException {
     Directory dir = newDirectory();
 
-    final Document doc = new Document();
-    doc.add(newStringField("content", "aaa", Field.Store.NO));
-
     LogDocMergePolicy ldmp = new LogDocMergePolicy();
     ldmp.setMinMergeDocs(1);
     ldmp.setMergeFactor(4);
@@ -83,9 +80,13 @@
                                                 .setMergePolicy(ldmp)
                                                 .setMergeScheduler(new ConcurrentMergeScheduler()));
     
+    final Document doc = writer.newDocument();
+    doc.addAtom("content", "aaa");
+
     for(int iter=0;iter<10;iter++) {
-      for(int i=0;i<19;i++)
+      for(int i=0;i<19;i++) {
         writer.addDocument(doc);
+      }
 
       writer.commit();
       writer.waitForMerges();
@@ -200,10 +201,11 @@
               .setMaxBufferedDocs(2)
               .setMergePolicy(newLogMergePolicy(51))
       );
-      Document doc = new Document();
-      doc.add(newStringField("field", "aaa", Field.Store.NO));
-      for(int i=0;i<100;i++)
+      Document doc = writer.newDocument();
+      doc.addAtom("field", "aaa");
+      for(int i=0;i<100;i++) {
         writer.addDocument(doc);
+      }
       writer.forceMerge(1, false);
 
       if (0 == pass) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
index 2fdbfea..58fa981 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMaxDocs.java
@@ -21,7 +21,7 @@
 import java.util.Collections;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
@@ -45,9 +45,9 @@
   public void testExactlyAtTrueLimit() throws Exception {
     Directory dir = newFSDirectory(createTempDir("2BDocs3"));
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(newStringField("field", "text", Field.Store.NO));
     for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
+      Document doc = iw.newDocument();
+      doc.addAtom("field", "text");
       iw.addDocument(doc);
       /*
       if (i%1000000 == 0) {
@@ -86,12 +86,12 @@
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
       for(int i=0;i<10;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -109,12 +109,12 @@
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
       for(int i=0;i<10;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.addDocuments(Collections.singletonList(new Document()));
+        w.addDocuments(Collections.singletonList(w.newDocument()));
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -132,12 +132,12 @@
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
       for(int i=0;i<10;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.updateDocument(new Term("field", "foo"), new Document());
+        w.updateDocument(new Term("field", "foo"), w.newDocument());
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -155,12 +155,12 @@
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
       for(int i=0;i<10;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.updateDocuments(new Term("field", "foo"), Collections.singletonList(new Document()));
+        w.updateDocuments(new Term("field", "foo"), Collections.singletonList(w.newDocument()));
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -177,15 +177,16 @@
     try {
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
+      FieldTypes fieldTypes = w.getFieldTypes();
       for(int i=0;i<10;i++) {
-        Document doc = new Document();
-        doc.add(newStringField("id", ""+i, Field.Store.NO));
+        Document doc = w.newDocument();
+        doc.addUniqueInt("id", i);
         w.addDocument(doc);
       }
 
       // Delete 5 of them:
       for(int i=0;i<5;i++) {
-        w.deleteDocuments(new Term("id", ""+i));
+        w.deleteDocuments(fieldTypes.newIntTerm("id", i));
       }
 
       w.forceMerge(1);
@@ -194,12 +195,12 @@
 
       // Add 5 more docs
       for(int i=0;i<5;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -219,9 +220,11 @@
       IndexWriterConfig iwc = new IndexWriterConfig(null);
       iwc.setMergePolicy(NoMergePolicy.INSTANCE);
       IndexWriter w = new IndexWriter(dir, iwc);
+      FieldTypes fieldTypes = w.getFieldTypes();
+
       for(int i=0;i<10;i++) {
-        Document doc = new Document();
-        doc.add(newStringField("id", ""+i, Field.Store.NO));
+        Document doc = w.newDocument();
+        doc.addUniqueInt("id", i);
         w.addDocument(doc);
         if (i % 2 == 0) {
           // Make a new segment every 2 docs:
@@ -231,7 +234,7 @@
 
       // Delete 5 of them:
       for(int i=0;i<5;i++) {
-        w.deleteDocuments(new Term("id", ""+i));
+        w.deleteDocuments(fieldTypes.newIntTerm("id", i));
       }
 
       w.forceMerge(1);
@@ -240,12 +243,12 @@
 
       // Add 5 more docs
       for(int i=0;i<5;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
 
       // 11th document should fail:
       try {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
         fail("didn't hit exception");
       } catch (IllegalStateException ise) {
         // expected
@@ -263,13 +266,13 @@
       Directory dir = newDirectory();
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
       for(int i=0;i<10;i++) {
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
       }
       w.close();
 
       Directory dir2 = newDirectory();
       IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(null));
-      w2.addDocument(new Document());
+      w2.addDocument(w2.newDocument());
       try {
         w2.addIndexes(new Directory[] {dir});
         fail("didn't hit exception");
@@ -296,8 +299,8 @@
   // Make sure MultiReader lets you search exactly the limit number of docs:
   public void testMultiReaderExactLimit() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
     IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
+    Document doc = w.newDocument();
     for (int i = 0; i < 100000; i++) {
       w.addDocument(doc);
     }
@@ -306,6 +309,7 @@
     int remainder = IndexWriter.MAX_DOCS % 100000;
     Directory dir2 = newDirectory();
     w = new IndexWriter(dir2, new IndexWriterConfig(null));
+    doc = w.newDocument();
     for (int i = 0; i < remainder; i++) {
       w.addDocument(doc);
     }
@@ -331,8 +335,8 @@
   // Make sure MultiReader is upset if you exceed the limit
   public void testMultiReaderBeyondLimit() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
     IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
+    Document doc = w.newDocument();
     for (int i = 0; i < 100000; i++) {
       w.addDocument(doc);
     }
@@ -345,6 +349,7 @@
 
     Directory dir2 = newDirectory();
     w = new IndexWriter(dir2, new IndexWriterConfig(null));
+    doc = w.newDocument();
     for (int i = 0; i < remainder; i++) {
       w.addDocument(doc);
     }
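
A minimal sketch of the pattern these hunks migrate to, using only branch API that appears above (IndexWriter.newDocument, addUniqueInt, addAtom, FieldTypes.newIntTerm); the RAMDirectory and the null analyzer are illustrative choices, not part of the change:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class NewDocumentApiSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        // A null analyzer suffices for atom/int fields, as in the tests above:
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
        FieldTypes fieldTypes = w.getFieldTypes();
        for (int i = 0; i < 10; i++) {
          Document doc = w.newDocument();  // documents are created by the writer now
          doc.addUniqueInt("id", i);       // typed unique key, replaces newStringField("id", ""+i, ...)
          doc.addAtom("field", "text");    // un-analyzed single token, replaces StringField
          w.addDocument(doc);
        }
        // Deletes on typed fields go through the schema's term factories:
        w.deleteDocuments(fieldTypes.newIntTerm("id", 5));
        w.close();
        dir.close();
      }
    }
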
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
index f10de0e..93ebc07 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
@@ -21,10 +21,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
-
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestIndexWriterMergePolicy extends LuceneTestCase {
@@ -228,8 +226,8 @@
   }
 
   private void addDoc(IndexWriter writer) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("content", "aaa", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
index 295c1ef..63dcc5c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
@@ -21,9 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
@@ -46,15 +44,13 @@
 
     fillIndex(random(), indexA, 0, num);
     boolean fail = verifyIndex(indexA, 0);
-    if (fail)
-    {
+    if (fail) {
       fail("Index a is invalid");
     }
 
     fillIndex(random(), indexB, num, num);
     fail = verifyIndex(indexB, num);
-    if (fail)
-    {
+    if (fail) {
       fail("Index b is invalid");
     }
 
@@ -85,13 +81,12 @@
     int max = reader.maxDoc();
     for (int i = 0; i < max; i++)
     {
-      StoredDocument temp = reader.document(i);
+      Document temp = reader.document(i);
       //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
       //compare the index doc number to the value that it should be
-      if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
-      {
+      if (temp.getInt("count") != i + startAt) {
         fail = true;
-        System.out.println("Document " + (i + startAt) + " is returning document " + temp.getField("count").stringValue());
+        System.out.println("Document " + (i + startAt) + " is returning document " + temp.getInt("count"));
       }
     }
     reader.close();
@@ -110,9 +105,8 @@
 
     for (int i = start; i < (start + numDocs); i++)
     {
-      Document temp = new Document();
-      temp.add(newStringField("count", (""+i), Field.Store.YES));
-
+      Document temp = writer.newDocument();
+      temp.addUniqueInt("count", i);
       writer.addDocument(temp);
     }
     writer.close();
@@ -125,26 +119,18 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setMaxBufferedDocs(2)
                                                 .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    Document document = new Document();
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termVector");
+    fieldTypes.enableTermVectorOffsets("termVector");
+    fieldTypes.enableTermVectorPositions("termVector");
 
-    FieldType customType1 = new FieldType(TextField.TYPE_STORED);
-    customType1.setTokenized(false);
-    customType1.setStoreTermVectors(true);
-    customType1.setStoreTermVectorPositions(true);
-    customType1.setStoreTermVectorOffsets(true);
-    
-    Field idField = newStringField("id", "", Field.Store.NO);
-    document.add(idField);
-    Field storedField = newField("stored", "stored", customType);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector", customType1);
-    document.add(termVectorField);
     for(int i=0;i<10;i++) {
-      idField.setStringValue("" + i);
-      writer.addDocument(document);
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
+      doc.addStoredString("stored", "stored");
+      doc.addAtom("termVector", "termVector");
+      writer.addDocument(doc);
     }
     writer.close();
 
@@ -156,8 +142,8 @@
     IndexWriterConfig dontMergeConfig = new IndexWriterConfig(new MockAnalyzer(random()))
       .setMergePolicy(NoMergePolicy.INSTANCE);
     writer = new IndexWriter(dir, dontMergeConfig);
-    writer.deleteDocuments(new Term("id", "0"));
-    writer.deleteDocuments(new Term("id", "7"));
+    writer.deleteDocuments(fieldTypes.newIntTerm("id", 0));
+    writer.deleteDocuments(fieldTypes.newIntTerm("id", 7));
     writer.close();
     
     ir = DirectoryReader.open(dir);
@@ -189,26 +175,17 @@
           .setMergePolicy(newLogMergePolicy(50))
     );
 
-    Document document = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termVector");
+    fieldTypes.enableTermVectorOffsets("termVector");
+    fieldTypes.enableTermVectorPositions("termVector");
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-
-    FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType1.setTokenized(false);
-    customType1.setStoreTermVectors(true);
-    customType1.setStoreTermVectorPositions(true);
-    customType1.setStoreTermVectorOffsets(true);
-    
-    Field storedField = newField("stored", "stored", customType);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector", customType1);
-    document.add(termVectorField);
-    Field idField = newStringField("id", "", Field.Store.NO);
-    document.add(idField);
     for(int i=0;i<98;i++) {
-      idField.setStringValue("" + i);
-      writer.addDocument(document);
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
+      doc.addStoredString("stored", "stored");
+      doc.addAtom("termVector", "termVector");
+      writer.addDocument(doc);
     }
     writer.close();
 
@@ -221,7 +198,7 @@
       .setMergePolicy(NoMergePolicy.INSTANCE);
     writer = new IndexWriter(dir, dontMergeConfig);
     for(int i=0;i<98;i+=2) {
-      writer.deleteDocuments(new Term("id", "" + i));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", i));
     }
     writer.close();
     
@@ -256,25 +233,17 @@
             .setMergePolicy(newLogMergePolicy(50))
     );
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termVector");
+    fieldTypes.enableTermVectorOffsets("termVector");
+    fieldTypes.enableTermVectorPositions("termVector");
 
-    FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType1.setTokenized(false);
-    customType1.setStoreTermVectors(true);
-    customType1.setStoreTermVectorPositions(true);
-    customType1.setStoreTermVectorOffsets(true);
-    
-    Document document = new Document();
-    Field storedField = newField("stored", "stored", customType);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector", customType1);
-    document.add(termVectorField);
-    Field idField = newStringField("id", "", Field.Store.NO);
-    document.add(idField);
     for(int i=0;i<98;i++) {
-      idField.setStringValue("" + i);
-      writer.addDocument(document);
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
+      doc.addStoredString("stored", "stored");
+      doc.addAtom("termVector", "termVector");
+      writer.addDocument(doc);
     }
     writer.close();
 
@@ -287,7 +256,7 @@
       .setMergePolicy(NoMergePolicy.INSTANCE);
     writer = new IndexWriter(dir, dontMergeConfig);
     for(int i=0;i<98;i+=2) {
-      writer.deleteDocuments(new Term("id", "" + i));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", i));
     }
     writer.close();
     ir = DirectoryReader.open(dir);
@@ -341,14 +310,13 @@
     lmp.setMaxMergeDocs(20);
     lmp.setMergeFactor(2);
     IndexWriter iw = new IndexWriter(dir, conf);
-    Document document = new Document();
-
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    
-    document.add(newField("tvtest", "a b c", customType));
-    for(int i=0;i<177;i++)
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("tvtest");
+    Document document = iw.newDocument();
+    document.addLargeText("tvtest", "a b c");
+    for(int i=0;i<177;i++) {
       iw.addDocument(document);
+    }
     iw.close();
     dir.close();
   }
@@ -361,13 +329,6 @@
       ((MockDirectoryWrapper) directory).setPreventDoubleWrite(false);
     }
 
-    final Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setTokenized(false);
-
-    Field idField = newField("id", "", customType);
-    doc.add(idField);
-
     for(int pass=0;pass<2;pass++) {
       if (VERBOSE) {
         System.out.println("TEST: pass=" + pass);
@@ -383,6 +344,8 @@
       }
 
       IndexWriter writer = new IndexWriter(directory, conf);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);          
 
       for(int iter=0;iter<10;iter++) {
@@ -390,13 +353,14 @@
           System.out.println("TEST: iter=" + iter);
         }
         for(int j=0;j<199;j++) {
-          idField.setStringValue(Integer.toString(iter*201+j));
+          Document doc = writer.newDocument();
+          doc.addUniqueInt("id", iter*201+j);
           writer.addDocument(doc);
         }
 
         int delID = iter*199;
         for(int j=0;j<20;j++) {
-          writer.deleteDocuments(new Term("id", Integer.toString(delID)));
+          writer.deleteDocuments(fieldTypes.newIntTerm("id", delID));
           delID += 5;
         }
 
@@ -415,7 +379,7 @@
               while(!done) {
                 for(int i=0;i<100;i++) {
                   try {
-                    finalWriter.addDocument(doc);
+                    finalWriter.addDocument(finalWriter.newDocument());
                   } catch (AlreadyClosedException e) {
                     done = true;
                     break;
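
The recurring change in TestIndexWriterMerging: per-field FieldType objects configured with setStoreTermVectors and friends become one-time schema calls on the writer's FieldTypes. A sketch under that assumption; StandardAnalyzer (from analyzers-common) stands in for the tests' MockAnalyzer:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class TermVectorSchemaSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // Field options live on the index-wide schema, set once per field name:
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.enableTermVectors("termVector");
        fieldTypes.enableTermVectorOffsets("termVector");
        fieldTypes.enableTermVectorPositions("termVector");

        for (int i = 0; i < 10; i++) {
          Document doc = writer.newDocument();
          doc.addUniqueInt("id", i);
          doc.addStoredString("stored", "stored"); // stored-only value
          doc.addAtom("termVector", "termVector"); // vectors applied per the schema above
          writer.addDocument(doc);
        }
        writer.close();
        dir.close();
      }
    }
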
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterNRTIsCurrent.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterNRTIsCurrent.java
index 207b170..79a257e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterNRTIsCurrent.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterNRTIsCurrent.java
@@ -22,8 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -90,8 +88,8 @@
       DirectoryReader currentReader = null;
       Random random = LuceneTestCase.random();
       try {
-        Document doc = new Document();
-        doc.add(new TextField("id", "1", Field.Store.NO));
+        Document doc = writer.newDocument();
+        doc.addLargeText("id", "1");
         writer.addDocument(doc);
         holder.reader = currentReader = writer.getReader(true);
         Term term = new Term("id");
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index 4a9fa9c..b89f085 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -22,10 +22,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.LiveDocsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -501,9 +497,8 @@
     // we can do this because we add/delete/add (and dont merge to "nothing")
     w.setKeepFullyDeletedSegments(true);
 
-    Document doc = new Document();
-
-    doc.add(newTextField("f", "doctor who", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("f", "doctor who");
     w.addDocument(doc);
     w.commit();
 
@@ -541,9 +536,8 @@
                                                 .setCommitOnClose(false));
     writer.commit(); // empty commit, to not create confusing situation with first commit
     dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
-    final Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
+    final Document doc = writer.newDocument();
+    doc.addLargeText("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj");
     try {
       writer.addDocument(doc);
       fail("did not hit disk full");
@@ -556,20 +550,18 @@
   
   // TODO: these are also in TestIndexWriter... add a simple doc-writing method
   // like this to LuceneTestCase?
-  private void addDoc(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
-      doc.add(new NumericDocValuesField("numericdv", 1));
-      writer.addDocument(doc);
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    doc.addInt("numericdv", 1);
+    writer.addDocument(doc);
   }
   
-  private void addDocWithIndex(IndexWriter writer, int index) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa " + index, Field.Store.NO));
-      doc.add(newTextField("id", "" + index, Field.Store.NO));
-      doc.add(new NumericDocValuesField("numericdv", 1));
-      writer.addDocument(doc);
+  private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa " + index);
+    doc.addLargeText("id", "" + index);
+    doc.addInt("numericdv", 1);
+    writer.addDocument(doc);
   }
 }
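
The rewritten helpers above collapse Field construction into typed adds; a compact sketch of the same shape, assuming addInt supplies the numeric doc values that NumericDocValuesField supplied before:

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;

    class DocHelperSketch {
      static void addDocWithIndex(IndexWriter writer, int index) throws IOException {
        Document doc = writer.newDocument();
        doc.addLargeText("content", "aaa " + index); // analyzed full-text field
        doc.addLargeText("id", "" + index);
        doc.addInt("numericdv", 1);                  // numeric field, doc values included
        writer.addDocument(doc);
      }
    }
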
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java
index 4431530..ad212ec 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java
@@ -38,7 +38,7 @@
     //System.out.println("rate=" + rate);
     dir.setRandomIOExceptionRateOnOpen(rate);
     int iters = atLeast(20);
-    LineFileDocs docs = new LineFileDocs(random());
+    LineFileDocs docs = null;
     DirectoryReader r = null;
     DirectoryReader r2 = null;
     boolean any = false;
@@ -65,6 +65,7 @@
           ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
         }
         w = new IndexWriter(dir, iwc);
+        docs = new LineFileDocs(w, random());
         if (r != null && random().nextInt(5) == 3) {
           if (random().nextBoolean()) {
             if (VERBOSE) {
@@ -87,6 +88,9 @@
         w.close();
         w = null;
 
+        docs.close();
+        docs = null;
+
         // NOTE: This is O(N^2)!  Only enable for temporary debugging:
         //dir.setRandomIOExceptionRateOnOpen(0.0);
         //_TestUtil.checkIndex(dir);
@@ -125,6 +129,9 @@
           // anything:
           w.rollback();
         }
+        if (docs != null) {
+          docs.close();
+        }
       }
 
       if (any && r == null && random().nextBoolean()) {
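
LineFileDocs is now constructed against a writer, so its documents follow that writer's schema; the hunks above create it only once the writer exists and close it explicitly. A sketch of that lifecycle, assuming the test-framework class keeps its usual nextDoc():

    import java.util.Random;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.util.LineFileDocs;

    class LineFileDocsSketch {
      static void indexSome(IndexWriter w, Random random) throws Exception {
        LineFileDocs docs = new LineFileDocs(w, random); // docs built via w.newDocument()
        try {
          for (int i = 0; i < 100; i++) {
            w.addDocument(docs.nextDoc());
          }
        } finally {
          docs.close(); // holds an open file, so always close it, as the hunks above do
        }
      }
    }
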
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfMemory.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfMemory.java
index 9e211f3..f597791 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfMemory.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfMemory.java
@@ -27,25 +27,16 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.MockVariableLengthPayloadFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.MockDirectoryWrapper.Failure;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Nightly;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.Rethrow;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 
 /** 
@@ -101,30 +92,38 @@
         int numDocs = atLeast(2000);
       
         IndexWriter iw = new IndexWriter(dir, conf);
+
+        FieldTypes fieldTypes = iw.getFieldTypes();
+        fieldTypes.setDocValuesType("dv2", DocValuesType.BINARY);
+        fieldTypes.setDocValuesType("dv3", DocValuesType.SORTED);
+        fieldTypes.enableSorting("dv4");
+        fieldTypes.setMultiValued("dv4");
+        fieldTypes.setMultiValued("dv5");
+        fieldTypes.setMultiValued("stored1");
+        fieldTypes.enableTermVectors("text_vectors");
+
         iw.commit(); // ensure there is always a commit
 
         dir.failOn(failOn);
         
         for (int i = 0; i < numDocs; i++) {
-          Document doc = new Document();
-          doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
-          doc.add(new NumericDocValuesField("dv", i));
-          doc.add(new BinaryDocValuesField("dv2", new BytesRef(Integer.toString(i))));
-          doc.add(new SortedDocValuesField("dv3", new BytesRef(Integer.toString(i))));
-          doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i))));
-          doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i-1))));
-          doc.add(new SortedNumericDocValuesField("dv5", i));
-          doc.add(new SortedNumericDocValuesField("dv5", i-1));
-          doc.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
+          Document doc = iw.newDocument();
+          doc.addAtom("id", Integer.toString(i));
+          doc.addInt("dv", i);
+          doc.addBinary("dv2", new BytesRef(Integer.toString(i)));
+          doc.addBinary("dv3", new BytesRef(Integer.toString(i)));
+          doc.addBinary("dv4", new BytesRef(Integer.toString(i)));
+          doc.addBinary("dv4", new BytesRef(Integer.toString(i-1)));
+          doc.addInt("dv5", i);
+          doc.addInt("dv5", i-1);
+          doc.addLargeText("text1", TestUtil.randomAnalysisString(random(), 20, true));
           // ensure we store something
-          doc.add(new StoredField("stored1", "foo"));
-          doc.add(new StoredField("stored1", "bar"));    
-          // ensure we get some payloads
-          doc.add(newTextField("text_payloads", TestUtil.randomAnalysisString(random(), 6, true), Field.Store.NO));
+          doc.addStoredString("stored1", "foo");
+          doc.addStoredString("stored1", "bar");
+          // ensure we get some payloads (analyzer will insert them for this field):
+          doc.addLargeText("text_payloads", TestUtil.randomAnalysisString(random(), 6, true));
           // ensure we get some vectors
-          FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-          ft.setStoreTermVectors(true);
-          doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
+          doc.addLargeText("text_vectors", TestUtil.randomAnalysisString(random(), 6, true));
           
           if (random().nextInt(10) > 0) {
             // single doc
@@ -145,12 +145,12 @@
             }
           } else {
             // block docs
-            Document doc2 = new Document();
-            doc2.add(newStringField("id", Integer.toString(-i), Field.Store.NO));
-            doc2.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
-            doc2.add(new StoredField("stored1", "foo"));
-            doc2.add(new StoredField("stored1", "bar"));
-            doc2.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
+            Document doc2 = iw.newDocument();
+            doc2.addAtom("id", Integer.toString(-i));
+            doc2.addLargeText("text1", TestUtil.randomAnalysisString(random(), 20, true));
+            doc2.addStoredString("stored1", "foo");
+            doc2.addStoredString("stored1", "bar");
+            doc2.addLargeText("text_vectors", TestUtil.randomAnalysisString(random(), 6, true));
             
             try {
               iw.addDocuments(Arrays.asList(doc, doc2));
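
The schema calls added above replace the per-value doc-values Field classes removed from the imports. A sketch of the declarations and the multi-valued adds they permit; the comments note which removed class each call stands in for:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.DocValuesType;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.BytesRef;

    public class MultiValuedSchemaSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));

        FieldTypes fieldTypes = iw.getFieldTypes();
        fieldTypes.setDocValuesType("dv2", DocValuesType.BINARY); // was BinaryDocValuesField
        fieldTypes.setDocValuesType("dv3", DocValuesType.SORTED); // was SortedDocValuesField
        fieldTypes.setMultiValued("dv4");                         // was SortedSetDocValuesField
        fieldTypes.enableSorting("dv4");
        fieldTypes.setMultiValued("dv5");                         // was SortedNumericDocValuesField
        fieldTypes.setMultiValued("stored1");                     // two stored values per doc

        Document doc = iw.newDocument();
        doc.addBinary("dv2", new BytesRef("7"));
        doc.addBinary("dv3", new BytesRef("7"));
        doc.addBinary("dv4", new BytesRef("7"));
        doc.addBinary("dv4", new BytesRef("6")); // second value, legal once multi-valued
        doc.addInt("dv5", 7);
        doc.addInt("dv5", 6);
        doc.addStoredString("stored1", "foo");
        doc.addStoredString("stored1", "bar");
        iw.addDocument(doc);
        iw.close();
        dir.close();
      }
    }
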
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 42e75dc..d9160bc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -27,7 +27,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -35,8 +34,8 @@
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.InfoStream;
@@ -78,7 +77,7 @@
     for (int i = 0; i < 97 ; i++) {
       DirectoryReader reader = writer.getReader();
       if (i == 0) {
-        writer.addDocument(DocHelper.createDocument(i, "x", 1 + random().nextInt(5)));
+        writer.addDocument(DocHelper.createDocument(writer, i, "x", 1 + random().nextInt(5)));
       } else {
         int previous = random().nextInt(i);
         // a check if the reader is current here could fail since there might be
@@ -87,10 +86,10 @@
         case 0:
         case 1:
         case 2:
-          writer.addDocument(DocHelper.createDocument(i, "x", 1 + random().nextInt(5)));
+          writer.addDocument(DocHelper.createDocument(writer, i, "x", 1 + random().nextInt(5)));
           break;
         case 3:
-          writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(
+          writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(writer,
               previous, "x", 1 + random().nextInt(5)));
           break;
         case 4:
@@ -109,7 +108,7 @@
     iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir1, iwc);
     assertTrue(reader.isCurrent());
-    writer.addDocument(DocHelper.createDocument(1, "x", 1+random().nextInt(5)));
+    writer.addDocument(DocHelper.createDocument(writer, 1, "x", 1+random().nextInt(5)));
     assertTrue(reader.isCurrent()); // segments in ram but IW is different to the readers one
     writer.close();
     assertFalse(reader.isCurrent()); // segments written
@@ -135,17 +134,18 @@
     // create the index
     createIndexNoClose(!doFullMerge, "index1", writer);
 
-    // writer.flush(false, true, true);
-
     // get a reader
     DirectoryReader r1 = writer.getReader();
     assertTrue(r1.isCurrent());
 
     String id10 = r1.document(10).getField("id").stringValue();
     
-    Document newDoc = new Document(r1.document(10));
-    newDoc.removeField("id");
-    newDoc.add(newStringField("id", Integer.toString(8000), Field.Store.YES));
+    Document newDoc = writer.newDocument();
+    newDoc.addAll(r1.document(10));
+
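+    // Nothing of the addAll copy above survives; the update below sends a fresh doc holding only the new id: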
+    newDoc = writer.newDocument();
+    newDoc.addAtom("id", Integer.toString(8000));
     writer.updateDocument(new Term("id", id10), newDoc);
     assertFalse(r1.isCurrent());
 
@@ -169,8 +168,8 @@
     assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
 
     writer = new IndexWriter(dir1, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "a b c");
     writer.addDocument(doc);
     assertTrue(r2.isCurrent());
     assertTrue(r3.isCurrent());
@@ -191,15 +190,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     
     IndexWriter writer = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "a b c");
     writer.addDocument(doc);
     writer.close();
     
     iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     writer = new IndexWriter(dir, iwc);
-    doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("field", "a b c");
     DirectoryReader nrtReader = writer.getReader();
     assertTrue(nrtReader.isCurrent());
     writer.addDocument(doc);
@@ -268,10 +267,10 @@
     assertEquals(100, index2df);
 
     // verify the docs are from different indexes
-    StoredDocument doc5 = r1.document(5);
-    assertEquals("index1", doc5.get("indexname"));
-    StoredDocument doc150 = r1.document(150);
-    assertEquals("index2", doc150.get("indexname"));
+    Document doc5 = r1.document(5);
+    assertEquals("index1", doc5.getString("indexname"));
+    Document doc150 = r1.document(150);
+    assertEquals("index2", doc150.getString("indexname"));
     r1.close();
     writer.close();
     dir1.close();
@@ -412,15 +411,15 @@
                                                      .setMaxBufferedDocs(2));
       TestUtil.reduceOpenFiles(writer);
       for (int i = 0; i < NUM_INIT_DOCS; i++) {
-        Document doc = DocHelper.createDocument(i, "addindex", 4);
-        writer.addDocument(doc);
+        writer.addDocument(DocHelper.createDocument(writer, i, "addindex", 4));
       }
         
       writer.close();
       
       readers = new DirectoryReader[numDirs];
-      for (int i = 0; i < numDirs; i++)
+      for (int i = 0; i < numDirs; i++) {
         readers[i] = DirectoryReader.open(addDir);
+      }
     }
     
     void joinThreads() {
@@ -462,8 +461,9 @@
           public void run() {
             try {
               final Directory[] dirs = new Directory[numDirs];
-              for (int k = 0; k < numDirs; k++)
+              for (int k = 0; k < numDirs; k++) {
                 dirs[k] = new MockDirectoryWrapper(random(), new RAMDirectory(addDir, newIOContext(random())));
+              }
               //int j = 0;
               //while (true) {
                 // System.out.println(Thread.currentThread().getName() + ": iter
@@ -483,8 +483,9 @@
           }
         };
       }
-      for (int i = 0; i < numThreads; i++)
+      for (int i = 0; i < numThreads; i++) {
         threads[i].start();
+      }
     }
     
     void doBody(int j, Directory[] dirs) throws Throwable {
@@ -534,8 +535,7 @@
     assertEquals(r2.maxDoc(), 100);
     // add 100 documents
     for (int x = 10000; x < 10000 + 100; x++) {
-      Document d = DocHelper.createDocument(x, "index1", 5);
-      writer.addDocument(d);
+      writer.addDocument(DocHelper.createDocument(writer, x, "index1", 5));
     }
     writer.flush(false, true);
     // verify the reader was reopened internally
@@ -581,7 +581,7 @@
     IndexWriter w = new IndexWriter(dir1, LuceneTestCase.newIndexWriterConfig(random, new MockAnalyzer(random))
         .setMergePolicy(new LogDocMergePolicy()));
     for (int i = 0; i < 100; i++) {
-      w.addDocument(DocHelper.createDocument(i, indexName, 4));
+      w.addDocument(DocHelper.createDocument(w, i, indexName, 4));
     }
     if (!multiSegment) {
       w.forceMerge(1);
@@ -592,7 +592,7 @@
   public static void createIndexNoClose(boolean multiSegment, String indexName,
       IndexWriter w) throws IOException {
     for (int i = 0; i < 100; i++) {
-      w.addDocument(DocHelper.createDocument(i, indexName, 4));
+      w.addDocument(DocHelper.createDocument(w, i, indexName, 4));
     }
     if (!multiSegment) {
       w.forceMerge(1);
@@ -631,14 +631,14 @@
 
     int num = TEST_NIGHTLY ? atLeast(100) : atLeast(10);
     for (int i = 0; i < num; i++) {
-      writer.addDocument(DocHelper.createDocument(i, "test", 4));
+      writer.addDocument(DocHelper.createDocument(writer, i, "test", 4));
     }
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
 
     assertTrue(warmer.warmCount > 0);
     final int count = warmer.warmCount;
 
-    writer.addDocument(DocHelper.createDocument(17, "test", 4));
+    writer.addDocument(DocHelper.createDocument(writer, 17, "test", 4));
     writer.forceMerge(1);
     assertTrue(warmer.warmCount > count);
     
@@ -664,7 +664,7 @@
     assertEquals(100, r1.numDocs());
 
     for (int i = 0; i < 10; i++) {
-      writer.addDocument(DocHelper.createDocument(i, "test", 4));
+      writer.addDocument(DocHelper.createDocument(writer, i, "test", 4));
     }
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
 
@@ -834,7 +834,7 @@
             do {
               try {
                 for(int docUpto=0;docUpto<10;docUpto++) {
-                  writer.addDocument(DocHelper.createDocument(10*count+docUpto, "test", 4));
+                  writer.addDocument(DocHelper.createDocument(writer, 10*count+docUpto, "test", 4));
                 }
                 count++;
                 final int limit = count*10;
@@ -891,13 +891,14 @@
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                  .setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
-    Field id = newStringField("id", "", Field.Store.NO);
-    doc.add(id);
-    id.setStringValue("0");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
+    doc.addAtom("id", "0");
     w.addDocument(doc);
-    id.setStringValue("1");
+
+    doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
+    doc.addAtom("id", "1");
     w.addDocument(doc);
     w.deleteDocuments(new Term("id", "0"));
 
@@ -915,14 +916,16 @@
   public void testDeletesNumDocs() throws Throwable {
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c", Field.Store.NO));
-    Field id = newStringField("id", "", Field.Store.NO);
-    doc.add(id);
-    id.setStringValue("0");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
+    doc.addAtom("id", "0");
     w.addDocument(doc);
-    id.setStringValue("1");
+
+    doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
+    doc.addAtom("id", "1");
     w.addDocument(doc);
+
     IndexReader r = w.getReader();
     assertEquals(2, r.numDocs());
     r.close();
@@ -972,8 +975,8 @@
             setMergePolicy(newLogMergePolicy(10))
     );
 
-    Document doc = new Document();
-    doc.add(newStringField("foo", "bar", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("foo", "bar");
     for(int i=0;i<20;i++) {
       w.addDocument(doc);
     }
@@ -1012,8 +1015,8 @@
            .setMergePolicy(newLogMergePolicy(10))
     );
 
-    Document doc = new Document();
-    doc.add(newStringField("foo", "bar", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("foo", "bar");
     for(int i=0;i<20;i++) {
       w.addDocument(doc);
     }
@@ -1034,7 +1037,7 @@
     DirectoryReader r2 = DirectoryReader.openIfChanged(r);
     assertNull(r2);
     
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     DirectoryReader r3 = DirectoryReader.openIfChanged(r);
     assertNotNull(r3);
     assertTrue(r3.getVersion() != r.getVersion());
@@ -1089,11 +1092,11 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     // create a segment and open an NRT reader
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.getReader().close();
     
     // add a new document so a new NRT reader is required
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
 
     // try to obtain an NRT reader twice: first time it fails and closes all the
     // other NRT readers. second time it fails, but also fails to close the
@@ -1124,8 +1127,8 @@
     IndexWriter w = new IndexWriter(dir, iwc);
     // Create 500 segments:
     for(int i=0;i<500;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
       w.addDocument(doc);
       IndexReader r = DirectoryReader.open(w, true);
       // Make sure segment count never exceeds 100:
@@ -1141,12 +1144,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter w = new IndexWriter(dir, iwc);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
 
     // Pull NRT reader; it has 1 segment:
     DirectoryReader r1 = DirectoryReader.open(w, true);
     assertEquals(1, r1.leaves().size());
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.commit();
 
     List<IndexCommit> commits = DirectoryReader.listCommits(dir);
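
Retrieval changes the same way throughout this file: StoredDocument is gone, reader.document(n) returns a Document, and typed getters replace stringValue() round-trips. A sketch using only the getters these hunks show:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    class StoredRetrievalSketch {
      static void dump(Directory dir) throws Exception {
        DirectoryReader r = DirectoryReader.open(dir);
        Document doc = r.document(0);             // no more StoredDocument
        String name = doc.getString("indexname"); // was doc.get("indexname")
        System.out.println("indexname=" + name);
        r.close();
      }
    }
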
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
index 2a522bc..b0374bc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
@@ -29,8 +29,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -59,8 +57,8 @@
           public void run() {
             try {
               startingGun.await();
-              Document doc = new Document();
-              doc.add(newTextField("field", "here is some text", Field.Store.NO));
+              Document doc = w.newDocument();
+              doc.addLargeText("field", "here is some text");
               w.addDocument(doc);
               startDone.countDown();
 
@@ -209,8 +207,8 @@
                   }
 
                   // We get to index on this cycle:
-                  Document doc = new Document();
-                  doc.add(new TextField("field", "here is some text that is a bit longer than normal trivial text", Field.Store.NO));
+                  Document doc = w.newDocument();
+                  doc.addLargeText("field", "here is some text that is a bit longer than normal trivial text");
                   for(int j=0;j<200;j++) {
                     w.addDocument(doc);
                   }
@@ -252,8 +250,8 @@
           public void run() {
             try {
               startingGun.await();
-              Document doc = new Document();
-              doc.add(new TextField("field", "here is some text that is a bit longer than normal trivial text", Field.Store.NO));
+              Document doc = w.newDocument();
+              doc.addLargeText("field", "here is some text that is a bit longer than normal trivial text");
               for(int j=0;j<1000;j++) {
                 w.addDocument(doc);
               }
@@ -300,8 +298,8 @@
             try {
               startingGun.await();
               for(int j=0;j<1000;j++) {
-                Document doc = new Document();
-                doc.add(newStringField("field", "threadID" + threadID, Field.Store.NO));
+                Document doc = w.newDocument();
+                doc.addAtom("field", "threadID" + threadID);
                 w.addDocument(doc);
               }
             } catch (Exception e) {
@@ -326,8 +324,8 @@
     long counter = 0;
     long checkAt = 100;
     while (thread0Count < 1000 || thread1Count < 1000) {
-      Document doc = new Document();
-      doc.add(newStringField("field", "threadIDmain", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("field", "threadIDmain");
       w.addDocument(doc);
       if (counter++ == checkAt) {
         for(String fileName : dir.listAll()) {
@@ -339,7 +337,8 @@
               SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
               si.setCodec(codec);
               SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, -1, -1, -1);
-              SegmentReader sr = new SegmentReader(sci, IOContext.DEFAULT);
+              SegmentReader sr = new SegmentReader(w.getFieldTypes(),
+                                                   sci, IOContext.DEFAULT);
               try {
                 thread0Count += sr.docFreq(new Term("field", "threadID0"));
                 thread1Count += sr.docFreq(new Term("field", "threadID1"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
index 504a0df..0db1b25 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
@@ -27,7 +27,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -234,11 +233,11 @@
   public void testEmbeddedFFFF() throws Throwable {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a a\uffffb", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a a\uffffb");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("field", "a", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "a");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
@@ -251,16 +250,17 @@
   public void testInvalidUTF16() throws Throwable {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new TestIndexWriter.StringSplitAnalyzer()));
-    Document doc = new Document();
+    Document doc = w.newDocument();
 
     final int count = utf8Data.length/2;
-    for(int i=0;i<count;i++)
-      doc.add(newTextField("f" + i, utf8Data[2*i], Field.Store.YES));
+    for(int i=0;i<count;i++) {
+      doc.addLargeText("f" + i, utf8Data[2*i]);
+    }
     w.addDocument(doc);
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    StoredDocument doc2 = ir.document(0);
+    Document doc2 = ir.document(0);
     for(int i=0;i<count;i++) {
       assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
       assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
@@ -275,10 +275,6 @@
     Random rnd = random();
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
-    Document d = new Document();
-    // Single segment
-    Field f = newStringField("f", "", Field.Store.NO);
-    d.add(f);
     char[] chars = new char[2];
     final Set<String> allTerms = new HashSet<>();
 
@@ -304,8 +300,10 @@
         s = new String(chars, 0, 2);
       }
       allTerms.add(s);
-      f.setStringValue(s);
 
+      // Single segment
+      Document d = writer.newDocument();
+      d.addAtom("f", s);
       writer.addDocument(d);
 
       if ((1+i) % 42 == 0) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index 8a6c6ce..766fb55 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -26,10 +26,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -39,10 +36,10 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.ThreadInterruptedException;
-import org.apache.lucene.util.LuceneTestCase.Slow;
 
 /**
  * MultiThreaded IndexWriter tests
@@ -63,26 +60,25 @@
     public IndexerThread(IndexWriter writer, boolean noErrors) {
       this.writer = writer;
       this.noErrors = noErrors;
+
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("field");
+      fieldTypes.enableTermVectorOffsets("field");
+      fieldTypes.enableTermVectorPositions("field");
     }
 
     @Override
     public void run() {
 
-      final Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      
-      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
-      doc.add(new NumericDocValuesField("dv", 5));
-
       int idUpto = 0;
       int fullCount = 0;
       final long stopTime = System.currentTimeMillis() + 200;
 
       do {
         try {
+          Document doc = writer.newDocument();
+          doc.addLargeText("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj");
+          doc.addInt("dv", 5);
           writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
           addCount++;
         } catch (IOException ioe) {
@@ -354,15 +350,16 @@
     }
 
     IndexWriter writer = new IndexWriter(dir, iwc);
-    final Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    final Document doc = writer.newDocument();
+    doc.addLargeText("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj");
 
-    for(int i=0;i<6;i++)
+    for(int i=0;i<6;i++) {
       writer.addDocument(doc);
+    }
 
     dir.failOn(failure);
     failure.setDoFail();
@@ -561,10 +558,9 @@
     @Override
     public void run() {
       try {
-        Document doc = new Document();
-        Field field = newTextField("field", "testData", Field.Store.YES);
-        doc.add(field);
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+        Document doc = writer.newDocument();
+        doc.addLargeText("field", "testData");
         iwConstructed.countDown();
         startIndexing.await();
         writer.addDocument(doc);
@@ -594,7 +590,7 @@
     writerRef.set(new IndexWriter(d, newIndexWriterConfig(analyzer)));
     // Make initial commit so the test doesn't trip "corrupt first commit" when virus checker refuses to delete partial segments_N file:
     writerRef.get().commit();
-    final LineFileDocs docs = new LineFileDocs(random());
+    final LineFileDocs docs = new LineFileDocs(writerRef.get(), random());
     final Thread[] threads = new Thread[threadCount];
     final int iters = atLeast(100);
     final AtomicBoolean failed = new AtomicBoolean();
@@ -605,7 +601,6 @@
           @Override
           public void run() {
             for(int iter=0;iter<iters && !failed.get();iter++) {
-              //final int x = random().nextInt(5);
               final int x = random().nextInt(3);
               try {
                 switch(x) {
@@ -620,6 +615,7 @@
                       System.out.println("TEST: " + Thread.currentThread().getName() + ": rollback done; now open new writer");
                     }
                     writerRef.set(new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random()))));
+                    docs.setIndexWriter(writerRef.get());
                   } finally {
                     rollbackLock.unlock();
                   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index 85c95dc..f828277 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -24,10 +24,10 @@
 import java.util.Iterator;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -41,7 +41,8 @@
 
 public class TestIndexableField extends LuceneTestCase {
 
-  private class MyField implements IndexableField, StorableField {
+  private class MyField implements IndexableField {
+    private final Analyzer a = new MockAnalyzer(random());
 
     private final int counter;
     private final IndexableFieldType fieldType = new IndexableFieldType() {
@@ -51,11 +52,6 @@
       }
 
       @Override
-      public boolean tokenized() {
-        return true;
-      }
-
-      @Override
       public boolean storeTermVectors() {
         return indexOptions() != IndexOptions.NONE && counter % 2 == 1 && counter % 10 != 9;
       }
@@ -76,18 +72,12 @@
       }
 
       @Override
-      public boolean omitNorms() {
-        return false;
-      }
-
-      @Override
       public IndexOptions indexOptions() {
-        return counter%10 == 3 ? IndexOptions.NONE : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
-      }
-
-      @Override
-      public DocValuesType docValuesType() {
-        return DocValuesType.NONE;
+        if ((counter % 10) != 3) {
+          return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        } else {
+          return IndexOptions.NONE;
+        }
       }
     };
 
@@ -119,6 +109,11 @@
     }
 
     @Override
+    public BytesRef binaryDocValue() {
+      return binaryValue();
+    }
+
+    @Override
     public String stringValue() {
       final int fieldID = counter%10;
       if (fieldID != 3 && fieldID != 7) {
@@ -128,8 +123,7 @@
       }
     }
 
-    @Override
-    public Reader readerValue() {
+    private Reader readerValue() {
       if (counter%10 == 7) {
         return new StringReader("text " + counter);
       } else {
@@ -138,19 +132,14 @@
     }
 
     @Override
-    public Number numericValue() {
-      return null;
-    }
-
-    @Override
     public IndexableFieldType fieldType() {
       return fieldType;
     }
 
     @Override
-    public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
-      return readerValue() != null ? analyzer.tokenStream(name(), readerValue()) :
-        analyzer.tokenStream(name(), new StringReader(stringValue()));
+    public TokenStream tokenStream(TokenStream previous) throws IOException {
+      return readerValue() != null ? a.tokenStream(name(), readerValue()) :
+        a.tokenStream(name(), new StringReader(stringValue()));
     }
   }
 
@@ -159,7 +148,9 @@
   public void testArbitraryFields() throws Exception {
 
     final Directory dir = newDirectory();
-    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    Analyzer a = new MockAnalyzer(random());
+
+    final RandomIndexWriter w = newRandomIndexWriter(dir, a);
 
     final int NUM_DOCS = atLeast(27);
     if (VERBOSE) {
@@ -180,10 +171,7 @@
       final int finalBaseCount = baseCount;
       baseCount += fieldCount-1;
 
-      IndexDocument d = new IndexDocument() {
-        @Override
-        public Iterable<IndexableField> indexableFields() {
-          return new Iterable<IndexableField>() {
+      Iterable<IndexableField> d = new Iterable<IndexableField>() {
             @Override
             public Iterator<IndexableField> iterator() {
               return new Iterator<IndexableField>() {
@@ -197,13 +185,12 @@
                   next = null;
                   if (fieldUpto == 0) {
                     fieldUpto = 1;
-                    next = newStringField("id", ""+finalDocCount, Field.Store.YES);
+                    next = new LowSchemaField(a, "id", ""+finalDocCount, IndexOptions.DOCS, false);
                   } else {
                     next = new MyField(finalBaseCount + (fieldUpto++-1));
                   }
                   
-                  if (next != null && next.fieldType().indexOptions() != IndexOptions.NONE) return true;
-                  else return this.hasNext();
+                  return true;
                 }
 
                 @Override
@@ -224,54 +211,6 @@
               };
             }
           };
-        }
-
-        @Override
-        public Iterable<StorableField> storableFields() {
-          return new Iterable<StorableField>() {
-            @Override
-            public Iterator<StorableField> iterator() {
-              return new Iterator<StorableField>() {
-                int fieldUpto = 0;
-                private StorableField next = null;
-
-                @Override
-                public boolean hasNext() {
-
-                  if (fieldUpto == fieldCount) return false;
-                  
-                  next = null;
-                  if (fieldUpto == 0) {
-                    fieldUpto = 1;
-                    next = newStringField("id", ""+finalDocCount, Field.Store.YES);
-                  } else {
-                    next = new MyField(finalBaseCount + (fieldUpto++-1));
-                  }
-                  
-                  if (next != null && next.fieldType().stored()) return true;
-                  else return this.hasNext();
-                }
-
-                @Override
-                public StorableField next() {
-                  assert fieldUpto <= fieldCount;
-                  if (next == null && !hasNext()) {
-                    return null;
-                  }
-                  else {
-                    return next;
-                  }
-                }
-
-                @Override
-                public void remove() {
-                  throw new UnsupportedOperationException();
-                }
-              };
-            }
-          };
-        }
-      };
       
       w.addDocument(d);
     }
@@ -289,7 +228,7 @@
       final TopDocs hits = s.search(new TermQuery(new Term("id", ""+id)), 1);
       assertEquals(1, hits.totalHits);
       final int docID = hits.scoreDocs[0].doc;
-      final StoredDocument doc = s.doc(docID);
+      final Document doc = s.doc(docID);
       final int endCounter = counter + fieldsPerDoc[id];
       while(counter < endCounter) {
         final String name = "f" + counter;
@@ -308,7 +247,7 @@
 
         // stored:
         if (stored) {
-          StorableField f = doc.getField(name);
+          IndexableField f = doc.getField(name);
           assertNotNull("doc " + id + " doesn't have field f" + counter, f);
           if (binary) {
             assertNotNull("doc " + id + " doesn't have field f" + counter, f);
@@ -376,11 +315,7 @@
     dir.close();
   }
 
-  private static class CustomField implements StorableField {
-    @Override
-    public BytesRef binaryValue() {
-      return null;
-    }
+  private static class CustomField implements IndexableField {
 
     @Override
     public String stringValue() {
@@ -388,26 +323,18 @@
     }
 
     @Override
-    public Reader readerValue() {
-      return null;
-    }
-
-    @Override
-    public Number numericValue() {
-      return null;
-    }
-
-    @Override
     public String name() {
       return "field";
     }
 
     @Override
     public IndexableFieldType fieldType() {
-      FieldType ft = new FieldType(StoredField.TYPE);
-      ft.setStoreTermVectors(true);
-      ft.freeze();
-      return ft;
+      return new IndexableFieldType() {
+        @Override
+        public boolean storeTermVectors() {
+          return true;
+        }
+      };
     }
   }
 
@@ -416,17 +343,7 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     try {
-      w.addDocument(
-                    new IndexDocument() {
-                      @Override
-                      public Iterable<IndexableField> indexableFields() {
-                        return Collections.emptyList();
-                      }
-                      @Override
-                      public Iterable<StorableField> storableFields() {
-                        return Collections.<StorableField>singletonList(new CustomField());
-                      }
-                    });
+      w.addDocument(Collections.<IndexableField>singletonList(new CustomField()));
       fail("didn't hit exception");
     } catch (IllegalArgumentException iae) {
       // expected
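
On this branch, IndexWriter.addDocument takes a plain Iterable<IndexableField>; the old IndexDocument interface with separate indexableFields()/storableFields() views is gone. A minimal sketch of a hand-rolled field under the new interface, mirroring CustomField above -- the empty-override IndexableFieldType and the singletonList call are assumptions read off this diff, not a definitive API reference:

    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.index.IndexableFieldType;

    class MinimalField implements IndexableField {
      @Override
      public String name() { return "field"; }

      @Override
      public String stringValue() { return "some value"; }

      @Override
      public IndexableFieldType fieldType() {
        // the other IndexableFieldType methods appear to have defaults on this branch
        return new IndexableFieldType() {
          @Override
          public boolean storeTermVectors() { return false; }
        };
      }
    }

    // a one-field document is then just a singleton list (java.util.Collections):
    // w.addDocument(Collections.<IndexableField>singletonList(new MinimalField()));
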
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestInfoStream.java b/lucene/core/src/test/org/apache/lucene/index/TestInfoStream.java
index cd3cbc0..d6a3091 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestInfoStream.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestInfoStream.java
@@ -48,7 +48,7 @@
       }
     });
     IndexWriter iw = new IndexWriter(dir, iwc);
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.close();
     dir.close();
   }
@@ -76,7 +76,7 @@
     });
     IndexWriter iw = new IndexWriter(dir, iwc);
     iw.enableTestPoints = true;
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.close();
     dir.close();
     assertTrue(seenTestPoint.get());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIsCurrent.java b/lucene/core/src/test/org/apache/lucene/index/TestIsCurrent.java
index d942196..0fa720e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIsCurrent.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIsCurrent.java
@@ -17,15 +17,13 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.*;
-import org.apache.lucene.store.*;
-
-import org.junit.Test;
-
 import java.io.IOException;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.*;
+import org.junit.Test;
+
 public class TestIsCurrent extends LuceneTestCase {
 
   private RandomIndexWriter writer;
@@ -41,8 +39,8 @@
     writer = new RandomIndexWriter(random(), directory);
 
     // write document
-    Document doc = new Document();
-    doc.add(newTextField("UUID", "1", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("UUID", "1");
     writer.addDocument(doc);
     writer.commit();
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
index 9e4ae9c..67f37bf 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -21,7 +21,6 @@
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -30,8 +29,8 @@
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
 /**
@@ -39,198 +38,198 @@
  *
  */
 public class TestLazyProxSkipping extends LuceneTestCase {
-    private IndexSearcher searcher;
-    private int seeksCounter = 0;
+  private IndexSearcher searcher;
+  private int seeksCounter = 0;
     
-    private String field = "tokens";
-    private String term1 = "xx";
-    private String term2 = "yy";
-    private String term3 = "zz";
+  private String field = "tokens";
+  private String term1 = "xx";
+  private String term2 = "yy";
+  private String term3 = "zz";
 
-    private class SeekCountingDirectory extends MockDirectoryWrapper {
-      public SeekCountingDirectory(Directory delegate) {
-        super(random(), delegate);
-      }
+  private class SeekCountingDirectory extends MockDirectoryWrapper {
+    public SeekCountingDirectory(Directory delegate) {
+      super(random(), delegate);
+    }
 
-      @Override
-      public IndexInput openInput(String name, IOContext context) throws IOException {
-        IndexInput ii = super.openInput(name, context);
-        if (name.endsWith(".prx") || name.endsWith(".pos") ) {
-          // we decorate the proxStream with a wrapper class that allows to count the number of calls of seek()
-          ii = new SeeksCountingStream(ii);
-        }
-        return ii;
+    @Override
+    public IndexInput openInput(String name, IOContext context) throws IOException {
+      IndexInput ii = super.openInput(name, context);
+      if (name.endsWith(".prx") || name.endsWith(".pos") ) {
+        // we decorate the proxStream with a wrapper class that lets us count the number of seek() calls
+        ii = new SeeksCountingStream(ii);
       }
+      return ii;
+    }
       
-    }
+  }
     
-    private void createIndex(int numHits) throws IOException {
-        int numDocs = 500;
+  private void createIndex(int numHits) throws IOException {
+    int numDocs = 500;
         
-        final Analyzer analyzer = new Analyzer() {
-          @Override
-          public TokenStreamComponents createComponents(String fieldName) {
-            return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true));
-          }
-        };
-        Directory directory = new SeekCountingDirectory(new RAMDirectory());
-        // note: test explicitly disables payloads
-        IndexWriter writer = new IndexWriter(
-            directory,
-            newIndexWriterConfig(analyzer)
-              .setMaxBufferedDocs(10)
-              .setMergePolicy(newLogMergePolicy(false))
-        );
-        
-        for (int i = 0; i < numDocs; i++) {
-            Document doc = new Document();
-            String content;
-            if (i % (numDocs / numHits) == 0) {
-                // add a document that matches the query "term1 term2"
-                content = this.term1 + " " + this.term2;
-            } else if (i % 15 == 0) {
-                // add a document that only contains term1
-                content = this.term1 + " " + this.term1;
-            } else {
-                // add a document that contains term2 but not term 1
-                content = this.term3 + " " + this.term2;
-            }
-
-            doc.add(newTextField(this.field, content, Field.Store.YES));
-            writer.addDocument(doc);
+    final Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String fieldName) {
+        return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true));
+      }
+    };
+    Directory directory = new SeekCountingDirectory(new RAMDirectory());
+    // note: test explicitly disables payloads
+    IndexWriter writer = new IndexWriter(
+        directory,
+        newIndexWriterConfig(analyzer)
+            .setMaxBufferedDocs(10)
+            .setMergePolicy(newLogMergePolicy(false))
+    );
         
-        // make sure the index has only a single segment
-        writer.forceMerge(1);
-        writer.close();
+    for (int i = 0; i < numDocs; i++) {
+      String content;
+      if (i % (numDocs / numHits) == 0) {
+        // add a document that matches the query "term1 term2"
+        content = this.term1 + " " + this.term2;
+      } else if (i % 15 == 0) {
+        // add a document that only contains term1
+        content = this.term1 + " " + this.term1;
+      } else {
+        // add a document that contains term2 but not term1
+        content = this.term3 + " " + this.term2;
+      }
 
-      SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(directory));
-
-      this.searcher = newSearcher(reader);
+      Document doc = writer.newDocument();
+      doc.addLargeText(this.field, content);
+      writer.addDocument(doc);
     }
-    
-    private ScoreDoc[] search() throws IOException {
-        // create PhraseQuery "term1 term2" and search
-        PhraseQuery pq = new PhraseQuery();
-        pq.add(new Term(this.field, this.term1));
-        pq.add(new Term(this.field, this.term2));
-        return this.searcher.search(pq, null, 1000).scoreDocs;        
-    }
-    
-    private void performTest(int numHits) throws IOException {
-        createIndex(numHits);
-        this.seeksCounter = 0;
-        ScoreDoc[] hits = search();
-        // verify that the right number of docs was found
-        assertEquals(numHits, hits.length);
         
-        // check if the number of calls of seek() does not exceed the number of hits
-        assertTrue(this.seeksCounter > 0);
-        assertTrue("seeksCounter=" + this.seeksCounter + " numHits=" + numHits, this.seeksCounter <= numHits + 1);
-        searcher.getIndexReader().close();
-    }
+    // make sure the index has only a single segment
+    writer.forceMerge(1);
+    writer.close();
+
+    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(directory));
+
+    this.searcher = newSearcher(reader);
+  }
+    
+  private ScoreDoc[] search() throws IOException {
+    // create PhraseQuery "term1 term2" and search
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(new Term(this.field, this.term1));
+    pq.add(new Term(this.field, this.term2));
+    return this.searcher.search(pq, null, 1000).scoreDocs;        
+  }
+    
+  private void performTest(int numHits) throws IOException {
+    createIndex(numHits);
+    this.seeksCounter = 0;
+    ScoreDoc[] hits = search();
+    // verify that the right number of docs was found
+    assertEquals(numHits, hits.length);
+        
+    // check if the number of calls of seek() does not exceed the number of hits
+    assertTrue(this.seeksCounter > 0);
+    assertTrue("seeksCounter=" + this.seeksCounter + " numHits=" + numHits, this.seeksCounter <= numHits + 1);
+    searcher.getIndexReader().close();
+  }
  
-    public void testLazySkipping() throws IOException {
-      final String fieldFormat = TestUtil.getPostingsFormat(this.field);
-      assumeFalse("This test cannot run with Memory postings format", fieldFormat.equals("Memory"));
-      assumeFalse("This test cannot run with Direct postings format", fieldFormat.equals("Direct"));
-      assumeFalse("This test cannot run with SimpleText postings format", fieldFormat.equals("SimpleText"));
+  public void testLazySkipping() throws IOException {
+    final String fieldFormat = TestUtil.getPostingsFormat(this.field);
+    assumeFalse("This test cannot run with Memory postings format", fieldFormat.equals("Memory"));
+    assumeFalse("This test cannot run with Direct postings format", fieldFormat.equals("Direct"));
+    assumeFalse("This test cannot run with SimpleText postings format", fieldFormat.equals("SimpleText"));
 
-        // test whether only the minimum amount of seeks()
-        // are performed
-        performTest(5);
-        performTest(10);
-    }
+    // test that only the minimum number of seek()
+    // calls is performed
+    performTest(5);
+    performTest(10);
+  }
     
-    public void testSeek() throws IOException {
-        Directory directory = newDirectory();
-        IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
-        for (int i = 0; i < 10; i++) {
-            Document doc = new Document();
-            doc.add(newTextField(this.field, "a b", Field.Store.YES));
-            writer.addDocument(doc);
-        }
-        
-        writer.close();
-        IndexReader reader = DirectoryReader.open(directory);
-
-        DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
-                                                                   MultiFields.getLiveDocs(reader),
-                                                                   this.field,
-                                                                   new BytesRef("b"));
-
-        for (int i = 0; i < 10; i++) {
-            tp.nextDoc();
-            assertEquals(tp.docID(), i);
-            assertEquals(tp.nextPosition(), 1);
-        }
-
-        tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getLiveDocs(reader),
-                                              this.field,
-                                              new BytesRef("a"));
-
-        for (int i = 0; i < 10; i++) {
-            tp.nextDoc();
-            assertEquals(tp.docID(), i);
-            assertEquals(tp.nextPosition(), 0);
-        }
-        reader.close();
-        directory.close();
-        
+  public void testSeek() throws IOException {
+    Directory directory = newDirectory();
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
+    for (int i = 0; i < 10; i++) {
+      Document doc = writer.newDocument();
+      doc.addLargeText(this.field, "a b");
+      writer.addDocument(doc);
     }
+        
+    writer.close();
+    IndexReader reader = DirectoryReader.open(directory);
+
+    DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+                                                               MultiFields.getLiveDocs(reader),
+                                                               this.field,
+                                                               new BytesRef("b"));
+
+    for (int i = 0; i < 10; i++) {
+      tp.nextDoc();
+      assertEquals(tp.docID(), i);
+      assertEquals(tp.nextPosition(), 1);
+    }
+
+    tp = MultiFields.getTermPositionsEnum(reader,
+                                          MultiFields.getLiveDocs(reader),
+                                          this.field,
+                                          new BytesRef("a"));
+
+    for (int i = 0; i < 10; i++) {
+      tp.nextDoc();
+      assertEquals(tp.docID(), i);
+      assertEquals(tp.nextPosition(), 0);
+    }
+    reader.close();
+    directory.close();
+        
+  }
     
 
-    // Simply extends IndexInput in a way that we are able to count the number
-    // of invocations of seek()
-    class SeeksCountingStream extends IndexInput {
-          private IndexInput input;      
+  // Extends IndexInput so that we can count the number
+  // of invocations of seek()
+  class SeeksCountingStream extends IndexInput {
+    private IndexInput input;      
           
           
-          SeeksCountingStream(IndexInput input) {
-              super("SeekCountingStream(" + input + ")");
-              this.input = input;
-          }      
+    SeeksCountingStream(IndexInput input) {
+      super("SeekCountingStream(" + input + ")");
+      this.input = input;
+    }      
                 
-          @Override
-          public byte readByte() throws IOException {
-              return this.input.readByte();
-          }
-    
-          @Override
-          public void readBytes(byte[] b, int offset, int len) throws IOException {
-              this.input.readBytes(b, offset, len);        
-          }
-    
-          @Override
-          public void close() throws IOException {
-              this.input.close();
-          }
-    
-          @Override
-          public long getFilePointer() {
-              return this.input.getFilePointer();
-          }
-    
-          @Override
-          public void seek(long pos) throws IOException {
-              TestLazyProxSkipping.this.seeksCounter++;
-              this.input.seek(pos);
-          }
-    
-          @Override
-          public long length() {
-              return this.input.length();
-          }
-          
-          @Override
-          public SeeksCountingStream clone() {
-              return new SeeksCountingStream(this.input.clone());
-          }
-
-          @Override
-          public IndexInput slice(String sliceDescription, long offset, long length) throws IOException {
-            return new SeeksCountingStream(this.input.slice(sliceDescription, offset, length));
-          }
+    @Override
+    public byte readByte() throws IOException {
+      return this.input.readByte();
     }
+    
+    @Override
+    public void readBytes(byte[] b, int offset, int len) throws IOException {
+      this.input.readBytes(b, offset, len);        
+    }
+    
+    @Override
+    public void close() throws IOException {
+      this.input.close();
+    }
+    
+    @Override
+    public long getFilePointer() {
+      return this.input.getFilePointer();
+    }
+    
+    @Override
+    public void seek(long pos) throws IOException {
+      TestLazyProxSkipping.this.seeksCounter++;
+      this.input.seek(pos);
+    }
+    
+    @Override
+    public long length() {
+      return this.input.length();
+    }
+          
+    @Override
+    public SeeksCountingStream clone() {
+      return new SeeksCountingStream(this.input.clone());
+    }
+
+    @Override
+    public IndexInput slice(String sliceDescription, long offset, long length) throws IOException {
+      return new SeeksCountingStream(this.input.slice(sliceDescription, offset, length));
+    }
+  }
 }
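
The pattern recurring through these test hunks: documents are no longer assembled from new Document() plus Field instances; the writer hands out a schema-bound document, and typed addXxx methods carry the field type. A hedged sketch of the basic flow, using only calls that appear in this diff (setup boilerplate follows the usual LuceneTestCase conventions):

    public void testNewDocumentApi() throws Exception {
      Directory dir = newDirectory();
      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));

      Document doc = writer.newDocument();  // bound to the writer's field-type schema
      doc.addLargeText("tokens", "xx yy");  // analyzed text; replaces newTextField(...)
      doc.addAtom("f", "j");                // single un-analyzed token; replaces newStringField(...)
      writer.addDocument(doc);

      writer.close();
      dir.close();
    }
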
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
index 0212f66..3f29ead 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
@@ -24,9 +24,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -114,14 +112,14 @@
     iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
     iwc.setMaxBufferedDocs(-1);
     final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-
+    FieldTypes fieldTypes = riw.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     for(int idx=0;idx<NUM_DOCS;idx++) {
-      final Document doc = new Document();
+      final Document doc = riw.newDocument();
       String s = isS1.get(idx) ? s1 : s2;
-      final Field f = newTextField("field", s, Field.Store.NO);
       final int count = TestUtil.nextInt(random(), 1, 4);
       for(int ct=0;ct<count;ct++) {
-        doc.add(f);
+        doc.addLargeText("field", s);
       }
       riw.addDocument(doc);
     }
@@ -312,16 +310,16 @@
       iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
       iwc.setMaxBufferedDocs(-1);
       final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(options);
+      FieldTypes fieldTypes = riw.getFieldTypes();
+      fieldTypes.disableHighlighting("field");
+      fieldTypes.setIndexOptions("field", options);
+      fieldTypes.setMultiValued("field");
       for(int idx=0;idx<NUM_DOCS;idx++) {
-        final Document doc = new Document();
+        final Document doc = riw.newDocument();
         String s = isS1.get(idx) ? s1 : s2;
-        final Field f = newField("field", s, ft);
         final int count = TestUtil.nextInt(random(), 1, 4);
         for(int ct=0;ct<count;ct++) {
-          doc.add(f);
+          doc.addLargeText("field", s);
         }
         riw.addDocument(doc);
       }
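
The FieldTypes calls above replace per-Field FieldType objects: options that used to be set on each Field instance are declared once per field name on the writer's schema, before the field is first used. A sketch of that configuration step, limited to the methods this diff actually exercises:

    IndexWriter writer = new IndexWriter(dir, iwc);
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("field");       // several values per document are allowed
    fieldTypes.disableHighlighting("field");  // don't keep offsets for highlighting
    fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);

    Document doc = writer.newDocument();
    doc.addLargeText("field", "first value");
    doc.addLargeText("field", "second value"); // legal only because the field is multi-valued
    writer.addDocument(doc);
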
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestManyFields.java b/lucene/core/src/test/org/apache/lucene/index/TestManyFields.java
index 1a57b0e..acb4c48 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestManyFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestManyFields.java
@@ -21,9 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
@@ -31,20 +29,19 @@
 
 /** Test that creates way, way, way too many fields */
 public class TestManyFields extends LuceneTestCase {
-  private static final FieldType storedTextType = new FieldType(TextField.TYPE_NOT_STORED);
   
   public void testManyFields() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                  .setMaxBufferedDocs(10));
     for(int j=0;j<100;j++) {
-      Document doc = new Document();
-      doc.add(newField("a"+j, "aaa" + j, storedTextType));
-      doc.add(newField("b"+j, "aaa" + j, storedTextType));
-      doc.add(newField("c"+j, "aaa" + j, storedTextType));
-      doc.add(newField("d"+j, "aaa", storedTextType));
-      doc.add(newField("e"+j, "aaa", storedTextType));
-      doc.add(newField("f"+j, "aaa", storedTextType));
+      Document doc = writer.newDocument();
+      doc.addAtom("a"+j, "aaa" + j);
+      doc.addAtom("b"+j, "aaa" + j);
+      doc.addAtom("c"+j, "aaa" + j);
+      doc.addAtom("d"+j, "aaa");
+      doc.addAtom("e"+j, "aaa");
+      doc.addAtom("f"+j, "aaa");
       writer.addDocument(doc);
     }
     writer.close();
@@ -68,14 +65,17 @@
     Directory dir = newDirectory();
     IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                  .setRAMBufferSizeMB(0.5));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+
     int n = atLeast(1);
     for(int i=0;i<n;i++) {
       // First, docs where every term is unique (heavy on
       // Posting instances)
       for(int j=0;j<100;j++) {
-        Document doc = new Document();
+        Document doc = writer.newDocument();
         for(int k=0;k<100;k++) {
-          doc.add(newField("field", Integer.toString(random().nextInt()), storedTextType));
+          doc.addLargeText("field", Integer.toString(random().nextInt()));
         }
         writer.addDocument(doc);
       }
@@ -83,8 +83,8 @@
       // Next, many single term docs where only one term
       // occurs (heavy on byte blocks)
       for(int j=0;j<100;j++) {
-        Document doc = new Document();
-        doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", storedTextType));
+        Document doc = writer.newDocument();
+        doc.addLargeText("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa");
         writer.addDocument(doc);
       }
 
@@ -94,12 +94,13 @@
       for(int j=0;j<100;j++) {
         StringBuilder b = new StringBuilder();
         String x = Integer.toString(j) + ".";
-        for(int k=0;k<1000;k++)
+        for(int k=0;k<1000;k++) {
           b.append(x);
+        }
         String longTerm = b.toString();
 
-        Document doc = new Document();
-        doc.add(newField("field", longTerm, storedTextType));
+        Document doc = writer.newDocument();
+        doc.addLargeText("field", longTerm);
         writer.addDocument(doc);
       }
     }
@@ -121,19 +122,19 @@
     iwc.setRAMBufferSizeMB(0.2);
     iwc.setMaxBufferedDocs(-1);
     IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
     int upto = 0;
 
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setOmitNorms(true);
-
     int firstDocCount = -1;
     for(int iter=0;iter<10;iter++) {
       final int startFlushCount = w.getFlushCount();
       int docCount = 0;
       while(w.getFlushCount() == startFlushCount) {
-        Document doc = new Document();
+        Document doc = w.newDocument();
         for(int i=0;i<10;i++) {
-          doc.add(new Field("field" + (upto++), "content", ft));
+          String fieldName = "field" + (upto++);
+          fieldTypes.disableNorms(fieldName);
+          doc.addLargeText(fieldName, "content");
         }
         w.addDocument(doc);
         docCount++;
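
Norms follow the same schema route: instead of FieldType.setOmitNorms(true) on a shared FieldType, the test disables norms per field name before the field's first use. A minimal sketch of the equivalent, assuming only the calls shown above (the field name is illustrative):

    FieldTypes fieldTypes = w.getFieldTypes();
    Document doc = w.newDocument();
    String fieldName = "field0";          // hypothetical field name
    fieldTypes.disableNorms(fieldName);   // declared before the first document using the field
    doc.addLargeText(fieldName, "content");
    w.addDocument(doc);
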
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
index c2dc509..4ead759 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.similarities.TFIDFSimilarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -49,11 +48,9 @@
                                  .setMergePolicy(newLogMergePolicy());
     config.setSimilarity(new TestSimilarity());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    Document doc = new Document();
-    Field foo = newTextField("foo", "", Field.Store.NO);
-    doc.add(foo);
     for (int i = 0; i < 100; i++) {
-      foo.setStringValue(addValue());
+      Document doc = writer.newDocument();
+      doc.addLargeText("foo", addValue());
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMergeRateLimiter.java b/lucene/core/src/test/org/apache/lucene/index/TestMergeRateLimiter.java
index fa2f4b0..bd1e416 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMergeRateLimiter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMergeRateLimiter.java
@@ -27,7 +27,7 @@
   public void testInitDefaults() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.close();
     MergePolicy.OneMerge merge = new MergePolicy.OneMerge(SegmentInfos.readLatestCommit(dir).asList());
     MergeRateLimiter rateLimiter = new MergeRateLimiter(merge);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java
index 6fc5c84..06d724e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -58,8 +58,8 @@
         w = new RandomIndexWriter(random(), dir, iwc);
         docsLeftInThisSegment = TestUtil.nextInt(random(), 10, 100);
       }
-      final Document doc = new Document();
-      doc.add(newStringField("id", String.valueOf(docUpto), Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", docUpto);
       w.addDocument(doc);
       docUpto++;
       docsLeftInThisSegment--;
@@ -69,13 +69,15 @@
       System.out.println("\nTEST: now delete...");
     }
 
+    FieldTypes fieldTypes = w.getFieldTypes();
+
     // Random delete half the docs:
     final Set<Integer> deleted = new HashSet<>();
     while(deleted.size() < NUM_DOCS/2) {
       final Integer toDelete = random().nextInt(NUM_DOCS);
       if (!deleted.contains(toDelete)) {
         deleted.add(toDelete);
-        w.deleteDocuments(new Term("id", String.valueOf(toDelete)));
+        w.deleteDocuments(fieldTypes.newIntTerm("id", toDelete));
         if (random().nextInt(17) == 6) {
           final IndexReader r = w.getReader();
           assertEquals(NUM_DOCS - deleted.size(), r.numDocs());
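
With addUniqueInt the id is stored as a real int, so deletes go through a typed term built by the schema rather than a string Term. A sketch of the pair as used above; the literal 42 is just an example value, and the comment on newIntTerm is an inference from this diff:

    Document doc = w.newDocument();
    doc.addUniqueInt("id", 42);           // typed unique key; replaces newStringField("id", "42", ...)
    w.addDocument(doc);

    FieldTypes fieldTypes = w.getFieldTypes();
    w.deleteDocuments(fieldTypes.newIntTerm("id", 42));  // presumably encodes 42 the same way the field does
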
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMixedDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestMixedDocValuesUpdates.java
index 385384f..68e5b03 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMixedDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMixedDocValuesUpdates.java
@@ -8,19 +8,14 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.lucene.util.TestUtil;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 /*
@@ -50,7 +45,8 @@
     lmp.setMergeFactor(3); // merge often
     conf.setMergePolicy(lmp);
     IndexWriter writer = new IndexWriter(dir, conf);
-
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    
     final boolean isNRT = random.nextBoolean();
     DirectoryReader reader;
     if (isNRT) {
@@ -66,6 +62,9 @@
     for (int i = 0; i < fieldValues.length; i++) {
       fieldValues[i] = 1;
     }
+    for(int i=numNDVFields;i<numFields;i++) {
+      fieldTypes.disableSorting("f" + i);
+    }
     
     int numRounds = atLeast(15);
     int docID = 0;
@@ -73,15 +72,15 @@
       int numDocs = atLeast(5);
       // System.out.println("TEST: round=" + i + ", numDocs=" + numDocs);
       for (int j = 0; j < numDocs; j++) {
-        Document doc = new Document();
-        doc.add(new StringField("id", "doc-" + docID, Store.NO));
-        doc.add(new StringField("key", "all", Store.NO)); // update key
+        Document doc = writer.newDocument();
+        doc.addAtom("id", "doc-" + docID);
+        doc.addAtom("key", "all"); // update key
         // add all fields with their current value
         for (int f = 0; f < fieldValues.length; f++) {
           if (f < numNDVFields) {
-            doc.add(new NumericDocValuesField("f" + f, fieldValues[f]));
+            doc.addLong("f" + f, fieldValues[f]);
           } else {
-            doc.add(new BinaryDocValuesField("f" + f, TestBinaryDocValuesUpdates.toBytes(fieldValues[f])));
+            doc.addBinary("f" + f, TestBinaryDocValuesUpdates.toBytes(fieldValues[f]));
           }
         }
         writer.addDocument(doc);
@@ -156,25 +155,31 @@
     final Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     // create index
     final int numFields = TestUtil.nextInt(random(), 2, 4);
     final int numThreads = TestUtil.nextInt(random(), 3, 6);
     final int numDocs = atLeast(2000);
+
+    for(int i=0;i<numFields;i++) {
+      fieldTypes.disableSorting("f" + i);
+    }
+
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "doc" + i);
       double group = random().nextDouble();
       String g;
       if (group < 0.1) g = "g0";
       else if (group < 0.5) g = "g1";
       else if (group < 0.8) g = "g2";
       else g = "g3";
-      doc.add(new StringField("updKey", g, Store.NO));
+      doc.addAtom("updKey", g);
       for (int j = 0; j < numFields; j++) {
         long value = random().nextInt();
-        doc.add(new BinaryDocValuesField("f" + j, TestBinaryDocValuesUpdates.toBytes(value)));
-        doc.add(new NumericDocValuesField("cf" + j, value * 2)); // control, always updated to f * 2
+        doc.addBinary("f" + j, TestBinaryDocValuesUpdates.toBytes(value));
+        doc.addLong("cf" + j, value * 2); // control, always updated to f * 2
       }
       writer.addDocument(doc);
     }
@@ -204,9 +209,12 @@
               final String f = "f" + field;
               final String cf = "cf" + field;
               long updValue = random.nextInt();
+              Document update = writer.newDocument();
+              update.disableExistsField();
+              update.addBinary(f, TestBinaryDocValuesUpdates.toBytes(updValue));
+              update.addLong(cf, updValue*2);
 //              System.err.println("[" + Thread.currentThread().getName() + "] t=" + t + ", f=" + f + ", updValue=" + updValue);
-              writer.updateDocValues(t, new BinaryDocValuesField(f, TestBinaryDocValuesUpdates.toBytes(updValue)),
-                  new NumericDocValuesField(cf, updValue*2));
+              writer.updateDocValues(t, update);
               
               if (random.nextDouble() < 0.2) {
                 // delete a random document
@@ -293,13 +301,16 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(4);
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("f");
+
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "doc" + i);
       long value = random().nextInt();
-      doc.add(new BinaryDocValuesField("f", TestBinaryDocValuesUpdates.toBytes(value)));
-      doc.add(new NumericDocValuesField("cf", value * 2));
+      doc.addBinary("f", TestBinaryDocValuesUpdates.toBytes(value));
+      doc.addLong("cf", value * 2);
       writer.addDocument(doc);
     }
     
@@ -308,8 +319,11 @@
       int doc = random().nextInt(numDocs);
       Term t = new Term("id", "doc" + doc);
       long value = random().nextLong();
-      writer.updateDocValues(t, new BinaryDocValuesField("f", TestBinaryDocValuesUpdates.toBytes(value)),
-          new NumericDocValuesField("cf", value*2));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addBinary("f", TestBinaryDocValuesUpdates.toBytes(value));
+      update.addLong("cf", value*2);
+      writer.updateDocValues(t, update);
       DirectoryReader reader = DirectoryReader.open(writer, true);
       for (LeafReaderContext context : reader.leaves()) {
         LeafReader r = context.reader();
@@ -334,7 +348,15 @@
     conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
     conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // don't flush by doc
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("upd");
+    fieldTypes.setMultiValued("upd");
+
+    final int numUpdates = atLeast(100);
+    for(int i=0;i<numUpdates;i++) {
+      fieldTypes.disableSorting("f" + i);
+    }
+
     // test data: lots of documents (few 10Ks) and lots of update terms (few hundreds)
     final int numDocs = atLeast(20000);
     final int numBinaryFields = atLeast(5);
@@ -348,32 +370,35 @@
     
     // build a large index with many BDV fields and update terms
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       int numUpdateTerms = TestUtil.nextInt(random, 1, numTerms / 10);
       for (int j = 0; j < numUpdateTerms; j++) {
-        doc.add(new StringField("upd", RandomPicks.randomFrom(random, updateTerms), Store.NO));
+        doc.addAtom("upd", RandomPicks.randomFrom(random, updateTerms));
       }
       for (int j = 0; j < numBinaryFields; j++) {
         long val = random.nextInt();
-        doc.add(new BinaryDocValuesField("f" + j, TestBinaryDocValuesUpdates.toBytes(val)));
-        doc.add(new NumericDocValuesField("cf" + j, val * 2));
+        doc.addBinary("f" + j, TestBinaryDocValuesUpdates.toBytes(val));
+        doc.addLong("cf" + j, val * 2);
       }
       writer.addDocument(doc);
     }
     
     writer.commit(); // commit so there's something to apply to
-    
+
     // set to flush every 2048 bytes (approximately every 12 updates), so we get
     // many flushes during binary updates
     writer.getConfig().setRAMBufferSizeMB(2048.0 / 1024 / 1024);
-    final int numUpdates = atLeast(100);
+
 //    System.out.println("numUpdates=" + numUpdates);
     for (int i = 0; i < numUpdates; i++) {
       int field = random.nextInt(numBinaryFields);
       Term updateTerm = new Term("upd", RandomPicks.randomFrom(random, updateTerms));
       long value = random.nextInt();
-      writer.updateDocValues(updateTerm, new BinaryDocValuesField("f"+field, TestBinaryDocValuesUpdates.toBytes(value)),
-          new NumericDocValuesField("cf"+field, value*2));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addBinary("f"+field, TestBinaryDocValuesUpdates.toBytes(value));
+      update.addLong("cf"+field, value*2);
+      writer.updateDocValues(updateTerm, update);
     }
 
     writer.close();
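
Doc-values updates now take a Document carrying just the fields to rewrite; disableExistsField() presumably marks it as a partial update rather than a full document. A hedged sketch of a single update following the pattern in the hunk above ("upd"/"t0", "f0"/"cf0", and the value are illustrative):

    Term updateTerm = new Term("upd", "t0");   // illustrative update key
    long value = 17;

    Document update = writer.newDocument();
    update.disableExistsField();               // partial update: no new document is added
    update.addBinary("f0", TestBinaryDocValuesUpdates.toBytes(value));
    update.addLong("cf0", value * 2);          // control field kept at 2x, as in the test
    writer.updateDocValues(updateTerm, update);
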
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java
index e335233..b58bab4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java
@@ -19,13 +19,8 @@
 
 import java.util.ArrayList;
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -37,9 +32,6 @@
   
   public void testNumerics() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
-    Field field = new NumericDocValuesField("numbers", 0);
-    doc.add(field);
     
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
@@ -47,7 +39,8 @@
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      field.setLongValue(random().nextLong());
+      Document doc = iw.newDocument();
+      doc.addLong("numbers", random().nextLong());
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
         iw.commit();
@@ -71,18 +64,17 @@
   
   public void testBinary() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
-    Field field = new BinaryDocValuesField("bytes", new BytesRef());
-    doc.add(field);
     
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("bytes");
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      BytesRef ref = new BytesRef(TestUtil.randomUnicodeString(random()));
-      field.setBytesValue(ref);
+      Document doc = iw.newDocument();
+      doc.addBinary("bytes", new BytesRef(TestUtil.randomUnicodeString(random())));
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
         iw.commit();
@@ -108,9 +100,6 @@
   
   public void testSorted() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
-    Field field = new SortedDocValuesField("bytes", new BytesRef());
-    doc.add(field);
     
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
@@ -118,10 +107,10 @@
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      BytesRef ref = new BytesRef(TestUtil.randomUnicodeString(random()));
-      field.setBytesValue(ref);
+      Document doc = iw.newDocument();
+      doc.addAtom("bytes", TestUtil.randomUnicodeString(random()));
       if (random().nextInt(7) == 0) {
-        iw.addDocument(new Document());
+        iw.addDocument(iw.newDocument());
       }
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
@@ -153,9 +142,6 @@
   // tries to make more dups than testSorted
   public void testSortedWithLotsOfDups() throws Exception {
     Directory dir = newDirectory();
-    Document doc = new Document();
-    Field field = new SortedDocValuesField("bytes", new BytesRef());
-    doc.add(field);
     
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
@@ -163,8 +149,8 @@
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      BytesRef ref = new BytesRef(TestUtil.randomSimpleString(random(), 2));
-      field.setBytesValue(ref);
+      Document doc = iw.newDocument();
+      doc.addAtom("bytes", TestUtil.randomSimpleString(random(), 2));
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
         iw.commit();
@@ -198,13 +184,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("bytes");
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       int numValues = random().nextInt(5);
       for (int j = 0; j < numValues; j++) {
-        doc.add(new SortedSetDocValuesField("bytes", new BytesRef(TestUtil.randomUnicodeString(random()))));
+        doc.addAtom("bytes", TestUtil.randomUnicodeString(random()));
       }
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
@@ -260,13 +248,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("bytes");
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       int numValues = random().nextInt(5);
       for (int j = 0; j < numValues; j++) {
-        doc.add(new SortedSetDocValuesField("bytes", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+        doc.addAtom("bytes", TestUtil.randomSimpleString(random(), 2));
       }
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
@@ -321,13 +311,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("nums");
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       int numValues = random().nextInt(5);
       for (int j = 0; j < numValues; j++) {
-        doc.add(new SortedNumericDocValuesField("nums", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
+        doc.addLong("nums", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
       }
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
@@ -375,11 +367,11 @@
 
     int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       if (random().nextInt(4) >= 0) {
-        doc.add(new NumericDocValuesField("numbers", random().nextLong()));
+        doc.addLong("numbers", random().nextLong());
       }
-      doc.add(new NumericDocValuesField("numbersAlways", random().nextLong()));
+      doc.addLong("numbersAlways", random().nextLong());
       iw.addDocument(doc);
       if (random().nextInt(17) == 0) {
         iw.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
index a0b6170..82d963f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
@@ -17,12 +17,13 @@
  * limitations under the License.
  */
 
+import java.util.*;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.document.*;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import java.util.*;
 
 public class TestMultiFields extends LuceneTestCase {
 
@@ -38,6 +39,8 @@
 
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                              .setMergePolicy(NoMergePolicy.INSTANCE));
+      FieldTypes fieldTypes = w.getFieldTypes();
+
       // we can do this because we use NoMergePolicy (and dont merge to "nothing")
       w.setKeepFullyDeletedSegments(true);
 
@@ -46,11 +49,6 @@
       List<BytesRef> terms = new ArrayList<>();
 
       int numDocs = TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER);
-      Document doc = new Document();
-      Field f = newStringField("field", "", Field.Store.NO);
-      doc.add(f);
-      Field id = newStringField("id", "", Field.Store.NO);
-      doc.add(id);
 
       boolean onlyUniqueTerms = random().nextBoolean();
       if (VERBOSE) {
@@ -59,11 +57,12 @@
       Set<BytesRef> uniqueTerms = new HashSet<>();
       for(int i=0;i<numDocs;i++) {
 
+        Document doc = w.newDocument();
         if (!onlyUniqueTerms && random().nextBoolean() && terms.size() > 0) {
           // re-use existing term
           BytesRef term = terms.get(random().nextInt(terms.size()));
           docs.get(term).add(i);
-          f.setStringValue(term.utf8ToString());
+          doc.addAtom("field", term.utf8ToString());
         } else {
           String s = TestUtil.randomUnicodeString(random(), 10);
           BytesRef term = new BytesRef(s);
@@ -73,9 +72,9 @@
           docs.get(term).add(i);
           terms.add(term);
           uniqueTerms.add(term);
-          f.setStringValue(s);
+          doc.addAtom("field", s);
         }
-        id.setStringValue(""+i);
+        doc.addUniqueInt("id", i);
         w.addDocument(doc);
         if (random().nextInt(4) == 1) {
           w.commit();
@@ -83,7 +82,7 @@
         if (i > 0 && random().nextInt(20) == 1) {
           int delID = random().nextInt(i);
           deleted.add(delID);
-          w.deleteDocuments(new Term("id", ""+delID));
+          w.deleteDocuments(fieldTypes.newIntTerm("id", delID));
           if (VERBOSE) {
             System.out.println("TEST: delete " + delID);
           }
@@ -124,14 +123,18 @@
         }
         
         DocsEnum docsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE);
-        assertNotNull(docsEnum);
-
-        for(int docID : docs.get(term)) {
-          if (!deleted.contains(docID)) {
-            assertEquals(docID, docsEnum.nextDoc());
+        if (docsEnum == null) {
+          for(int docID : docs.get(term)) {
+            assertTrue(deleted.contains(docID));
           }
+        } else {
+          for(int docID : docs.get(term)) {
+            if (!deleted.contains(docID)) {
+              assertEquals(docID, docsEnum.nextDoc());
+            }
+          }
+          assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
         }
-        assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
       }
 
       reader.close();
@@ -157,8 +160,8 @@
   public void testSeparateEnums() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newStringField("f", "j", Field.Store.NO));
+    Document d = w.newDocument();
+    d.addAtom("f", "j");
     w.addDocument(d);
     w.commit();
     w.addDocument(d);
@@ -175,8 +178,8 @@
   public void testTermDocsEnum() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d = new Document();
-    d.add(newStringField("f", "j", Field.Store.NO));
+    Document d = w.newDocument();
+    d.addAtom("f", "j");
     w.addDocument(d);
     w.commit();
     w.addDocument(d);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index d1a9cd8..011e216 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -72,8 +71,8 @@
                                                 .setMergePolicy(newLogMergePolicy()));
     Term term = new Term("test", "a");
     for (int i = 0; i < 5000; i++) {
-      Document d1 = new Document();
-      d1.add(newTextField(term.field(), term.text(), Field.Store.NO));
+      Document d1 = writer.newDocument();
+      d1.addLargeText(term.field(), term.text());
       writer.addDocument(d1);
     }
     writer.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
index 41da4de..94e5bc8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
@@ -21,7 +21,6 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
@@ -94,8 +93,7 @@
           //int n = random.nextInt(2);
           if (type == 0) {
             int i = seq.addAndGet(1);
-            Document doc = DocHelper.createDocument(i, "index1", 10);
-            writer.addDocument(doc);
+            writer.addDocument(DocHelper.createDocument(writer, i, "index1", 10));
             addCount++;
           } else if (type == 1) {
             // we may or may not delete because the term may not exist,
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java
index 0a0438a..f167b71 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java
@@ -23,7 +23,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
@@ -61,9 +60,9 @@
             try {
               int docCount = 0;
               while (System.currentTimeMillis() < stopTime) {
-                final Document doc = new Document();
-                doc.add(newStringField("dc", ""+docCount, Field.Store.YES));
-                doc.add(newTextField("field", "here is some text", Field.Store.YES));
+                Document doc = w.newDocument();
+                doc.addInt("dc", docCount);
+                doc.addLargeText("field", "here is some text");
                 w.addDocument(doc);
 
                 if (docCount % 13 == 0) {
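
The TestNeverDelete change above shows the numeric side of the migration: a counter that used to be stringified into a stored StringField ("" + docCount) becomes a typed addInt, with index and store options inferred from the type by the writer's schema. A sketch of the new loop body, assuming only the calls in this hunk (w and docCount as above):

    Document doc = w.newDocument();
    doc.addInt("dc", docCount);                     // was: newStringField("dc", "" + docCount, Field.Store.YES)
    doc.addLargeText("field", "here is some text"); // was: newTextField("field", ..., Field.Store.YES)
    w.addDocument(doc);
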
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java
index 7377b46..42bd2d3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java
@@ -24,7 +24,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
@@ -72,8 +71,8 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                 .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
     for (int i = 0; i < 10; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("c", "a" + i, Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("c", "a" + i);
       writer.addDocument(doc);
       writer.commit();
       assertEquals("wrong number of commits !", i + 1, DirectoryReader.listCommits(dir).size());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
index 3ffc109..74edf1a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
@@ -22,8 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -80,14 +78,11 @@
     IndexWriterConfig config = newIndexWriterConfig(analyzer);
     config.setSimilarity(new CustomNormEncodingSimilarity());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    Document doc = new Document();
-    Field foo = newTextField("foo", "", Field.Store.NO);
-    Field bar = newTextField("bar", "", Field.Store.NO);
-    doc.add(foo);
-    doc.add(bar);
     
     for (int i = 0; i < 100; i++) {
-      bar.setStringValue("singleton");
+      Document doc = writer.newDocument();
+      doc.addLargeText("foo", "");
+      doc.addLargeText("bar", "singleton");
       writer.addDocument(doc);
     }
     
@@ -115,8 +110,8 @@
     NumericDocValues normValues = open.getNormValues(byteTestField);
     assertNotNull(normValues);
     for (int i = 0; i < open.maxDoc(); i++) {
-      StoredDocument document = open.document(i);
-      int expected = Integer.parseInt(document.get(byteTestField));
+      Document document = open.document(i);
+      int expected = Integer.parseInt(document.getString(byteTestField));
       assertEquals(expected, normValues.get(i));
     }
     open.close();
@@ -133,16 +128,13 @@
     Similarity provider = new MySimProvider();
     config.setSimilarity(provider);
     RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
-    final LineFileDocs docs = new LineFileDocs(random, true);
+    final LineFileDocs docs = new LineFileDocs(writer.w, random);
     int num = atLeast(100);
     for (int i = 0; i < num; i++) {
       Document doc = docs.nextDoc();
       int boost = random().nextInt(255);
-      Field f = new TextField(byteTestField, "" + boost, Field.Store.YES);
-      f.setBoost(boost);
-      doc.add(f);
+      doc.addLargeText(byteTestField, "" + boost, boost);
       writer.addDocument(doc);
-      doc.removeField(byteTestField);
       if (rarely()) {
         writer.commit();
       }
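
Two idioms change in TestNorms. Per-field boosts move from Field.setBoost onto a three-argument addLargeText overload, and the reuse-a-Field pattern (setStringValue inside the loop, removeField afterwards) gives way to a fresh document per iteration; LineFileDocs likewise now takes the writer so its documents carry the writer's schema. A sketch of the new shape, assuming the overload used above (writer, num and byteTestField as in the test):

    for (int i = 0; i < num; i++) {
      Document doc = writer.newDocument();                // fresh document; no field mutation or removal
      int boost = random().nextInt(255);
      doc.addLargeText(byteTestField, "" + boost, boost); // boost supplied at add time, not via setBoost
      writer.addDocument(doc);
    }
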
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
index 643adcd..4d6a99f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
@@ -12,13 +12,8 @@
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.asserting.AssertingDocValuesFormat;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Sort;
@@ -32,10 +27,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 /*
@@ -58,11 +51,11 @@
 @SuppressWarnings("resource")
 public class TestNumericDocValuesUpdates extends LuceneTestCase {
 
-  private Document doc(int id) {
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc-" + id, Store.NO));
+  private Document doc(IndexWriter w, int id) {
+    Document doc = w.newDocument();
+    doc.addAtom("id", "doc-" + id);
     // make sure we don't set the doc's value to 0, so it isn't confused with a document that's missing the value
-    doc.add(new NumericDocValuesField("val", id + 1));
+    doc.addInt("val", id + 1);
     return doc;
   }
   
@@ -71,20 +64,20 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
                                                 .setRAMBufferSizeMB(0.00000001));
-    writer.addDocument(doc(0)); // val=1
-    writer.addDocument(doc(1)); // val=2
-    writer.addDocument(doc(3)); // val=2
+    writer.addDocument(doc(writer, 0)); // val=1
+    writer.addDocument(doc(writer, 1)); // val=2
+    writer.addDocument(doc(writer, 3)); // val=4
     writer.commit();
-    assertEquals(1, writer.getFlushDeletesCount());
-    writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 5L);
     assertEquals(2, writer.getFlushDeletesCount());
-    writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 6L);
+    writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 5L);
     assertEquals(3, writer.getFlushDeletesCount());
-    writer.updateNumericDocValue(new Term("id", "doc-2"), "val", 7L); 
+    writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 6L);
     assertEquals(4, writer.getFlushDeletesCount());
+    writer.updateNumericDocValue(new Term("id", "doc-2"), "val", 7L); 
+    assertEquals(5, writer.getFlushDeletesCount());
     writer.getConfig().setRAMBufferSizeMB(1000d);
     writer.updateNumericDocValue(new Term("id", "doc-2"), "val", 7L);
-    assertEquals(4, writer.getFlushDeletesCount());
+    assertEquals(5, writer.getFlushDeletesCount());
     writer.close();
     dir.close();
   }
@@ -97,8 +90,8 @@
     conf.setMaxBufferedDocs(10);
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(doc(0)); // val=1
-    writer.addDocument(doc(1)); // val=2
+    writer.addDocument(doc(writer, 0)); // val=1
+    writer.addDocument(doc(writer, 1)); // val=2
     if (random().nextBoolean()) { // randomly commit before the update is sent
       writer.commit();
     }
@@ -133,7 +126,7 @@
     int numDocs = 10;
     long[] expectedValues = new long[numDocs];
     for (int i = 0; i < numDocs; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
       expectedValues[i] = i + 1;
     }
     writer.commit();
@@ -176,8 +169,8 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(doc(0));
-    writer.addDocument(doc(1));
+    writer.addDocument(doc(writer, 0));
+    writer.addDocument(doc(writer, 1));
     
     final boolean isNRT = random().nextBoolean();
     final DirectoryReader reader1;
@@ -217,7 +210,7 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     for (int i = 0; i < 6; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
       if (i % 2 == 1) {
         writer.commit(); // create 2-docs segments
       }
@@ -265,8 +258,8 @@
     conf.setMaxBufferedDocs(10); // control segment flushing
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    writer.addDocument(doc(0));
-    writer.addDocument(doc(1));
+    writer.addDocument(doc(writer, 0));
+    writer.addDocument(doc(writer, 1));
     
     if (random().nextBoolean()) {
       writer.commit();
@@ -298,15 +291,18 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     conf.setMaxBufferedDocs(10); // prevent merges
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("ssdv");
+    fieldTypes.disableSorting("bdv");
+
     for (int i = 0; i < 4; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
-      doc.add(new NumericDocValuesField("ndv", i));
-      doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedDocValuesField("sdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i))));
-      doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i * 2))));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
+      doc.addInt("ndv", i);
+      doc.addBinary("bdv", new BytesRef(Integer.toString(i)));
+      doc.addShortText("sdv", Integer.toString(i));
+      doc.addShortText("ssdv", Integer.toString(i));
+      doc.addShortText("ssdv", Integer.toString(i * 2));
       writer.addDocument(doc);
     }
     writer.commit();
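
This is the first hunk in the file to declare field behavior up front on FieldTypes instead of encoding it into per-Field instances: setMultiValued("ssdv") is what permits two addShortText("ssdv", ...) calls in one document, and disableSorting("bdv") appears to keep that field as plain binary doc values rather than a sorted form. The schema-first style in miniature, using only calls from this hunk:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("ssdv");      // two values per document below
    fieldTypes.disableSorting("bdv");       // plain binary doc values, no sorted variant

    Document doc = writer.newDocument();
    doc.addBinary("bdv", new BytesRef("0"));
    doc.addShortText("ssdv", "0");
    doc.addShortText("ssdv", "1");          // legal only because "ssdv" is multi-valued
    writer.addDocument(doc);
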
@@ -351,10 +347,10 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     for (int i = 0; i < 2; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
-      doc.add(new NumericDocValuesField("ndv1", i));
-      doc.add(new NumericDocValuesField("ndv2", i));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
+      doc.addInt("ndv1", i);
+      doc.addInt("ndv2", i);
       writer.addDocument(doc);
     }
     writer.commit();
@@ -383,10 +379,10 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     for (int i = 0; i < 2; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("dvUpdateKey", "dv");
       if (i == 0) { // index only one document with value
-        doc.add(new NumericDocValuesField("ndv", 5));
+        doc.addInt("ndv", 5);
       }
       writer.addDocument(doc);
     }
@@ -415,9 +411,9 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new StringField("foo", "bar", Store.NO));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addAtom("foo", "bar");
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -454,10 +450,10 @@
     });
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 5));
-    doc.add(new SortedDocValuesField("sorted", new BytesRef("value")));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addInt("ndv", 5);
+    doc.addShortText("sorted", "value");
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -486,9 +482,9 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("key", "doc", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 5));
+    Document doc = writer.newDocument();
+    doc.addAtom("key", "doc");
+    doc.addInt("ndv", 5);
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -513,17 +509,17 @@
     Random random = random();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     int docid = 0;
     int numRounds = atLeast(10);
     for (int rnd = 0; rnd < numRounds; rnd++) {
-      Document doc = new Document();
-      doc.add(new StringField("key", "doc", Store.NO));
-      doc.add(new NumericDocValuesField("ndv", -1));
       int numDocs = atLeast(30);
       for (int i = 0; i < numDocs; i++) {
-        doc.removeField("id");
-        doc.add(new StringField("id", Integer.toString(docid++), Store.NO));
+        Document doc = writer.newDocument();
+        doc.addAtom("key", "doc");
+        doc.addLong("ndv", -1);
+        doc.addUniqueInt("id", docid++);
         writer.addDocument(doc);
       }
       
@@ -531,7 +527,7 @@
       writer.updateNumericDocValue(new Term("key", "doc"), "ndv", value);
       
       if (random.nextDouble() < 0.2) { // randomly delete some docs
-        writer.deleteDocuments(new Term("id", Integer.toString(random.nextInt(docid))));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", random.nextInt(docid)));
       }
       
       // randomly commit or reopen-IW (or nothing), before forceMerge
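
In the hunks above the id field becomes a typed unique key: addUniqueInt replaces the StringField-plus-Integer.toString construction, and deletes must then go through FieldTypes.newIntTerm so the query term uses the same int encoding. The pairing in isolation, assuming only the two calls shown (writer, fieldTypes and docid as in the test):

    Document doc = writer.newDocument();
    doc.addUniqueInt("id", docid++);   // typed unique key; was a stringified StringField
    writer.addDocument(doc);
    // deleting by that key needs the int-encoded term, hence the FieldTypes helper:
    writer.deleteDocuments(fieldTypes.newIntTerm("id", docid - 1));
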
@@ -549,10 +545,10 @@
       // forceMerge is called, the index will be with one segment and deletes
       // and some MPs might now merge it, thereby invalidating test's
       // assumption that the reader has no deletes).
-      doc = new Document();
-      doc.add(new StringField("id", Integer.toString(docid++), Store.NO));
-      doc.add(new StringField("key", "doc", Store.NO));
-      doc.add(new NumericDocValuesField("ndv", value));
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", docid++);
+      doc.addAtom("key", "doc");
+      doc.addLong("ndv", value);
       writer.addDocument(doc);
 
       writer.forceMerge(1, true);
@@ -586,10 +582,10 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("k1", "v1", Store.NO));
-    doc.add(new StringField("k2", "v2", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 5));
+    Document doc = writer.newDocument();
+    doc.addAtom("k1", "v1");
+    doc.addAtom("k2", "v2");
+    doc.addInt("ndv", 5);
     writer.addDocument(doc); // flushed document
     writer.commit();
     writer.addDocument(doc); // in-memory document
@@ -639,12 +635,12 @@
       int numDocs = atLeast(5);
 //      System.out.println("[" + Thread.currentThread().getName() + "]: round=" + i + ", numDocs=" + numDocs);
       for (int j = 0; j < numDocs; j++) {
-        Document doc = new Document();
-        doc.add(new StringField("id", "doc-" + docID, Store.NO));
-        doc.add(new StringField("key", "all", Store.NO)); // update key
+        Document doc = writer.newDocument();
+        doc.addAtom("id", "doc-" + docID);
+        doc.addAtom("key", "all"); // update key
         // add all fields with their current value
         for (int f = 0; f < fieldValues.length; f++) {
-          doc.add(new NumericDocValuesField("f" + f, fieldValues[f]));
+          doc.addLong("f" + f, fieldValues[f]);
         }
         writer.addDocument(doc);
         ++docID;
@@ -698,7 +694,7 @@
     writer.close();
     IOUtils.close(reader, dir);
   }
-  
+
   @Test
   public void testUpdateSegmentWithNoDocValues() throws Exception {
     Directory dir = newDirectory();
@@ -708,33 +704,40 @@
     // legit.
     conf.setMergePolicy(NoMergePolicy.INSTANCE);
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("id");
+    fieldTypes.disableExistsFilters();
+
     // first segment with NDV
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc0", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 3));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc0");
+    doc.addInt("ndv", 3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc4", Store.NO)); // document without 'ndv' field
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc4"); // document without 'ndv' field
     writer.addDocument(doc);
+    System.out.println("\nTEST: commit seg 1");
     writer.commit();
     
     // second segment with no NDV
-    doc = new Document();
-    doc.add(new StringField("id", "doc1", Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc2", Store.NO)); // document that isn't updated
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc2"); // document that isn't updated
     writer.addDocument(doc);
+    System.out.println("\nTEST: commit seg 2");
     writer.commit();
     
     // update document in the first segment - should not affect docsWithField of
     // the document without NDV field
+    System.out.println("\nTEST: update");
     writer.updateNumericDocValue(new Term("id", "doc0"), "ndv", 5L);
     
     // update document in the second segment - field should be added and we should
     // be able to handle the other document correctly (e.g. no NPE)
     writer.updateNumericDocValue(new Term("id", "doc1"), "ndv", 5L);
+    System.out.println("\nTEST: close writer");
     writer.close();
 
     DirectoryReader reader = DirectoryReader.open(dir);
@@ -752,7 +755,7 @@
 
     dir.close();
   }
-  
+
   @Test
   public void testUpdateSegmentWithNoDocValues2() throws Exception {
     Directory dir = newDirectory();
@@ -764,22 +767,22 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     // first segment with NDV
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc0", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 3));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc0");
+    doc.addInt("ndv", 3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc4", Store.NO)); // document without 'ndv' field
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc4"); // document without 'ndv' field
     writer.addDocument(doc);
     writer.commit();
     
     // second segment with no NDV, but another dv field "foo"
-    doc = new Document();
-    doc.add(new StringField("id", "doc1", Store.NO));
-    doc.add(new NumericDocValuesField("foo", 3));
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc1");
+    doc.addInt("foo", 3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "doc2", Store.NO)); // document that isn't updated
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc2"); // document that isn't updated
     writer.addDocument(doc);
     writer.commit();
     
@@ -850,17 +853,17 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     
     // first segment with NDV
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc0", Store.NO));
-    doc.add(new StringField("ndv", "mock-value", Store.NO));
-    doc.add(new NumericDocValuesField("ndv", 5));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc0");
+    doc.addAtom("ndvmock", "mock-value");
+    doc.addInt("ndv", 5);
     writer.addDocument(doc);
     writer.commit();
     
     // second segment with no NDV
-    doc = new Document();
-    doc.add(new StringField("id", "doc1", Store.NO));
-    doc.add(new StringField("ndv", "mock-value", Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("id", "doc1");
+    doc.addAtom("ndvmock", "mock-value");
     writer.addDocument(doc);
     writer.commit();
     
@@ -882,30 +885,6 @@
   }
   
   @Test
-  public void testUpdateNumericDVFieldWithSameNameAsPostingField() throws Exception {
-    // this used to fail because FieldInfos.Builder neglected to update
-    // globalFieldMaps.docValuesTypes map
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf);
-    
-    Document doc = new Document();
-    doc.add(new StringField("f", "mock-value", Store.NO));
-    doc.add(new NumericDocValuesField("f", 5));
-    writer.addDocument(doc);
-    writer.commit();
-    writer.updateNumericDocValue(new Term("f", "mock-value"), "f", 17L);
-    writer.close();
-    
-    DirectoryReader r = DirectoryReader.open(dir);
-    NumericDocValues ndv = r.leaves().get(0).reader().getNumericDocValues("f");
-    assertEquals(17, ndv.get(0));
-    r.close();
-    
-    dir.close();
-  }
-  
-  @Test
   public void testStressMultiThreading() throws Exception {
     final Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -915,19 +894,19 @@
     final int numFields = TestUtil.nextInt(random(), 1, 4);
     final int numDocs = atLeast(2000);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "doc" + i);
       double group = random().nextDouble();
       String g;
       if (group < 0.1) g = "g0";
       else if (group < 0.5) g = "g1";
       else if (group < 0.8) g = "g2";
       else g = "g3";
-      doc.add(new StringField("updKey", g, Store.NO));
+      doc.addAtom("updKey", g);
       for (int j = 0; j < numFields; j++) {
         long value = random().nextInt();
-        doc.add(new NumericDocValuesField("f" + j, value));
-        doc.add(new NumericDocValuesField("cf" + j, value * 2)); // control, always updated to f * 2
+        doc.addLong("f" + j, value);
+        doc.addLong("cf" + j, value * 2); // control, always updated to f * 2
       }
       writer.addDocument(doc);
     }
@@ -959,7 +938,11 @@
               final String cf = "cf" + field;
 //              System.out.println("[" + Thread.currentThread().getName() + "] numUpdates=" + numUpdates + " updateTerm=" + t + " field=" + field);
               long updValue = random.nextInt();
-              writer.updateDocValues(t, new NumericDocValuesField(f, updValue), new NumericDocValuesField(cf, updValue*2));
+              Document update = writer.newDocument();
+              update.disableExistsField();
+              update.addLong(f, updValue);
+              update.addLong(cf, updValue*2);
+              writer.updateDocValues(t, update);
               
               if (random.nextDouble() < 0.2) {
                 // delete a random document
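
updateDocValues changes shape as well: instead of varargs NumericDocValuesField arguments, the caller builds an update Document from the writer and calls disableExistsField on it, which seems to mark it as a carrier of doc-values updates only rather than a real added document. Mirroring the hunk (t, f, cf and updValue as in the test):

    Document update = writer.newDocument();
    update.disableExistsField();        // carries only doc-values updates, not a new document
    update.addLong(f, updValue);
    update.addLong(cf, updValue * 2);   // control field, always twice f's value
    writer.updateDocValues(t, update);
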
@@ -1043,11 +1026,11 @@
     IndexWriter writer = new IndexWriter(dir, conf);
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", "doc" + i, Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", "doc" + i);
       long value = random().nextInt();
-      doc.add(new NumericDocValuesField("f", value));
-      doc.add(new NumericDocValuesField("cf", value * 2));
+      doc.addLong("f", value);
+      doc.addLong("cf", value * 2);
       writer.addDocument(doc);
     }
     
@@ -1056,7 +1039,11 @@
       int doc = random().nextInt(numDocs);
       Term t = new Term("id", "doc" + doc);
       long value = random().nextLong();
-      writer.updateDocValues(t, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value*2));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addLong("f", value);
+      update.addLong("cf", value*2);
+      writer.updateDocValues(t, update);
       DirectoryReader reader = DirectoryReader.open(writer, true);
       for (LeafReaderContext context : reader.leaves()) {
         LeafReader r = context.reader();
@@ -1084,10 +1071,10 @@
       }
     });
     IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "d0", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 5L));
-    doc.add(new NumericDocValuesField("f2", 13L));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "d0");
+    doc.addLong("f1", 5L);
+    doc.addLong("f2", 13L);
     writer.addDocument(doc);
     writer.close();
     
@@ -1101,10 +1088,10 @@
       }
     });
     writer = new IndexWriter(dir, conf);
-    doc = new Document();
-    doc.add(new StringField("id", "d1", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 17L));
-    doc.add(new NumericDocValuesField("f2", 2L));
+    doc = writer.newDocument();
+    doc.addAtom("id", "d1");
+    doc.addLong("f1", 17L);
+    doc.addLong("f2", 2L);
     writer.addDocument(doc);
     writer.updateNumericDocValue(new Term("id", "d0"), "f1", 12L);
     writer.close();
@@ -1136,10 +1123,10 @@
 
     // create first index
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", RandomPicks.randomFrom(random(), randomTerms), Store.NO));
-      doc.add(new NumericDocValuesField("ndv", 4L));
-      doc.add(new NumericDocValuesField("control", 8L));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", RandomPicks.randomFrom(random(), randomTerms));
+      doc.addLong("ndv", 4L);
+      doc.addLong("control", 8L);
       writer.addDocument(doc);
     }
     
@@ -1150,7 +1137,11 @@
     // update some docs to a random value
     long value = random().nextInt();
     Term term = new Term("id", RandomPicks.randomFrom(random(), randomTerms));
-    writer.updateDocValues(term, new NumericDocValuesField("ndv", value), new NumericDocValuesField("control", value*2));
+    Document update = writer.newDocument();
+    update.disableExistsField();
+    update.addLong("ndv", value);
+    update.addLong("control", value*2);
+    writer.updateDocValues(term, update);
     writer.close();
     
     Directory dir2 = newDirectory();
@@ -1189,10 +1180,10 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "d0", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 1L));
-    doc.add(new NumericDocValuesField("f2", 1L));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "d0");
+    doc.addLong("f1", 1L);
+    doc.addLong("f2", 1L);
     writer.addDocument(doc);
 
     // update each field twice to make sure all unneeded files are deleted
@@ -1222,7 +1213,10 @@
     conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
     conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // don't flush by doc
     IndexWriter writer = new IndexWriter(dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("upd");
+    fieldTypes.setMultiValued("upd");
+
     // test data: lots of documents (few 10Ks) and lots of update terms (few hundreds)
     final int numDocs = atLeast(20000);
     final int numNumericFields = atLeast(5);
@@ -1236,15 +1230,15 @@
     
     // build a large index with many NDV fields and update terms
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       int numUpdateTerms = TestUtil.nextInt(random, 1, numTerms / 10);
       for (int j = 0; j < numUpdateTerms; j++) {
-        doc.add(new StringField("upd", RandomPicks.randomFrom(random, updateTerms), Store.NO));
+        doc.addAtom("upd", RandomPicks.randomFrom(random, updateTerms));
       }
       for (int j = 0; j < numNumericFields; j++) {
         long val = random.nextInt();
-        doc.add(new NumericDocValuesField("f" + j, val));
-        doc.add(new NumericDocValuesField("cf" + j, val * 2));
+        doc.addLong("f" + j, val);
+        doc.addLong("cf" + j, val * 2);
       }
       writer.addDocument(doc);
     }
@@ -1260,7 +1254,11 @@
       int field = random.nextInt(numNumericFields);
       Term updateTerm = new Term("upd", RandomPicks.randomFrom(random, updateTerms));
       long value = random.nextInt();
-      writer.updateDocValues(updateTerm, new NumericDocValuesField("f"+field, value), new NumericDocValuesField("cf"+field, value*2));
+      Document update = writer.newDocument();
+      update.disableExistsField();
+      update.addLong("f"+field, value);
+      update.addLong("cf"+field, value*2);
+      writer.updateDocValues(updateTerm, update);
     }
 
     writer.close();
@@ -1286,12 +1284,15 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("upd");
+    fieldTypes.setMultiValued("upd");
     
-    Document doc = new Document();
-    doc.add(new StringField("upd", "t1", Store.NO));
-    doc.add(new StringField("upd", "t2", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 1L));
-    doc.add(new NumericDocValuesField("f2", 1L));
+    Document doc = writer.newDocument();
+    doc.addAtom("upd", "t1");
+    doc.addAtom("upd", "t2");
+    doc.addLong("f1", 1L);
+    doc.addLong("f2", 1L);
     writer.addDocument(doc);
     writer.updateNumericDocValue(new Term("upd", "t1"), "f1", 2L); // update f1 to 2
     writer.updateNumericDocValue(new Term("upd", "t1"), "f2", 2L); // update f2 to 2
@@ -1314,9 +1315,9 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 1L));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc");
+    doc.addLong("f1", 1L);
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.commit();
@@ -1339,9 +1340,9 @@
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "doc", Store.NO));
-    doc.add(new NumericDocValuesField("f1", 1L));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "doc");
+    doc.addLong("f1", 1L);
     writer.addDocument(doc);
     // update w/ multiple nonexisting terms in same field
     writer.updateNumericDocValue(new Term("c", "foo"), "f1", 2L);
@@ -1368,7 +1369,7 @@
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     IndexWriter writer = new IndexWriter(dir, conf);
     for (int i = 0; i < 100; i++) {
-      writer.addDocument(doc(i));
+      writer.addDocument(doc(writer, i));
     }
     writer.commit();
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
index 317310e..75a6bc3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
@@ -22,12 +22,9 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
 
 public class TestOmitNorms extends LuceneTestCase {
   // Tests whether the DocumentWriter correctly enables the
@@ -36,28 +33,28 @@
     Directory ram = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
-    Document d = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    Document d = writer.newDocument();
         
     // this field will have norms
-    Field f1 = newTextField("f1", "This field has norms", Field.Store.NO);
-    d.add(f1);
+    d.addLargeText("f1", "This field has norms");
        
     // this field will NOT have norms
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f2 = newField("f2", "This field has NO norms in all docs", customType);
-    d.add(f2);
+    fieldTypes.disableNorms("f2");
+    d.addLargeText("f2", "This field has NO norms in all docs");
         
     writer.addDocument(d);
     writer.forceMerge(1);
+
     // now we add another document which has norms for field f2 and not for f1, and verify that the SegmentMerger
     // keeps things constant
-    d = new Document();
+    d = writer.newDocument();
         
     // Reverse
-    d.add(newField("f1", "This field has norms", customType));
-        
-    d.add(newTextField("f2", "This field has NO norms in all docs", Field.Store.NO));
+    fieldTypes.disableNorms("f1");
+    d.addLargeText("f1", "This field has norms");
+    d.addLargeText("f2", "This field has NO norms in all docs");
         
     writer.addDocument(d);
 
@@ -86,17 +83,16 @@
            .setMaxBufferedDocs(3)
            .setMergePolicy(newLogMergePolicy(2))
     );
-    Document d = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    Document d = writer.newDocument();
         
     // this field will have norms
-    Field f1 = newTextField("f1", "This field has norms", Field.Store.NO);
-    d.add(f1);
+    d.addLargeText("f1", "This field has norms");
        
     // this field will NOT have norms
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f2 = newField("f2", "This field has NO norms in all docs", customType);
-    d.add(f2);
+    fieldTypes.disableNorms("f2");
+    d.addLargeText("f2", "This field has NO norms in all docs");
 
     for (int i = 0; i < 30; i++) {
       writer.addDocument(d);
@@ -104,12 +100,12 @@
         
     // now we add another document which has norms for field f2 and not for f1, and verify that the SegmentMerger
     // keeps things constant
-    d = new Document();
+    d = writer.newDocument();
         
     // Reverse
-    d.add(newField("f1", "This field has norms", customType));
-        
-    d.add(newTextField("f2", "This field has NO norms in all docs", Field.Store.NO));
+    fieldTypes.disableNorms("f1");
+    d.addLargeText("f1", "This field has norms");
+    d.addLargeText("f2", "This field has NO norms in all docs");
         
     for (int i = 0; i < 30; i++) {
       writer.addDocument(d);
@@ -141,18 +137,16 @@
             .setMaxBufferedDocs(10)
             .setMergePolicy(newLogMergePolicy(2))
     );
-    Document d = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    Document d = writer.newDocument();
         
     // this field will have norms
-    Field f1 = newTextField("f1", "This field has norms", Field.Store.NO);
-    d.add(f1);
+    d.addLargeText("f1", "This field has norms");
        
     // this field will NOT have norms
-
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f2 = newField("f2", "This field has NO norms in all docs", customType);
-    d.add(f2);
+    fieldTypes.disableNorms("f2");
+    d.addLargeText("f2", "This field has NO norms in all docs");
 
     for (int i = 0; i < 5; i++) {
       writer.addDocument(d);
@@ -192,16 +186,16 @@
     IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
                                                 .setMaxBufferedDocs(3)
                                                 .setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
     LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
     lmp.setMergeFactor(2);
     lmp.setNoCFSRatio(0.0);
-    Document d = new Document();
 
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f1 = newField("f1", "This field has no norms", customType);
-    d.add(f1);
+    Document d = writer.newDocument();
 
+    fieldTypes.disableNorms("f1");
+    d.addLargeText("f1", "This field has no norms");
     for (int i = 0; i < 30; i++) {
       writer.addDocument(d);
     }
@@ -218,98 +212,4 @@
     assertNoNrm(ram);
     ram.close();
   }
-  
-  /**
-   * Tests various combinations of omitNorms=true/false, the field not existing at all,
-   * ensuring that only omitNorms is 'viral'.
-   * Internally checks that MultiNorms.norms() is consistent (returns the same bytes)
-   * as the fully merged equivalent.
-   */
-  public void testOmitNormsCombos() throws IOException {
-    // indexed with norms
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    Field norms = new Field("foo", "a", customType);
-    // indexed without norms
-    FieldType customType1 = new FieldType(TextField.TYPE_STORED);
-    customType1.setOmitNorms(true);
-    Field noNorms = new Field("foo", "a", customType1);
-    // not indexed, but stored
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
-    Field noIndex = new Field("foo", "a", customType2);
-    // not indexed but stored, omitNorms is set
-    FieldType customType3 = new FieldType();
-    customType3.setStored(true);
-    customType3.setOmitNorms(true);
-    Field noNormsNoIndex = new Field("foo", "a", customType3);
-    // not indexed nor stored (doesnt exist at all, we index a different field instead)
-    Field emptyNorms = new Field("bar", "a", customType);
-    
-    assertNotNull(getNorms("foo", norms, norms));
-    assertNull(getNorms("foo", norms, noNorms));
-    assertNotNull(getNorms("foo", norms, noIndex));
-    assertNotNull(getNorms("foo", norms, noNormsNoIndex));
-    assertNotNull(getNorms("foo", norms, emptyNorms));
-    assertNull(getNorms("foo", noNorms, noNorms));
-    assertNull(getNorms("foo", noNorms, noIndex));
-    assertNull(getNorms("foo", noNorms, noNormsNoIndex));
-    assertNull(getNorms("foo", noNorms, emptyNorms));
-    assertNull(getNorms("foo", noIndex, noIndex));
-    assertNull(getNorms("foo", noIndex, noNormsNoIndex));
-    assertNull(getNorms("foo", noIndex, emptyNorms));
-    assertNull(getNorms("foo", noNormsNoIndex, noNormsNoIndex));
-    assertNull(getNorms("foo", noNormsNoIndex, emptyNorms));
-    assertNull(getNorms("foo", emptyNorms, emptyNorms));
-  }
-
-  /**
-   * Indexes at least 1 document with f1, and at least 1 document with f2.
-   * returns the norms for "field".
-   */
-  NumericDocValues getNorms(String field, Field f1, Field f2) throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
-                              .setMergePolicy(newLogMergePolicy());
-    RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-    
-    // add f1
-    Document d = new Document();
-    d.add(f1);
-    riw.addDocument(d);
-    
-    // add f2
-    d = new Document();
-    d.add(f2);
-    riw.addDocument(d);
-    
-    // add a mix of f1's and f2's
-    int numExtraDocs = TestUtil.nextInt(random(), 1, 1000);
-    for (int i = 0; i < numExtraDocs; i++) {
-      d = new Document();
-      d.add(random().nextBoolean() ? f1 : f2);
-      riw.addDocument(d);
-    }
-
-    IndexReader ir1 = riw.getReader();
-    // todo: generalize
-    NumericDocValues norms1 = MultiDocValues.getNormValues(ir1, field);
-    
-    // fully merge and validate MultiNorms against single segment.
-    riw.forceMerge(1);
-    DirectoryReader ir2 = riw.getReader();
-    NumericDocValues norms2 = getOnlySegmentReader(ir2).getNormValues(field);
-
-    if (norms1 == null) {
-      assertNull(norms2);
-    } else {
-      for(int docID=0;docID<ir1.maxDoc();docID++) {
-        assertEquals(norms1.get(docID), norms2.get(docID));
-      }
-    }
-    ir1.close();
-    ir2.close();
-    riw.close();
-    dir.close();
-    return norms1;
-  }
 }
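
In TestOmitNorms the per-Field omitNorms flag becomes a single schema call, fieldTypes.disableNorms(name), made once against the writer. That also appears to explain why testOmitNormsCombos and getNorms are deleted above: mixing norms and no-norms Field instances under one field name is exactly what a writer-level schema rules out, so the "viral omitNorms" combinations no longer exist to exercise. The surviving idiom in miniature, using only calls from these hunks:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableNorms("f2");      // was: FieldType.setOmitNorms(true) on each Field instance
    Document d = writer.newDocument();
    d.addLargeText("f1", "This field has norms");
    d.addLargeText("f2", "This field has NO norms in all docs");
    writer.addDocument(d);
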
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
index fb58903..dd74ec4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
@@ -17,15 +17,10 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -39,11 +34,12 @@
   public void testBasic() throws Exception {   
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    Field f = newField("foo", "this is a test test", ft);
-    doc.add(f);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("foo");
+    fieldTypes.setIndexOptions("foo", IndexOptions.DOCS_AND_FREQS);
+
+    Document doc = w.newDocument();
+    doc.addLargeText("foo", "this is a test test");
     for (int i = 0; i < 100; i++) {
       w.addDocument(doc);
     }
@@ -61,210 +57,4 @@
     reader.close();
     dir.close();
   }
-  
-  // Tests whether the DocumentWriter correctly enable the
-  // omitTermFreqAndPositions bit in the FieldInfo
-  public void testPositions() throws Exception {
-    Directory ram = newDirectory();
-    Analyzer analyzer = new MockAnalyzer(random());
-    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
-    Document d = new Document();
-        
-    // f1,f2,f3: docs only
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    
-    Field f1 = newField("f1", "This field has docs only", ft);
-    d.add(f1);
-       
-    Field f2 = newField("f2", "This field has docs only", ft);
-    d.add(f2);
-    
-    Field f3 = newField("f3", "This field has docs only", ft);
-    d.add(f3);
-
-    FieldType ft2 = new FieldType(TextField.TYPE_NOT_STORED);
-    ft2.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    
-    // f4,f5,f6 docs and freqs
-    Field f4 = newField("f4", "This field has docs and freqs", ft2);
-    d.add(f4);
-       
-    Field f5 = newField("f5", "This field has docs and freqs", ft2);
-    d.add(f5);
-    
-    Field f6 = newField("f6", "This field has docs and freqs", ft2);
-    d.add(f6);
-    
-    FieldType ft3 = new FieldType(TextField.TYPE_NOT_STORED);
-    ft3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    
-    // f7,f8,f9 docs/freqs/positions
-    Field f7 = newField("f7", "This field has docs and freqs and positions", ft3);
-    d.add(f7);
-       
-    Field f8 = newField("f8", "This field has docs and freqs and positions", ft3);
-    d.add(f8);
-    
-    Field f9 = newField("f9", "This field has docs and freqs and positions", ft3);
-    d.add(f9);
-        
-    writer.addDocument(d);
-    writer.forceMerge(1);
-
-    // now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8, 
-    // and docs/freqs/positions for f3, f6, f9
-    d = new Document();
-    
-    // f1,f4,f7: docs only
-    f1 = newField("f1", "This field has docs only", ft);
-    d.add(f1);
-    
-    f4 = newField("f4", "This field has docs only", ft);
-    d.add(f4);
-    
-    f7 = newField("f7", "This field has docs only", ft);
-    d.add(f7);
-
-    // f2, f5, f8: docs and freqs
-    f2 = newField("f2", "This field has docs and freqs", ft2);
-    d.add(f2);
-    
-    f5 = newField("f5", "This field has docs and freqs", ft2);
-    d.add(f5);
-    
-    f8 = newField("f8", "This field has docs and freqs", ft2);
-    d.add(f8);
-    
-    // f3, f6, f9: docs and freqs and positions
-    f3 = newField("f3", "This field has docs and freqs and positions", ft3);
-    d.add(f3);     
-    
-    f6 = newField("f6", "This field has docs and freqs and positions", ft3);
-    d.add(f6);
-    
-    f9 = newField("f9", "This field has docs and freqs and positions", ft3);
-    d.add(f9);
-        
-    writer.addDocument(d);
-
-    // force merge
-    writer.forceMerge(1);
-    // flush
-    writer.close();
-
-    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(ram));
-    FieldInfos fi = reader.getFieldInfos();
-    // docs + docs = docs
-    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f1").getIndexOptions());
-    // docs + docs/freqs = docs
-    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f2").getIndexOptions());
-    // docs + docs/freqs/pos = docs
-    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f3").getIndexOptions());
-    // docs/freqs + docs = docs
-    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f4").getIndexOptions());
-    // docs/freqs + docs/freqs = docs/freqs
-    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f5").getIndexOptions());
-    // docs/freqs + docs/freqs/pos = docs/freqs
-    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f6").getIndexOptions());
-    // docs/freqs/pos + docs = docs
-    assertEquals(IndexOptions.DOCS, fi.fieldInfo("f7").getIndexOptions());
-    // docs/freqs/pos + docs/freqs = docs/freqs
-    assertEquals(IndexOptions.DOCS_AND_FREQS, fi.fieldInfo("f8").getIndexOptions());
-    // docs/freqs/pos + docs/freqs/pos = docs/freqs/pos
-    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f9").getIndexOptions());
-    
-    reader.close();
-    ram.close();
-  }
-  
-  private void assertNoPrx(Directory dir) throws Throwable {
-    final String[] files = dir.listAll();
-    for(int i=0;i<files.length;i++) {
-      assertFalse(files[i].endsWith(".prx"));
-      assertFalse(files[i].endsWith(".pos"));
-    }
-  }
-
-  // Verifies no *.prx exists when all fields omit term positions:
-  public void testNoPrxFile() throws Throwable {
-    Directory ram = newDirectory();
-    if (ram instanceof MockDirectoryWrapper) {
-      // we verify some files get deleted
-      ((MockDirectoryWrapper)ram).setEnableVirusScanner(false);
-    }
-    Analyzer analyzer = new MockAnalyzer(random());
-    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
-                                                .setMaxBufferedDocs(3)
-                                                .setMergePolicy(newLogMergePolicy()));
-    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
-    lmp.setMergeFactor(2);
-    lmp.setNoCFSRatio(0.0);
-    Document d = new Document();
-
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    Field f1 = newField("f1", "This field has term freqs", ft);
-    d.add(f1);
-
-    for(int i=0;i<30;i++)
-      writer.addDocument(d);
-
-    writer.commit();
-
-    assertNoPrx(ram);
-    
-    // now add some documents with positions, and check there is no prox after optimization
-    d = new Document();
-    f1 = newTextField("f1", "This field has positions", Field.Store.NO);
-    d.add(f1);
-    
-    for(int i=0;i<30;i++)
-      writer.addDocument(d);
-
-    // force merge
-    writer.forceMerge(1);
-    // flush
-    writer.close();
-
-    assertNoPrx(ram);
-    ram.close();
-  }
-  
-  /** make sure we downgrade positions and payloads correctly */
-  public void testMixing() throws Exception {
-    // no positions
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    
-    for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      if (i < 19 && random().nextBoolean()) {
-        for (int j = 0; j < 50; j++) {
-          doc.add(new TextField("foo", "i have positions", Field.Store.NO));
-        }
-      } else {
-        for (int j = 0; j < 50; j++) {
-          doc.add(new Field("foo", "i have no positions", ft));
-        }
-      }
-      iw.addDocument(doc);
-      iw.commit();
-    }
-    
-    if (random().nextBoolean()) {
-      iw.forceMerge(1);
-    }
-    
-    DirectoryReader ir = iw.getReader();
-    FieldInfos fis = MultiFields.getMergedFieldInfos(ir);
-    assertEquals(IndexOptions.DOCS_AND_FREQS, fis.fieldInfo("foo").getIndexOptions());
-    assertFalse(fis.fieldInfo("foo").hasPayloads());
-    iw.close();
-    ir.close();
-    dir.close(); // checkindex
-  }
 }
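
TestOmitPositions gets the same schema-first treatment for index options: setIndexOptions replaces a hand-built FieldType, and every downgrade below positions is paired with disableHighlighting, which suggests large-text fields default to highlighting support and must opt out before positions and offsets can be omitted. The deleted testPositions matrix (mixed per-document options merging down to the lowest common setting) covered exactly the cross-document mixing a writer-level schema forbids. The remaining idiom, as in the first hunk of this file:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.disableHighlighting("foo");                          // highlighting would require positions/offsets
    fieldTypes.setIndexOptions("foo", IndexOptions.DOCS_AND_FREQS); // so it is disabled before dropping them

    Document doc = w.newDocument();
    doc.addLargeText("foo", "this is a test test");
    w.addDocument(doc);
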
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
index 8818f33..9a66fe4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -22,9 +22,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.CollectionStatistics;
@@ -41,7 +39,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
-
 public class TestOmitTf extends LuceneTestCase {
   
   public static class SimpleSimilarity extends TFIDFSimilarity {
@@ -61,42 +58,34 @@
     @Override public float scorePayload(int doc, int start, int end, BytesRef payload) { return 1.0f; }
   }
 
-  private static final FieldType omitType = new FieldType(TextField.TYPE_NOT_STORED);
-  private static final FieldType normalType = new FieldType(TextField.TYPE_NOT_STORED);
-  
-  static {
-    omitType.setIndexOptions(IndexOptions.DOCS);
-  }
-
   // Tests whether the DocumentWriter correctly enables the
   // omitTermFreqAndPositions bit in the FieldInfo
   public void testOmitTermFreqAndPositions() throws Exception {
     Directory ram = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
-    Document d = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document d = writer.newDocument();
         
     // this field will have Tf
-    Field f1 = newField("f1", "This field has term freqs", normalType);
-    d.add(f1);
+    d.addLargeText("f1", "This field has term freqs");
        
     // this field will NOT have Tf
-    Field f2 = newField("f2", "This field has NO Tf in all docs", omitType);
-    d.add(f2);
+    fieldTypes.setIndexOptions("f2", IndexOptions.DOCS);
+    fieldTypes.disableHighlighting("f2");
+    d.addLargeText("f2", "This field has NO Tf in all docs");
         
     writer.addDocument(d);
     writer.forceMerge(1);
     // now we add another document which has term freq for field f2 and not for f1, and verify that the SegmentMerger
     // keeps things constant
-    d = new Document();
+    d = writer.newDocument();
         
     // Reverse
-    f1 = newField("f1", "This field has term freqs", omitType);
-    d.add(f1);
-        
-    f2 = newField("f2", "This field has NO Tf in all docs", normalType);     
-    d.add(f2);
-        
+    fieldTypes.setIndexOptions("f1", IndexOptions.DOCS);
+    fieldTypes.disableHighlighting("f1");
+    d.addLargeText("f1", "This field has term freqs");
+    d.addLargeText("f2", "This field has NO Tf in all docs");
     writer.addDocument(d);
 
     // force merge
@@ -124,32 +113,36 @@
             setMaxBufferedDocs(3).
             setMergePolicy(newLogMergePolicy(2))
     );
-    Document d = new Document();
+
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    Document d = writer.newDocument();
         
     // this field will have Tf
-    Field f1 = newField("f1", "This field has term freqs", normalType);
-    d.add(f1);
+    d.addLargeText("f1", "This field has term freqs");
        
     // this field will NOT have Tf
-    Field f2 = newField("f2", "This field has NO Tf in all docs", omitType);
-    d.add(f2);
+    fieldTypes.disableHighlighting("f2");
+    fieldTypes.setIndexOptions("f2", IndexOptions.DOCS);
+    d.addLargeText("f2", "This field has NO Tf in all docs");
 
-    for(int i=0;i<30;i++)
+    for(int i=0;i<30;i++) {
       writer.addDocument(d);
+    }
         
     // now we add another document which has term freq for field f2 and not for f1, and verify that the SegmentMerger
     // keeps things constant
-    d = new Document();
+    d = writer.newDocument();
         
-    // Reverese
-    f1 = newField("f1", "This field has term freqs", omitType);
-    d.add(f1);
+    // Reverse
+    fieldTypes.disableHighlighting("f1");
+    fieldTypes.setIndexOptions("f1", IndexOptions.DOCS);
+    d.addLargeText("f1", "This field has term freqs");
+    d.addLargeText("f2", "This field has NO Tf in all docs");
         
-    f2 = newField("f2", "This field has NO Tf in all docs", normalType);     
-    d.add(f2);
-        
-    for(int i=0;i<30;i++)
+    for(int i=0;i<30;i++) {
       writer.addDocument(d);
+    }
         
     // force merge
     writer.forceMerge(1);
@@ -177,21 +170,25 @@
             setMaxBufferedDocs(10).
             setMergePolicy(newLogMergePolicy(2))
     );
-    Document d = new Document();
+    Document d = writer.newDocument();
+    FieldTypes fieldTypes = writer.getFieldTypes();
         
     // this field will have Tf
-    Field f1 = newField("f1", "This field has term freqs", normalType);
-    d.add(f1);
+    fieldTypes.disableHighlighting("f1");
+    d.addLargeText("f1", "This field has term freqs");
        
     // this field will NOT have Tf
-    Field f2 = newField("f2", "This field has NO Tf in all docs", omitType);
-    d.add(f2);
+    fieldTypes.disableHighlighting("f2");
+    fieldTypes.setIndexOptions("f2", IndexOptions.DOCS);
+    d.addLargeText("f2", "This field has NO Tf in all docs");
 
-    for(int i=0;i<5;i++)
+    for(int i=0;i<5;i++) {
       writer.addDocument(d);
+    }
 
-    for(int i=0;i<20;i++)
+    for(int i=0;i<20;i++) {
       writer.addDocument(d);
+    }
 
     // force merge
     writer.forceMerge(1);
@@ -225,18 +222,20 @@
     }
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
-                                                .setMaxBufferedDocs(3)
-                                                .setMergePolicy(newLogMergePolicy()));
+                                         .setMaxBufferedDocs(3)
+                                         .setMergePolicy(newLogMergePolicy()));
     LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
     lmp.setMergeFactor(2);
     lmp.setNoCFSRatio(0.0);
-    Document d = new Document();
-        
-    Field f1 = newField("f1", "This field has term freqs", omitType);
-    d.add(f1);
+    Document d = writer.newDocument();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableHighlighting("f1");
+    fieldTypes.setIndexOptions("f1", IndexOptions.DOCS);
+    d.addLargeText("f1", "This field has term freqs");
 
-    for(int i=0;i<30;i++)
+    for(int i=0;i<30;i++) {
       writer.addDocument(d);
+    }
 
     writer.commit();
 
@@ -244,12 +243,12 @@
     
     // now add some documents with positions, and check
     // there is no prox after full merge
-    d = new Document();
-    f1 = newTextField("f1", "This field has positions", Field.Store.NO);
-    d.add(f1);
+    d = writer.newDocument();
+    d.addLargeText("f1", "This field has positions");
     
-    for(int i=0;i<30;i++)
+    for(int i=0;i<30;i++) {
       writer.addDocument(d);
+    }
  
     // force merge
     writer.forceMerge(1);
@@ -273,16 +272,16 @@
     );
         
     StringBuilder sb = new StringBuilder(265);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableHighlighting("noTf");
+    fieldTypes.setIndexOptions("noTf", IndexOptions.DOCS);
     String term = "term";
     for(int i = 0; i<30; i++){
-      Document d = new Document();
+      Document d = writer.newDocument();
       sb.append(term).append(" ");
       String content = sb.toString();
-      Field noTf = newField("noTf", content + (i%2==0 ? "" : " notf"), omitType);
-      d.add(noTf);
-          
-      Field tf = newField("tf", content + (i%2==0 ? " tf" : ""), normalType);
-      d.add(tf);
+      d.addLargeText("noTf", content + (i%2==0 ? "" : " notf"));
+      d.addLargeText("tf", content + (i%2==0 ? " tf" : ""));
           
       writer.addDocument(d);
       //System.out.println(d);
@@ -444,12 +443,8 @@
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
         newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    ft.freeze();
-    Field f = newField("foo", "bar", ft);
-    doc.add(f);
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
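
The hunks above all follow the same migration: per-field FieldType/Field instances are replaced by schema calls on the writer-owned FieldTypes (LUCENE-6005), and documents are obtained from the writer instead of constructed directly. A minimal sketch of the omit-term-frequencies pattern, restating only calls that appear in this patch and assuming the usual LuceneTestCase helpers (newDirectory, newIndexWriterConfig, random); note that the tests disable highlighting before dropping positions, presumably because highlighting needs position data, though the patch does not say so:

    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    FieldTypes fieldTypes = writer.getFieldTypes();

    // schema decision: "f2" indexes docs only, no term frequencies
    fieldTypes.disableHighlighting("f2");
    fieldTypes.setIndexOptions("f2", IndexOptions.DOCS);

    Document d = writer.newDocument();           // documents come from the writer now
    d.addLargeText("f1", "this field keeps term freqs");
    d.addLargeText("f2", "this field has no term freqs");
    writer.addDocument(d);
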
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOrdinalMap.java b/lucene/core/src/test/org/apache/lucene/index/TestOrdinalMap.java
index cc4b383..a6e8a10 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOrdinalMap.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOrdinalMap.java
@@ -23,13 +23,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.MultiDocValues.MultiSortedDocValues;
 import org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues;
 import org.apache.lucene.index.MultiDocValues.OrdinalMap;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LongValues;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.RamUsageTester;
@@ -65,16 +63,18 @@
     Directory dir = newDirectory();
     IndexWriterConfig cfg = new IndexWriterConfig(new MockAnalyzer(random())).setCodec(TestUtil.alwaysDocValuesFormat(TestUtil.getDefaultDocValuesFormat()));
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("ssdv");
     final int maxDoc = TestUtil.nextInt(random(), 10, 1000);
     final int maxTermLength = TestUtil.nextInt(random(), 1, 4);
     for (int i = 0; i < maxDoc; ++i) {
-      Document d = new Document();
+      Document d = iw.newDocument();
       if (random().nextBoolean()) {
-        d.add(new SortedDocValuesField("sdv", new BytesRef(TestUtil.randomSimpleString(random(), maxTermLength))));
+        d.addShortText("sdv", TestUtil.randomSimpleString(random(), maxTermLength));
       }
       final int numSortedSet = random().nextInt(3);
       for (int j = 0; j < numSortedSet; ++j) {
-        d.add(new SortedSetDocValuesField("ssdv", new BytesRef(TestUtil.randomSimpleString(random(), maxTermLength))));
+        d.addShortText("ssdv", TestUtil.randomSimpleString(random(), maxTermLength));
       }
       iw.addDocument(d);
       if (rarely()) {
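
TestOrdinalMap shows the doc-values side of the same migration: instead of adding SortedDocValuesField/SortedSetDocValuesField instances per value, the field is declared multi-valued once in the schema and written with repeated addShortText calls. A sketch under that reading (that the sorted vs. sorted-set choice follows from setMultiValued is implied by the substitution rather than stated anywhere in the patch):

    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.setMultiValued("ssdv");        // one-time schema decision

    Document d = iw.newDocument();
    d.addShortText("sdv", "lone value");      // single-valued: sorted doc values
    d.addShortText("ssdv", "first value");    // multi-valued: sorted-set doc values
    d.addShortText("ssdv", "second value");
    iw.addDocument(d);
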
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
index ae6c372..88813fe 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
@@ -22,10 +22,9 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
-import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -221,9 +220,9 @@
     // one document only:
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d3 = new Document();
+    Document d3 = w2.newDocument();
 
-    d3.add(newTextField("f3", "v1", Field.Store.YES));
+    d3.addLargeText("f3", "v1");
     w2.addDocument(d3);
     w2.close();
     
@@ -407,12 +406,12 @@
     assertEquals(parallelHits.length, singleHits.length);
     for(int i = 0; i < parallelHits.length; i++) {
       assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
-      StoredDocument docParallel = parallel.doc(parallelHits[i].doc);
-      StoredDocument docSingle = single.doc(singleHits[i].doc);
-      assertEquals(docParallel.get("f1"), docSingle.get("f1"));
-      assertEquals(docParallel.get("f2"), docSingle.get("f2"));
-      assertEquals(docParallel.get("f3"), docSingle.get("f3"));
-      assertEquals(docParallel.get("f4"), docSingle.get("f4"));
+      Document docParallel = parallel.doc(parallelHits[i].doc);
+      Document docSingle = single.doc(singleHits[i].doc);
+      assertEquals(docParallel.getString("f1"), docSingle.getString("f1"));
+      assertEquals(docParallel.getString("f2"), docSingle.getString("f2"));
+      assertEquals(docParallel.getString("f3"), docSingle.getString("f3"));
+      assertEquals(docParallel.getString("f4"), docSingle.getString("f4"));
     }
   }
 
@@ -420,29 +419,29 @@
   private IndexSearcher single(Random random, boolean compositeComposite) throws IOException {
     dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)));
-    Document d1 = new Document();
-    d1.add(newTextField("f1", "v1", Field.Store.YES));
-    d1.add(newTextField("f2", "v1", Field.Store.YES));
-    d1.add(newTextField("f3", "v1", Field.Store.YES));
-    d1.add(newTextField("f4", "v1", Field.Store.YES));
+    Document d1 = w.newDocument();
+    d1.addLargeText("f1", "v1");
+    d1.addLargeText("f2", "v1");
+    d1.addLargeText("f3", "v1");
+    d1.addLargeText("f4", "v1");
     w.addDocument(d1);
-    Document d2 = new Document();
-    d2.add(newTextField("f1", "v2", Field.Store.YES));
-    d2.add(newTextField("f2", "v2", Field.Store.YES));
-    d2.add(newTextField("f3", "v2", Field.Store.YES));
-    d2.add(newTextField("f4", "v2", Field.Store.YES));
+    Document d2 = w.newDocument();
+    d2.addLargeText("f1", "v2");
+    d2.addLargeText("f2", "v2");
+    d2.addLargeText("f3", "v2");
+    d2.addLargeText("f4", "v2");
     w.addDocument(d2);
-    Document d3 = new Document();
-    d3.add(newTextField("f1", "v3", Field.Store.YES));
-    d3.add(newTextField("f2", "v3", Field.Store.YES));
-    d3.add(newTextField("f3", "v3", Field.Store.YES));
-    d3.add(newTextField("f4", "v3", Field.Store.YES));
+    Document d3 = w.newDocument();
+    d3.addLargeText("f1", "v3");
+    d3.addLargeText("f2", "v3");
+    d3.addLargeText("f3", "v3");
+    d3.addLargeText("f4", "v3");
     w.addDocument(d3);
-    Document d4 = new Document();
-    d4.add(newTextField("f1", "v4", Field.Store.YES));
-    d4.add(newTextField("f2", "v4", Field.Store.YES));
-    d4.add(newTextField("f3", "v4", Field.Store.YES));
-    d4.add(newTextField("f4", "v4", Field.Store.YES));
+    Document d4 = w.newDocument();
+    d4.addLargeText("f1", "v4");
+    d4.addLargeText("f2", "v4");
+    d4.addLargeText("f3", "v4");
+    d4.addLargeText("f4", "v4");
     w.addDocument(d4);
     w.close();
 
@@ -480,23 +479,23 @@
     Directory dir1 = newDirectory();
     IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig(new MockAnalyzer(random))
                                              .setMergePolicy(NoMergePolicy.INSTANCE));
-    Document d1 = new Document();
-    d1.add(newTextField("f1", "v1", Field.Store.YES));
-    d1.add(newTextField("f2", "v1", Field.Store.YES));
+    Document d1 = w1.newDocument();
+    d1.addLargeText("f1", "v1");
+    d1.addLargeText("f2", "v1");
     w1.addDocument(d1);
     w1.commit();
-    Document d2 = new Document();
-    d2.add(newTextField("f1", "v2", Field.Store.YES));
-    d2.add(newTextField("f2", "v2", Field.Store.YES));
+    Document d2 = w1.newDocument();
+    d2.addLargeText("f1", "v2");
+    d2.addLargeText("f2", "v2");
     w1.addDocument(d2);
-    Document d3 = new Document();
-    d3.add(newTextField("f1", "v3", Field.Store.YES));
-    d3.add(newTextField("f2", "v3", Field.Store.YES));
+    Document d3 = w1.newDocument();
+    d3.addLargeText("f1", "v3");
+    d3.addLargeText("f2", "v3");
     w1.addDocument(d3);
     w1.commit();
-    Document d4 = new Document();
-    d4.add(newTextField("f1", "v4", Field.Store.YES));
-    d4.add(newTextField("f2", "v4", Field.Store.YES));
+    Document d4 = w1.newDocument();
+    d4.addLargeText("f1", "v4");
+    d4.addLargeText("f2", "v4");
     w1.addDocument(d4);
     w1.close();
     return dir1;
@@ -507,23 +506,23 @@
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random))
                                              .setMergePolicy(NoMergePolicy.INSTANCE));
-    Document d1 = new Document();
-    d1.add(newTextField("f3", "v1", Field.Store.YES));
-    d1.add(newTextField("f4", "v1", Field.Store.YES));
+    Document d1 = w2.newDocument();
+    d1.addLargeText("f3", "v1");
+    d1.addLargeText("f4", "v1");
     w2.addDocument(d1);
     w2.commit();
-    Document d2 = new Document();
-    d2.add(newTextField("f3", "v2", Field.Store.YES));
-    d2.add(newTextField("f4", "v2", Field.Store.YES));
+    Document d2 = w2.newDocument();
+    d2.addLargeText("f3", "v2");
+    d2.addLargeText("f4", "v2");
     w2.addDocument(d2);
-    Document d3 = new Document();
-    d3.add(newTextField("f3", "v3", Field.Store.YES));
-    d3.add(newTextField("f4", "v3", Field.Store.YES));
+    Document d3 = w2.newDocument();
+    d3.addLargeText("f3", "v3");
+    d3.addLargeText("f4", "v3");
     w2.addDocument(d3);
     w2.commit();
-    Document d4 = new Document();
-    d4.add(newTextField("f3", "v4", Field.Store.YES));
-    d4.add(newTextField("f4", "v4", Field.Store.YES));
+    Document d4 = w2.newDocument();
+    d4.addLargeText("f3", "v4");
+    d4.addLargeText("f4", "v4");
     w2.addDocument(d4);
     w2.close();
     return dir2;
@@ -534,23 +533,23 @@
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random))
                                              .setMergePolicy(NoMergePolicy.INSTANCE));
-    Document d1 = new Document();
-    d1.add(newTextField("f3", "v1", Field.Store.YES));
-    d1.add(newTextField("f4", "v1", Field.Store.YES));
+    Document d1 = w2.newDocument();
+    d1.addLargeText("f3", "v1");
+    d1.addLargeText("f4", "v1");
     w2.addDocument(d1);
     w2.commit();
-    Document d2 = new Document();
-    d2.add(newTextField("f3", "v2", Field.Store.YES));
-    d2.add(newTextField("f4", "v2", Field.Store.YES));
+    Document d2 = w2.newDocument();
+    d2.addLargeText("f3", "v2");
+    d2.addLargeText("f4", "v2");
     w2.addDocument(d2);
     w2.commit();
-    Document d3 = new Document();
-    d3.add(newTextField("f3", "v3", Field.Store.YES));
-    d3.add(newTextField("f4", "v3", Field.Store.YES));
+    Document d3 = w2.newDocument();
+    d3.addLargeText("f3", "v3");
+    d3.addLargeText("f4", "v3");
     w2.addDocument(d3);
-    Document d4 = new Document();
-    d4.add(newTextField("f3", "v4", Field.Store.YES));
-    d4.add(newTextField("f4", "v4", Field.Store.YES));
+    Document d4 = w2.newDocument();
+    d4.addLargeText("f3", "v4");
+    d4.addLargeText("f4", "v4");
     w2.addDocument(d4);
     w2.close();
     return dir2;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
index 6aedca7..f03ed96 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
@@ -22,9 +22,9 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -145,9 +145,9 @@
     // one document only:
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document d3 = new Document();
+    Document d3 = w2.newDocument();
 
-    d3.add(newTextField("f3", "v1", Field.Store.YES));
+    d3.addLargeText("f3", "v1");
     w2.addDocument(d3);
     w2.close();
     
@@ -249,12 +249,12 @@
     assertEquals(parallelHits.length, singleHits.length);
     for(int i = 0; i < parallelHits.length; i++) {
       assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
-      StoredDocument docParallel = parallel.doc(parallelHits[i].doc);
-      StoredDocument docSingle = single.doc(singleHits[i].doc);
-      assertEquals(docParallel.get("f1"), docSingle.get("f1"));
-      assertEquals(docParallel.get("f2"), docSingle.get("f2"));
-      assertEquals(docParallel.get("f3"), docSingle.get("f3"));
-      assertEquals(docParallel.get("f4"), docSingle.get("f4"));
+      Document docParallel = parallel.doc(parallelHits[i].doc);
+      Document docSingle = single.doc(singleHits[i].doc);
+      assertEquals(docParallel.getString("f1"), docSingle.getString("f1"));
+      assertEquals(docParallel.getString("f2"), docSingle.getString("f2"));
+      assertEquals(docParallel.getString("f3"), docSingle.getString("f3"));
+      assertEquals(docParallel.getString("f4"), docSingle.getString("f4"));
     }
   }
 
@@ -262,17 +262,17 @@
   private IndexSearcher single(Random random) throws IOException {
     dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)));
-    Document d1 = new Document();
-    d1.add(newTextField("f1", "v1", Field.Store.YES));
-    d1.add(newTextField("f2", "v1", Field.Store.YES));
-    d1.add(newTextField("f3", "v1", Field.Store.YES));
-    d1.add(newTextField("f4", "v1", Field.Store.YES));
+    Document d1 = w.newDocument();
+    d1.addShortText("f1", "v1");
+    d1.addShortText("f2", "v1");
+    d1.addShortText("f3", "v1");
+    d1.addShortText("f4", "v1");
     w.addDocument(d1);
-    Document d2 = new Document();
-    d2.add(newTextField("f1", "v2", Field.Store.YES));
-    d2.add(newTextField("f2", "v2", Field.Store.YES));
-    d2.add(newTextField("f3", "v2", Field.Store.YES));
-    d2.add(newTextField("f4", "v2", Field.Store.YES));
+    Document d2 = w.newDocument();
+    d2.addShortText("f1", "v2");
+    d2.addShortText("f2", "v2");
+    d2.addShortText("f3", "v2");
+    d2.addShortText("f4", "v2");
     w.addDocument(d2);
     w.close();
 
@@ -294,13 +294,16 @@
   private Directory getDir1(Random random) throws IOException {
     Directory dir1 = newDirectory();
     IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig(new MockAnalyzer(random)));
-    Document d1 = new Document();
-    d1.add(newTextField("f1", "v1", Field.Store.YES));
-    d1.add(newTextField("f2", "v1", Field.Store.YES));
+    FieldTypes fieldTypes = w1.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+
+    Document d1 = w1.newDocument();
+    d1.addShortText("f1", "v1");
+    d1.addShortText("f2", "v1");
     w1.addDocument(d1);
-    Document d2 = new Document();
-    d2.add(newTextField("f1", "v2", Field.Store.YES));
-    d2.add(newTextField("f2", "v2", Field.Store.YES));
+    Document d2 = w1.newDocument();
+    d2.addShortText("f1", "v2");
+    d2.addShortText("f2", "v2");
     w1.addDocument(d2);
     w1.close();
     return dir1;
@@ -309,13 +312,15 @@
   private Directory getDir2(Random random) throws IOException {
     Directory dir2 = newDirectory();
     IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(new MockAnalyzer(random)));
-    Document d3 = new Document();
-    d3.add(newTextField("f3", "v1", Field.Store.YES));
-    d3.add(newTextField("f4", "v1", Field.Store.YES));
+    FieldTypes fieldTypes = w2.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    Document d3 = w2.newDocument();
+    d3.addShortText("f3", "v1");
+    d3.addShortText("f4", "v1");
     w2.addDocument(d3);
-    Document d4 = new Document();
-    d4.add(newTextField("f3", "v2", Field.Store.YES));
-    d4.add(newTextField("f4", "v2", Field.Store.YES));
+    Document d4 = w2.newDocument();
+    d4.addShortText("f3", "v2");
+    d4.addShortText("f4", "v2");
     w2.addDocument(d4);
     w2.close();
     return dir2;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
index ffc7b60..d58ee28 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
@@ -23,9 +23,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -92,17 +90,19 @@
         System.out.println("\nTEST: make 1st writer");
       }
       IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig(new MockAnalyzer(random())));
-      Document doc = new Document();
-      Field idField = newTextField("id", "", Field.Store.NO);
-      doc.add(idField);
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      doc.add(newField("test", "", customType));
-      idField.setStringValue("1");
+      FieldTypes fieldTypes = iw.getFieldTypes();
+      fieldTypes.enableTermVectors("test");
+
+      Document doc = iw.newDocument();
+      doc.addLargeText("test", "");
+      doc.addUniqueInt("id", 1);
       iw.addDocument(doc);
-      doc.add(newField("test", "", customType));
-      idField.setStringValue("2");
+
+      doc = iw.newDocument();
+      doc.addLargeText("test", "");
+      doc.addUniqueInt("id", 2);
       iw.addDocument(doc);
+
       iw.close();
 
       IndexWriterConfig dontMergeConfig = new IndexWriterConfig(new MockAnalyzer(random()))
@@ -111,8 +111,9 @@
         System.out.println("\nTEST: make 2nd writer");
       }
       IndexWriter writer = new IndexWriter(rd1, dontMergeConfig);
-      
-      writer.deleteDocuments(new Term("id", "1"));
+      fieldTypes = writer.getFieldTypes();
+
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", 1));
       writer.close();
       IndexReader ir = DirectoryReader.open(rd1);
       assertEquals(2, ir.maxDoc());
@@ -128,8 +129,7 @@
     Directory rd2 = newDirectory();
     {
       IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig(new MockAnalyzer(random())));
-      Document doc = new Document();
-      iw.addDocument(doc);
+      iw.addDocument(iw.newDocument());
       iw.close();
     }
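
The id handling in TestParallelReaderEmptyIndex changes representation as well: a reused string Field whose value was swapped via setStringValue becomes addUniqueInt, and deletes go through a typed term built by the schema. Restating the delete path from the hunk (iw is the first writer, writer the later one opened on the same directory):

    Document doc = iw.newDocument();
    doc.addUniqueInt("id", 1);      // unique: usable as a delete/update key
    iw.addDocument(doc);

    // later, a second writer on the same directory deletes by typed term:
    FieldTypes fieldTypes = writer.getFieldTypes();
    writer.deleteDocuments(fieldTypes.newIntTerm("id", 1));
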
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
index 7fd9182..b390c4f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -22,7 +22,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
@@ -42,19 +42,23 @@
     Document doc;
     rd1 = newDirectory();
     IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = iw1.getFieldTypes();
+    fieldTypes.disableExistsFilters();
 
-    doc = new Document();
-    doc.add(newTextField("field1", "the quick brown fox jumps", Field.Store.YES));
-    doc.add(newTextField("field2", "the quick brown fox jumps", Field.Store.YES));
+    doc = iw1.newDocument();
+    doc.addLargeText("field1", "the quick brown fox jumps");
+    doc.addLargeText("field2", "the quick brown fox jumps");
     iw1.addDocument(doc);
 
     iw1.close();
     rd2 = newDirectory();
     IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig(new MockAnalyzer(random())));
+    fieldTypes = iw2.getFieldTypes();
+    fieldTypes.disableExistsFilters();
 
-    doc = new Document();
-    doc.add(newTextField("field1", "the fox jumps over the lazy dog", Field.Store.YES));
-    doc.add(newTextField("field3", "the fox jumps over the lazy dog", Field.Store.YES));
+    doc = iw2.newDocument();
+    doc.addLargeText("field1", "the fox jumps over the lazy dog");
+    doc.addLargeText("field3", "the fox jumps over the lazy dog");
     iw2.addDocument(doc);
 
     iw2.close();
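
Several of these parallel-reader tests call disableExistsFilters() right after creating the writer. The patch does not explain the call; a plausible reading is that FieldTypes otherwise records extra per-document bookkeeping for field-existence filtering, which would make the parallel sub-indexes disagree about their field sets. The pattern is just:

    IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig(new MockAnalyzer(random())));
    FieldTypes fieldTypes = iw1.getFieldTypes();
    fieldTypes.disableExistsFilters();   // keep field population exactly as declared

    Document doc = iw1.newDocument();
    doc.addLargeText("field1", "the quick brown fox jumps");
    iw1.addDocument(doc);
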
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index fb0d142..3c91b5d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -27,16 +27,15 @@
 import java.util.Map;
 
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -61,17 +60,21 @@
     Directory ram = newDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
-    Document d = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("f2");
+
+    Document d = writer.newDocument();
+
     // this field won't have any payloads
-    d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
+    d.addLargeText("f1", "This field has no payloads");
     // this field will have payloads in all docs, however not for all term positions,
     // so this field is used to check if the DocumentWriter correctly enables the payloads bit
     // even if only some term positions have payloads
-    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-    d.add(newTextField("f2", "This field has payloads in all docs NO PAYLOAD", Field.Store.NO));
+    d.addLargeText("f2", "This field has payloads in all docs");
+    d.addLargeText("f2", "This field has payloads in all docs NO PAYLOAD");
     // this field is used to verify that the SegmentMerger enables payloads for a field that has payloads
     // enabled in only some documents
-    d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
+    d.addLargeText("f3", "This field has payloads in some docs");
     // only add payload data for field f2
     analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
     writer.addDocument(d);
@@ -90,11 +93,13 @@
     analyzer = new PayloadAnalyzer(); // Clear payload state for each field
     writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
                              .setOpenMode(OpenMode.CREATE));
-    d = new Document();
-    d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
-    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-    d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
+    fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("f2");
+    d = writer.newDocument();
+    d.addLargeText("f1", "This field has no payloads");
+    d.addLargeText("f2", "This field has payloads in all docs");
+    d.addLargeText("f2", "This field has payloads in all docs");
+    d.addLargeText("f3", "This field has payloads in some docs");
     // add payload data for field f2 and f3
     analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
     analyzer.setPayloadData("f3", "somedata".getBytes(StandardCharsets.UTF_8), 0, 3);
@@ -115,15 +120,10 @@
   }
 
   // Tests that payloads are correctly stored and loaded
-  public void testPayloadsEncoding() throws Exception {
-    Directory dir = newDirectory();
-    performTest(dir);
-    dir.close();
-  }
-    
   // builds an index with payloads in the given Directory and performs
   // different tests to verify the payload encoding
-  private void performTest(Directory dir) throws Exception {
+  public void testPayloadsEncoding() throws Exception {
+    Directory dir = newDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
                                          .setOpenMode(OpenMode.CREATE)
@@ -145,12 +145,11 @@
     }
     String content = sb.toString();
         
-        
     int payloadDataLength = numTerms * numDocs * 2 + numTerms * numDocs * (numDocs - 1) / 2;
     byte[] payloadData = generateRandomData(payloadDataLength);
         
-    Document d = new Document();
-    d.add(newTextField(fieldName, content, Field.Store.NO));
+    Document d = writer.newDocument();
+    d.addLargeText(fieldName, content);
     // add the same document multiple times to have the same payload lengths for all
     // occurrences within two consecutive skip intervals
     int offset = 0;
@@ -174,7 +173,6 @@
     // flush
     writer.close();
         
-        
     /*
      * Verify the index
      * first we test if all payloads are stored correctly
@@ -267,13 +265,12 @@
                              .setOpenMode(OpenMode.CREATE));
     String singleTerm = "lucene";
         
-    d = new Document();
-    d.add(newTextField(fieldName, singleTerm, Field.Store.NO));
+    d = writer.newDocument();
+    d.addLargeText(fieldName, singleTerm);
     // add a payload whose length is greater than the buffer size of BufferedIndexOutput
     payloadData = generateRandomData(2000);
     analyzer.setPayloadData(fieldName, payloadData, 100, 1500);
     writer.addDocument(d);
-
         
     writer.forceMerge(1);
     // flush
@@ -294,7 +291,7 @@
         
     assertByteArrayEquals(portion, br.bytes, br.offset, br.length);
     reader.close();
-        
+    dir.close();
   }
     
   static final Charset utf8 = StandardCharsets.UTF_8;
@@ -355,7 +352,6 @@
     }
   }    
     
-    
   static class PayloadData {
     byte[] data;
     int offset;
@@ -396,7 +392,6 @@
       return new TokenStreamComponents(ts, tokenStream);
     }
   }
-
     
   /**
    * This Filter adds payloads to the tokens.
@@ -408,7 +403,7 @@
     private String fieldName;
     private PayloadData payloadData;
     private int offset;
-        
+
     public PayloadFilter(TokenStream in, String fieldName, Map<String,PayloadData> fieldToData) {
       super(in);
       this.fieldToData = fieldToData;
@@ -460,8 +455,8 @@
           public void run() {
             try {
               for (int j = 0; j < numDocs; j++) {
-                Document d = new Document();
-                d.add(new TextField(field, new PoolingPayloadTokenStream(pool)));
+                Document d = writer.newDocument();
+                d.addLargeText(field, new PoolingPayloadTokenStream(pool));
                 writer.addDocument(d);
               }
             } catch (Exception e) {
@@ -561,15 +556,15 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
                                                      new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true));
-    Document doc = new Document();
-    doc.add(new TextField("hasMaybepayload", "here we go", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("hasMaybepayload", "here we go");
     writer.addDocument(doc);
     writer.close();
 
     writer = new RandomIndexWriter(random(), dir,
                                    new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true));
-    doc = new Document();
-    doc.add(new TextField("hasMaybepayload2", "here we go", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("hasMaybepayload2", "here we go");
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.forceMerge(1);
@@ -584,22 +579,22 @@
     IndexWriterConfig iwc = newIndexWriterConfig(null);
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    Field field = new TextField("field", "", Field.Store.NO);
+    Document doc = writer.newDocument();
     TokenStream ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
-    ((Tokenizer)ts).setReader(new StringReader("here we go"));
-    field.setTokenStream(ts);
-    doc.add(field);
+    ((Tokenizer) ts).setReader(new StringReader("here we go"));
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     Token withPayload = new Token("withPayload", 0, 11);
     withPayload.setPayload(new BytesRef("test"));
     ts = new CannedTokenStream(withPayload);
     assertTrue(ts.hasAttribute(PayloadAttribute.class));
-    field.setTokenStream(ts);
+    doc = writer.newDocument();
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("another"));
-    field.setTokenStream(ts);
+    doc = writer.newDocument();
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     LeafReader sr = SlowCompositeReaderWrapper.wrap(reader);
@@ -616,24 +611,20 @@
   public void testMixupMultiValued() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = new TextField("field", "", Field.Store.NO);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = writer.newDocument();
     TokenStream ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("here we go"));
-    field.setTokenStream(ts);
-    doc.add(field);
-    Field field2 = new TextField("field", "", Field.Store.NO);
+    doc.addLargeText("field", ts);
     Token withPayload = new Token("withPayload", 0, 11);
     withPayload.setPayload(new BytesRef("test"));
     ts = new CannedTokenStream(withPayload);
     assertTrue(ts.hasAttribute(PayloadAttribute.class));
-    field2.setTokenStream(ts);
-    doc.add(field2);
-    Field field3 = new TextField("field", "", Field.Store.NO);
+    doc.addLargeText("field", ts);
     ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("nopayload"));
-    field3.setTokenStream(ts);
-    doc.add(field3);
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     SegmentReader sr = getOnlySegmentReader(reader);
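
The mixed-payload tests also change shape: the old code reused one Field and swapped its TokenStream between addDocument calls, while the new API either builds a fresh document per stream or stacks several addLargeText(String, TokenStream) values onto one multi-valued field. A sketch of the multi-valued variant, using the same test utilities (MockTokenizer, CannedTokenStream) as the hunks above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("field");            // several values per document

    Document doc = writer.newDocument();
    TokenStream ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
    ((Tokenizer) ts).setReader(new StringReader("here we go"));
    doc.addLargeText("field", ts);                 // value 1: no payloads

    Token withPayload = new Token("withPayload", 0, 11);
    withPayload.setPayload(new BytesRef("test"));
    doc.addLargeText("field", new CannedTokenStream(withPayload));  // value 2: has a payload
    writer.addDocument(doc);
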
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
index 3fa9a81..4715f7f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
@@ -27,9 +27,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -42,29 +40,32 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorPayloads(true);
-    customType.setStoreTermVectorOffsets(random().nextBoolean());
-    Field field = new Field("field", "", customType);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorPayloads("field");
+    if (random().nextBoolean()) {
+      fieldTypes.enableTermVectorOffsets("field");
+    }
+
+    Document doc = writer.newDocument();
     TokenStream ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("here we go"));
-    field.setTokenStream(ts);
-    doc.add(field);
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     
     Token withPayload = new Token("withPayload", 0, 11);
     withPayload.setPayload(new BytesRef("test"));
     ts = new CannedTokenStream(withPayload);
     assertTrue(ts.hasAttribute(PayloadAttribute.class));
-    field.setTokenStream(ts);
+    doc = writer.newDocument();
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     
     ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("another"));
-    field.setTokenStream(ts);
+    doc = writer.newDocument();
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
     
     DirectoryReader reader = writer.getReader();
@@ -85,30 +86,31 @@
   public void testMixupMultiValued() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorPayloads(true);
-    customType.setStoreTermVectorOffsets(random().nextBoolean());
-    Field field = new Field("field", "", customType);
+
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorPayloads("field");
+    if (random().nextBoolean()) {
+      fieldTypes.enableTermVectorOffsets("field");
+    }
+    fieldTypes.setMultiValued("field");
     TokenStream ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("here we go"));
-    field.setTokenStream(ts);
-    doc.add(field);
-    Field field2 = new Field("field", "", customType);
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", ts);
+
     Token withPayload = new Token("withPayload", 0, 11);
     withPayload.setPayload(new BytesRef("test"));
     ts = new CannedTokenStream(withPayload);
     assertTrue(ts.hasAttribute(PayloadAttribute.class));
-    field2.setTokenStream(ts);
-    doc.add(field2);
-    Field field3 = new Field("field", "", customType);
+    doc.addLargeText("field", ts);
+
     ts = new MockTokenizer(MockTokenizer.WHITESPACE, true);
     ((Tokenizer)ts).setReader(new StringReader("nopayload"));
-    field3.setTokenStream(ts);
-    doc.add(field3);
+    doc.addLargeText("field", ts);
     writer.addDocument(doc);
+
     DirectoryReader reader = writer.getReader();
     Terms terms = reader.getTermVector(0, "field");
     assert terms != null;
@@ -126,17 +128,12 @@
   public void testPayloadsWithoutPositions() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(false);
-    customType.setStoreTermVectorPayloads(true);
-    customType.setStoreTermVectorOffsets(random().nextBoolean());
-    doc.add(new Field("field", "foo", customType));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
     try {
-      writer.addDocument(doc);
-      fail();
-    } catch (IllegalArgumentException expected) {
+      fieldTypes.enableTermVectorPayloads("field");
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
       // expected
     }
     writer.close();
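
The testPayloadsWithoutPositions rewrite is a behavioral shift worth noting: the invalid combination (term-vector payloads without term-vector positions) used to surface as an IllegalArgumentException at addDocument time, and now fails as an IllegalStateException when the schema is configured, before any document exists. Restating the pattern from the hunk:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.enableTermVectors("field");
    try {
      // positions were never enabled, so vector payloads are illegal
      fieldTypes.enableTermVectorPayloads("field");
      fail("did not hit exception");
    } catch (IllegalStateException ise) {
      // expected: validation moves from indexing time to schema time
    }
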
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 6c5c2d3..1990980 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -46,14 +46,14 @@
     iwc.setMergePolicy(fsmp);
     IndexWriter writer = new IndexWriter(dir, iwc);
     for (int x = 0; x < 5; x++) {
-      writer.addDocument(DocHelper.createDocument(x, "1", 2));
+      writer.addDocument(DocHelper.createDocument(writer, x, "1", 2));
       //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
     }
     //System.out.println("commit1");
     writer.commit();
     assertEquals(1, writer.segmentInfos.size());
     for (int x = 5; x < 10; x++) {
-      writer.addDocument(DocHelper.createDocument(x, "2", 2));
+      writer.addDocument(DocHelper.createDocument(writer, x, "2", 2));
       //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
     }
     //System.out.println("commit2");
@@ -61,7 +61,7 @@
     assertEquals(2, writer.segmentInfos.size());
 
     for (int x = 10; x < 15; x++) {
-      writer.addDocument(DocHelper.createDocument(x, "3", 2));
+      writer.addDocument(DocHelper.createDocument(writer, x, "3", 2));
       //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
     }
 
@@ -175,12 +175,12 @@
   **/
   void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
     for (int x = 20; x < 25; x++) {
-      writer.addDocument(DocHelper.createDocument(x, "5", 2));
+      writer.addDocument(DocHelper.createDocument(writer, x, "5", 2));
       //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
     }
     writer.flush(false, false);
     for (int x = 25; x < 30; x++) {
-      writer.addDocument(DocHelper.createDocument(x, "5", 2));
+      writer.addDocument(DocHelper.createDocument(writer, x, "5", 2));
       //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
     }
     writer.flush(false, false);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
index c94b66a..0454c0d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -82,7 +81,7 @@
     assertEquals(numSnapshots, psdp.getSnapshotCount());
     assertSnapshotExists(dir, psdp, numSnapshots, false);
 
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     snapshots.add(psdp.snapshot());
     assertEquals(numSnapshots+1, psdp.getSnapshots().size());
@@ -129,7 +128,7 @@
       });
     IndexWriter writer = new IndexWriter(dir, getConfig(random(), new PersistentSnapshotDeletionPolicy(
                                          new KeepOnlyLastCommitDeletionPolicy(), dir, OpenMode.CREATE_OR_APPEND)));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
 
     PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index f7eac40..fa14009 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -31,12 +31,7 @@
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -61,22 +56,27 @@
     Directory dir = newDirectory();
     
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Document doc = w.newDocument();
+
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(random().nextBoolean());
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
+      fieldTypes.enableTermVectors("content");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("content");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("content");
+      }
     }
+
     Token[] tokens = new Token[] {
       makeToken("a", 1, 0, 6),
       makeToken("b", 1, 8, 9),
       makeToken("a", 1, 9, 17),
       makeToken("c", 1, 19, 50),
     };
-    doc.add(new Field("content", new CannedTokenStream(tokens), ft));
+    doc.addLargeText("content", new CannedTokenStream(tokens));
 
     w.addDocument(doc);
     IndexReader r = w.getReader();
@@ -130,21 +130,27 @@
     iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy()); // will rely on docids a bit for skipping
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
     
-    FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
-      ft.setStoreTermVectorPositions(random().nextBoolean());
+      fieldTypes.enableTermVectors("numbers");
+      fieldTypes.enableTermVectors("oddeven");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("numbers");
+        fieldTypes.enableTermVectorOffsets("oddeven");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("numbers");
+        fieldTypes.enableTermVectorPositions("oddeven");
+      }
     }
     
     int numDocs = atLeast(500);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new Field("numbers", English.intToEnglish(i), ft));
-      doc.add(new Field("oddeven", (i % 2) == 0 ? "even" : "odd", ft));
-      doc.add(new StringField("id", "" + i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
+      doc.addLargeText("oddeven", (i % 2) == 0 ? "even" : "odd");
+      doc.addAtom("id", "" + i);
       w.addDocument(doc);
     }
     
@@ -157,7 +163,7 @@
       DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
       int doc;
       while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-        String storedNumbers = reader.document(doc).get("numbers");
+        String storedNumbers = reader.document(doc).getString("numbers");
         int freq = dp.freq();
         for (int i = 0; i < freq; i++) {
           dp.nextPosition();
@@ -187,7 +193,7 @@
       assertEquals(num, doc);
       int freq = dp.freq();
       for (int i = 0; i < freq; i++) {
-        String storedNumbers = reader.document(doc).get("numbers");
+        String storedNumbers = reader.document(doc).getString("numbers");
         dp.nextPosition();
         int start = dp.startOffset();
         assert start >= 0;
@@ -222,25 +228,26 @@
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
     final int numDocs = atLeast(20);
     //final int numDocs = atLeast(5);
 
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-
     // TODO: randomize what IndexOptions we use; also test
     // changing this up in one IW buffered segment...:
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
-      ft.setStoreTermVectorPositions(random().nextBoolean());
+      fieldTypes.enableTermVectors("content");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("content");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("content");
+      }
     }
 
     for(int docCount=0;docCount<numDocs;docCount++) {
-      Document doc = new Document();
-      doc.add(new IntField("id", docCount, Field.Store.YES));
-      doc.add(new NumericDocValuesField("id", docCount));
+      Document doc = w.newDocument();
+      doc.addInt("id", docCount);
       List<Token> tokens = new ArrayList<>();
       final int numTokens = atLeast(100);
       //final int numTokens = atLeast(20);
@@ -282,7 +289,7 @@
         offset += offIncr + tokenOffset;
         //System.out.println("  " + token + " posIncr=" + token.getPositionIncrement() + " pos=" + pos + " off=" + token.startOffset() + "/" + token.endOffset() + " (freq=" + postingsByDoc.get(docCount).size() + ")");
       }
-      doc.add(new Field("content", new CannedTokenStream(tokens.toArray(new Token[tokens.size()])), ft));
+      doc.addLargeText("content", new CannedTokenStream(tokens.toArray(new Token[tokens.size()])));
       w.addDocument(doc);
     }
     final DirectoryReader r = w.getReader();
@@ -352,51 +359,18 @@
     dir.close();
   }
   
-  public void testWithUnindexedFields() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-    for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
-      // ensure at least one doc is indexed with offsets
-      if (i < 99 && random().nextInt(2) == 0) {
-        // stored only
-        FieldType ft = new FieldType();
-        ft.setStored(true);
-        doc.add(new Field("foo", "boo!", ft));
-      } else {
-        FieldType ft = new FieldType(TextField.TYPE_STORED);
-        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-        if (random().nextBoolean()) {
-          // store some term vectors for the checkindex cross-check
-          ft.setStoreTermVectors(true);
-          ft.setStoreTermVectorPositions(true);
-          ft.setStoreTermVectorOffsets(true);
-        }
-        doc.add(new Field("foo", "bar", ft));
-      }
-      riw.addDocument(doc);
-    }
-    CompositeReader ir = riw.getReader();
-    LeafReader slow = SlowCompositeReaderWrapper.wrap(ir);
-    FieldInfos fis = slow.getFieldInfos();
-    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, fis.fieldInfo("foo").getIndexOptions());
-    slow.close();
-    ir.close();
-    riw.close();
-    dir.close();
-  }
-  
   public void testAddFieldTwice() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType customType3 = new FieldType(TextField.TYPE_STORED);
-    customType3.setStoreTermVectors(true);
-    customType3.setStoreTermVectorPositions(true);
-    customType3.setStoreTermVectorOffsets(true);    
-    customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    doc.add(new Field("content3", "here is more content with aaa aaa aaa", customType3));
-    doc.add(new Field("content3", "here is more content with aaa aaa aaa", customType3));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("content3");
+    fieldTypes.enableTermVectorPositions("content3");
+    fieldTypes.enableTermVectorOffsets("content3");
+    fieldTypes.setMultiValued("content3");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("content3", "here is more content with aaa aaa aaa");
+    doc.addLargeText("content3", "here is more content with aaa aaa aaa");
     iw.addDocument(doc);
     iw.close();
     dir.close(); // checkindex
@@ -471,13 +445,11 @@
     };
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     iw.addDocument(doc);
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-      doc.add(new Field("foo", "bar", ft));
-      doc.add(new Field("foo", "bar", ft));
+      doc.addLargeText("foo", "bar");
+      doc.addLargeText("foo", "bar");
       iw.addDocument(doc);
       fail("didn't get expected exception");
     } catch (IllegalArgumentException expected) {}
@@ -493,7 +465,7 @@
   public void testLegalbutVeryLargeOffsets() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     Token t1 = new Token("foo", 0, Integer.MAX_VALUE-500);
     if (random().nextBoolean()) {
       t1.setPayload(new BytesRef("test"));
@@ -502,14 +474,12 @@
     TokenStream tokenStream = new CannedTokenStream(
         new Token[] { t1, t2 }
     );
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    FieldTypes fieldTypes = iw.getFieldTypes();
     // store some term vectors for the checkindex cross-check
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(true);
-    ft.setStoreTermVectorOffsets(true);
-    Field field = new Field("foo", tokenStream, ft);
-    doc.add(field);
+    fieldTypes.enableTermVectors("foo");
+    fieldTypes.enableTermVectorPositions("foo");
+    fieldTypes.enableTermVectorOffsets("foo");
+    doc.addLargeText("foo", tokenStream);
     iw.addDocument(doc);
     iw.close();
     dir.close();
@@ -519,18 +489,18 @@
   private void checkTokens(Token[] field1, Token[] field2) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = riw.getFieldTypes();
+
     boolean success = false;
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
       // store some term vectors for the checkindex cross-check
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(true);
-      ft.setStoreTermVectorOffsets(true);
+      fieldTypes.enableTermVectors("body");
+      fieldTypes.enableTermVectorPositions("body");
+      fieldTypes.enableTermVectorOffsets("body");
      
-      Document doc = new Document();
-      doc.add(new Field("body", new CannedTokenStream(field1), ft));
-      doc.add(new Field("body", new CannedTokenStream(field2), ft));
+      Document doc = riw.newDocument();
+      doc.addLargeText("body", new CannedTokenStream(field1));
+      doc.addLargeText("body", new CannedTokenStream(field2));
       riw.addDocument(doc);
       riw.close();
       success = true;
@@ -546,17 +516,16 @@
   private void checkTokens(Token[] tokens) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = riw.getFieldTypes();
     boolean success = false;
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
       // store some term vectors for the checkindex cross-check
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(true);
-      ft.setStoreTermVectorOffsets(true);
+      fieldTypes.enableTermVectors("body");
+      fieldTypes.enableTermVectorPositions("body");
+      fieldTypes.enableTermVectorOffsets("body");
      
-      Document doc = new Document();
-      doc.add(new Field("body", new CannedTokenStream(tokens), ft));
+      Document doc = riw.newDocument();
+      doc.addLargeText("body", new CannedTokenStream(tokens));
       riw.addDocument(doc);
       riw.close();
       success = true;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java b/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java
index 55258c6..1c98788 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -43,15 +42,12 @@
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
           .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
     
-    Document doc = new Document();
-    Field field = newStringField("field", "", Field.Store.NO);
-    doc.add(field);
-
     // We generate awful prefixes: good for testing.
     // But for the preflex codec the test can be very slow, so use fewer iterations.
     int num = atLeast(10);
     for (int i = 0; i < num; i++) {
-      field.setStringValue(TestUtil.randomUnicodeString(random(), 10));
+      Document doc = writer.newDocument();
+      doc.addAtom("field", TestUtil.randomUnicodeString(random(), 10));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReindexingReader.java b/lucene/core/src/test/org/apache/lucene/index/TestReindexingReader.java
new file mode 100644
index 0000000..91492d7
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestReindexingReader.java
@@ -0,0 +1,1054 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+// TODO:
+//   - old parallel indices are only pruned on commit/close; can we do it on refresh?
+
+/** Simple example showing how to use ParallelLeafReader to index new
+ *  stuff (postings, DVs, etc.) from previously stored fields, on the
+ *  fly (during NRT reader reopen), after the  initial indexing.  The
+ *  test indexes just a single stored field with text "content X" (X is
+ *  a number embedded in the text).
+ *
+ *  Then, on reopen, for any newly created segments (flush or merge), it
+ *  builds a new parallel segment by loading all stored docs, parsing
+ *  out that X, and adding it as a DV and a numeric indexed (trie) field.
+ *
+ *  Finally, for searching, it builds a top-level MultiReader, with
+ *  ParallelLeafReader for each segment, and then tests that random
+ *  numeric range queries, and sorting by the new DV field, work
+ *  correctly.
+ *
+ *  Each per-segment index lives in a private directory next to the main
+ *  index, and they are deleted once their segments are removed from the
+ *  index.  They are "volatile", meaning that if e.g. the index is replicated to
+ *  another machine, it's OK not to copy the parallel segment indices,
+ *  since they will just be regenerated (at a cost, though). */
+
+public class TestReindexingReader extends LuceneTestCase {
+
+  static final boolean DEBUG = false;
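+
+  // A hedged usage sketch (mirrors testBasic below; reindexer.w / reindexer.mgr are
+  // this test's own fields, not a public API):
+  //
+  //   ReindexingReader reindexer = getReindexer(createTempDir());
+  //   reindexer.commit();                          // initial empty commit
+  //   Document doc = reindexer.w.newDocument();
+  //   doc.addLargeText("text", "number 42");
+  //   reindexer.w.addDocument(doc);
+  //   reindexer.mgr.maybeRefresh();                // parallel segments are (re)built here
+  //   DirectoryReader r = reindexer.mgr.acquire();
+  //   try {
+  //     // the reindexed "number" DV field is now searchable/sortable
+  //   } finally {
+  //     reindexer.mgr.release(r);
+  //   }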
+
+  private ReindexingReader getReindexer(Path root) throws IOException {
+    return new ReindexingReader(newFSDirectory(root), root.resolve("segs")) {
+      @Override
+      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+        TieredMergePolicy tmp = new TieredMergePolicy();
+        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
+        tmp.setFloorSegmentMB(.01);
+        iwc.setMergePolicy(tmp);
+        return iwc;
+      }
+
+      @Override
+      protected Directory openDirectory(Path path) throws IOException {
+        MockDirectoryWrapper dir = newMockFSDirectory(path);
+        dir.setUseSlowOpenClosers(false);
+        dir.setThrottling(Throttling.NEVER);
+        return dir;
+      }
+
+      @Override
+      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+
+        // The order of our docIDs must precisely match the incoming reader:
+        iwc.setMergePolicy(new LogByteSizeMergePolicy());
+        IndexWriter w = new IndexWriter(parallelDir, iwc);
+        int maxDoc = reader.maxDoc();
+
+        // Slowly parse the stored field into a new doc values field:
+        for(int i=0;i<maxDoc;i++) {
+          // TODO: is this still O(blockSize^2)?
+          Document oldDoc = reader.document(i);
+          Document newDoc = w.newDocument();
+          long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+          newDoc.addLong("number", value);
+          w.addDocument(newDoc);
+        }
+
+        if (random().nextBoolean()) {
+          w.forceMerge(1);
+        }
+
+        w.close();
+      }
+
+      @Override
+      protected long getCurrentSchemaGen() {
+        return 0;
+      }
+    };
+  }
+
+  /** Schema change by adding a new number_<schemaGen> DV field each time. */
+  private ReindexingReader getReindexerNewDVFields(Path root, final AtomicLong currentSchemaGen) throws IOException {
+    return new ReindexingReader(newFSDirectory(root), root.resolve("segs")) {
+      @Override
+      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+        TieredMergePolicy tmp = new TieredMergePolicy();
+        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
+        tmp.setFloorSegmentMB(.01);
+        iwc.setMergePolicy(tmp);
+        return iwc;
+      }
+
+      @Override
+      protected Directory openDirectory(Path path) throws IOException {
+        MockDirectoryWrapper dir = newMockFSDirectory(path);
+        dir.setUseSlowOpenClosers(false);
+        dir.setThrottling(Throttling.NEVER);
+        return dir;
+      }
+
+      @Override
+      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+
+        // The order of our docIDs must precisely match the incoming reader:
+        iwc.setMergePolicy(new LogByteSizeMergePolicy());
+        IndexWriter w = new IndexWriter(parallelDir, iwc);
+        int maxDoc = reader.maxDoc();
+
+        if (oldSchemaGen <= 0) {
+          // Must slowly parse the stored field into a new doc values field:
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+            newDoc.addLong("number_" + newSchemaGen, value);
+            newDoc.addLong("number", value);
+            w.addDocument(newDoc);
+          }
+        } else {
+          // Just carry over doc values from previous field:
+          NumericDocValues oldValues = reader.getNumericDocValues("number_" + oldSchemaGen);
+          assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            newDoc.addLong("number_" + newSchemaGen, oldValues.get(i));
+            w.addDocument(newDoc);
+          }
+        }
+
+        if (random().nextBoolean()) {
+          w.forceMerge(1);
+        }
+
+        w.close();
+      }
+
+      @Override
+      protected long getCurrentSchemaGen() {
+        return currentSchemaGen.get();
+      }
+
+      @Override
+      protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen) throws IOException {
+        String fieldName = "number_" + schemaGen;
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check parallel number DVs field=" + fieldName + " r=" + r + " parR=" + parR);
+        NumericDocValues numbers = parR.getNumericDocValues(fieldName);
+        if (numbers == null) {
+          return;
+        }
+        int maxDoc = r.maxDoc();
+        boolean failed = false;
+        for(int i=0;i<maxDoc;i++) {
+          Document oldDoc = r.document(i);
+          long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+          if (value != numbers.get(i)) {
+            if (DEBUG) System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
+            failed = true;
+          } else if (failed) {
+            if (DEBUG) System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
+          }
+        }
+        assertFalse("FAILED field=" + fieldName + " r=" + r, failed);
+      }
+    };
+  }
+
+  /** Schema change by changing how the same "number" DV field is indexed. */
+  private ReindexingReader getReindexerSameDVField(Path root, final AtomicLong currentSchemaGen, final AtomicLong mergingSchemaGen) throws IOException {
+    return new ReindexingReader(newFSDirectory(root), root.resolve("segs")) {
+      @Override
+      protected IndexWriterConfig getIndexWriterConfig() throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+        TieredMergePolicy tmp = new TieredMergePolicy();
+        // We write tiny docs, so we need tiny floor to avoid O(N^2) merging:
+        tmp.setFloorSegmentMB(.01);
+        iwc.setMergePolicy(tmp);
+        if (TEST_NIGHTLY) {
+          // during nightly tests, we might use too many files if we aren't careful
+          iwc.setUseCompoundFile(true);
+        }
+        return iwc;
+      }
+
+      @Override
+      protected Directory openDirectory(Path path) throws IOException {
+        MockDirectoryWrapper dir = newMockFSDirectory(path);
+        dir.setUseSlowOpenClosers(false);
+        dir.setThrottling(Throttling.NEVER);
+        return dir;
+      }
+
+      @Override
+      protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+
+        // The order of our docIDs must precisely match the incoming reader:
+        iwc.setMergePolicy(new LogByteSizeMergePolicy());
+        IndexWriter w = new IndexWriter(parallelDir, iwc);
+        int maxDoc = reader.maxDoc();
+
+        if (oldSchemaGen <= 0) {
+          // Must slowly parse the stored field into a new doc values field:
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+            newDoc.addLong("number", newSchemaGen*value);
+            w.addDocument(newDoc);
+          }
+        } else {
+          // Just carry over doc values from previous field:
+          NumericDocValues oldValues = reader.getNumericDocValues("number");
+          assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            newDoc.addLong("number", newSchemaGen*(oldValues.get(i)/oldSchemaGen));
+            w.addDocument(newDoc);
+          }
+        }
+
+        if (random().nextBoolean()) {
+          w.forceMerge(1);
+        }
+
+        w.close();
+      }
+
+      @Override
+      protected long getCurrentSchemaGen() {
+        return currentSchemaGen.get();
+      }
+
+      @Override
+      protected long getMergingSchemaGen() {
+        return mergingSchemaGen.get();
+      }
+
+      @Override
+      protected void checkParallelReader(LeafReader r, LeafReader parR, long schemaGen) throws IOException {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: now check parallel number DVs r=" + r + " parR=" + parR);
+        NumericDocValues numbers = parR.getNumericDocValues("numbers");
+        if (numbers == null) {
+          return;
+        }
+        int maxDoc = r.maxDoc();
+        boolean failed = false;
+        for(int i=0;i<maxDoc;i++) {
+          Document oldDoc = r.document(i);
+          long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+          value *= schemaGen;
+          if (value != numbers.get(i)) {
+            System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
+            failed = true;
+          } else if (failed) {
+            System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
+          }
+        }
+        assertFalse("FAILED r=" + r, failed);
+      }
+    };
+  }
+
+  public void testBasicMultipleSchemaGens() throws Exception {
+
+    AtomicLong currentSchemaGen = new AtomicLong();
+
+    // TODO: separate refresh thread, search threads, indexing threads
+    ReindexingReader reindexer = getReindexerNewDVFields(createTempDir(), currentSchemaGen);
+    reindexer.commit();
+
+    Document doc = reindexer.w.newDocument();
+    doc.addLargeText("text", "number " + random().nextLong());
+    reindexer.w.addDocument(doc);
+
+    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: refresh @ 1 doc");
+    reindexer.mgr.maybeRefresh();
+    DirectoryReader r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+    //reindexer.printRefCounts();
+
+    currentSchemaGen.incrementAndGet();
+
+    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: increment schemaGen");
+    if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST: commit");
+    reindexer.commit();
+
+    doc = reindexer.w.newDocument();
+    doc.addLargeText("text", "number " + random().nextLong());
+    reindexer.w.addDocument(doc);
+
+    if (DEBUG) System.out.println("TEST: refresh @ 2 docs");
+    reindexer.mgr.maybeRefresh();
+    //reindexer.printRefCounts();
+    r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println("TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+
+    if (DEBUG) System.out.println("TEST: forceMerge");
+    reindexer.w.forceMerge(1);
+
+    currentSchemaGen.incrementAndGet();
+
+    if (DEBUG) System.out.println("TEST: commit");
+    reindexer.commit();
+
+    if (DEBUG) System.out.println("TEST: refresh after forceMerge");
+    reindexer.mgr.maybeRefresh();
+    r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println("TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+
+    if (DEBUG) System.out.println("TEST: close writer");
+    reindexer.close();
+    reindexer.indexDir.close();
+  }
+
+  public void testRandomMultipleSchemaGens() throws Exception {
+
+    AtomicLong currentSchemaGen = new AtomicLong();
+    ReindexingReader reindexer = null;
+
+    // TODO: separate refresh thread, search threads, indexing threads
+    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+    int maxID = 0;
+    Path root = createTempDir();
+    int refreshEveryNumDocs = 100;
+    int commitCloseNumDocs = 1000;
+    for(int i=0;i<numDocs;i++) {
+      if (reindexer == null) {
+        reindexer = getReindexerNewDVFields(root, currentSchemaGen);
+      }
+
+      Document doc = reindexer.w.newDocument();
+      String id;
+      String updateID;
+      if (maxID > 0 && random().nextInt(10) == 7) {
+        // Replace a doc
+        id = "" + random().nextInt(maxID);
+        updateID = id;
+      } else {
+        id = "" + (maxID++);
+        updateID = null;
+      }
+        
+      doc.addAtom("id", id);
+      doc.addLargeText("text", "number " + random().nextLong());
+      if (updateID == null) {
+        reindexer.w.addDocument(doc);
+      } else {
+        reindexer.w.updateDocument(new Term("id", updateID), doc);
+      }
+      if (random().nextInt(refreshEveryNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: refresh @ " + (i+1) + " docs");
+        reindexer.mgr.maybeRefresh();
+
+        DirectoryReader r = reindexer.mgr.acquire();
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: got reader=" + r);
+        try {
+          checkAllNumberDVs(r, "number_" + currentSchemaGen.get(), true, 1);
+        } finally {
+          reindexer.mgr.release(r);
+        }
+        if (DEBUG) reindexer.printRefCounts();
+        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
+      }
+
+      if (random().nextInt(500) == 17) {
+        currentSchemaGen.incrementAndGet();
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance schemaGen to " + currentSchemaGen);
+      }
+
+      if (i > 0 && random().nextInt(10) == 7) {
+        // Random delete:
+        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
+      }
+
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: commit @ " + (i+1) + " docs");
+        reindexer.commit();
+        //reindexer.printRefCounts();
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+
+      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: close writer @ " + (i+1) + " docs");
+        reindexer.close();
+        reindexer.indexDir.close();
+        reindexer = null;
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+    }
+
+    if (reindexer != null) {
+      reindexer.close();
+      reindexer.indexDir.close();
+    }
+  }
+
+  /** First schema change creates a new "number" DV field from the stored field; subsequent changes just rescale the value of
+   *  that number field for all docs. */
+  public void testRandomMultipleSchemaGensSameField() throws Exception {
+
+    AtomicLong currentSchemaGen = new AtomicLong();
+    AtomicLong mergingSchemaGen = new AtomicLong();
+
+    ReindexingReader reindexer = null;
+
+    // TODO: separate refresh thread, search threads, indexing threads
+    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+    int maxID = 0;
+    Path root = createTempDir();
+    int refreshEveryNumDocs = 100;
+    int commitCloseNumDocs = 1000;
+
+    for(int i=0;i<numDocs;i++) {
+      if (reindexer == null) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: open new reader/writer");
+        reindexer = getReindexerSameDVField(root, currentSchemaGen, mergingSchemaGen);
+      }
+
+      Document doc = reindexer.w.newDocument();
+      String id;
+      String updateID;
+      if (maxID > 0 && random().nextInt(10) == 7) {
+        // Replace a doc
+        id = "" + random().nextInt(maxID);
+        updateID = id;
+      } else {
+        id = "" + (maxID++);
+        updateID = null;
+      }
+        
+      doc.addAtom("id", id);
+      doc.addLargeText("text", "number " + TestUtil.nextInt(random(), -10000, 10000));
+      if (updateID == null) {
+        reindexer.w.addDocument(doc);
+      } else {
+        reindexer.w.updateDocument(new Term("id", updateID), doc);
+      }
+      if (random().nextInt(refreshEveryNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: refresh @ " + (i+1) + " docs");
+        reindexer.mgr.maybeRefresh();
+        DirectoryReader r = reindexer.mgr.acquire();
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: got reader=" + r);
+        try {
+          checkAllNumberDVs(r, "number", true, (int) currentSchemaGen.get());
+        } finally {
+          reindexer.mgr.release(r);
+        }
+        if (DEBUG) reindexer.printRefCounts();
+        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
+      }
+
+      if (random().nextInt(500) == 17) {
+        currentSchemaGen.incrementAndGet();
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance schemaGen to " + currentSchemaGen);
+        if (random().nextBoolean()) {
+          mergingSchemaGen.incrementAndGet();
+          if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: advance mergingSchemaGen to " + mergingSchemaGen);
+        }
+      }
+
+      if (i > 0 && random().nextInt(10) == 7) {
+        // Random delete:
+        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
+      }
+
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: commit @ " + (i+1) + " docs");
+        reindexer.commit();
+        //reindexer.printRefCounts();
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+
+      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println(Thread.currentThread().getName() + ": TEST TOP: close writer @ " + (i+1) + " docs");
+        reindexer.close();
+        reindexer.indexDir.close();
+        reindexer = null;
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+    }
+
+    if (reindexer != null) {
+      reindexer.close();
+      reindexer.indexDir.close();
+    }
+    Directory dir = newFSDirectory(root.resolve("index"));
+
+    if (DirectoryReader.indexExists(dir)) {
+      // Verify main index never reflects schema changes beyond mergingSchemaGen:
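+      // (Each reindex writes schemaGen*value, so any surviving DV must be an integer
+      // multiple of the stored value with a multiplier no larger than mergingSchemaGen.)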
+      try (IndexReader r = DirectoryReader.open(dir)) {
+        for (LeafReaderContext ctx : r.leaves()) {
+          LeafReader leaf = ctx.reader();
+          NumericDocValues numbers = leaf.getNumericDocValues("number");
+          if (numbers != null) {
+            int maxDoc = leaf.maxDoc();
+            for(int i=0;i<maxDoc;i++) {
+              Document doc = leaf.document(i);
+              long value = Long.parseLong(doc.getString("text").split(" ")[1]);
+              long dvValue = numbers.get(i);
+              if (value == 0) {
+                assertEquals(0, dvValue);
+              } else {
+                assertTrue(dvValue % value == 0);
+                assertTrue(dvValue / value <= mergingSchemaGen.get());
+              }
+            }
+          }
+        }
+      }
+    }
+    dir.close();
+  }
+
+  public void testBasic() throws Exception {
+    ReindexingReader reindexer = getReindexer(createTempDir());
+
+    // Start with initial empty commit:
+    reindexer.commit();
+
+    Document doc = reindexer.w.newDocument();
+    doc.addLargeText("text", "number " + random().nextLong());
+    reindexer.w.addDocument(doc);
+
+    if (DEBUG) System.out.println("TEST: refresh @ 1 doc");
+    reindexer.mgr.maybeRefresh();
+    DirectoryReader r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println("TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r);
+      IndexSearcher s = newSearcher(r);
+      testNumericDVSort(s);
+      testRanges(s);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+    //reindexer.printRefCounts();
+
+    if (DEBUG) System.out.println("TEST: commit");
+    reindexer.commit();
+
+    doc = reindexer.w.newDocument();
+    doc.addLargeText("text", "number " + random().nextLong());
+    reindexer.w.addDocument(doc);
+
+    if (DEBUG) System.out.println("TEST: refresh @ 2 docs");
+    reindexer.mgr.maybeRefresh();
+    //reindexer.printRefCounts();
+    r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println("TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r);
+      IndexSearcher s = newSearcher(r);
+      testNumericDVSort(s);
+      testRanges(s);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+
+    if (DEBUG) System.out.println("TEST: forceMerge");
+    reindexer.w.forceMerge(1);
+
+    if (DEBUG) System.out.println("TEST: commit");
+    reindexer.commit();
+
+    if (DEBUG) System.out.println("TEST: refresh after forceMerge");
+    reindexer.mgr.maybeRefresh();
+    r = reindexer.mgr.acquire();
+    if (DEBUG) System.out.println("TEST: got reader=" + r);
+    try {
+      checkAllNumberDVs(r);
+      IndexSearcher s = newSearcher(r);
+      testNumericDVSort(s);
+      testRanges(s);
+    } finally {
+      reindexer.mgr.release(r);
+    }
+
+    if (DEBUG) System.out.println("TEST: close writer");
+    reindexer.close();
+    reindexer.indexDir.close();
+  }
+
+  public void testRandom() throws Exception {
+    Path root = createTempDir();
+    ReindexingReader reindexer = null;
+
+    // TODO: separate refresh thread, search threads, indexing threads
+    int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+    int maxID = 0;
+    int refreshEveryNumDocs = 100;
+    int commitCloseNumDocs = 1000;
+    for(int i=0;i<numDocs;i++) {
+      if (reindexer == null) {
+        if (DEBUG) System.out.println("TEST: open writer @ " + (i+1) + " docs");
+        reindexer = getReindexer(root);
+      }
+
+      Document doc = reindexer.w.newDocument();
+      String id;
+      String updateID;
+      if (maxID > 0 && random().nextInt(10) == 7) {
+        // Replace a doc
+        id = "" + random().nextInt(maxID);
+        updateID = id;
+      } else {
+        id = "" + (maxID++);
+        updateID = null;
+      }
+        
+      doc.addAtom("id", id);
+      doc.addLargeText("text", "number " + random().nextLong());
+      if (updateID == null) {
+        reindexer.w.addDocument(doc);
+      } else {
+        reindexer.w.updateDocument(new Term("id", updateID), doc);
+      }
+
+      if (random().nextInt(refreshEveryNumDocs) == 17) {
+        if (DEBUG) System.out.println("TEST: refresh @ " + (i+1) + " docs");
+        reindexer.mgr.maybeRefresh();
+        DirectoryReader r = reindexer.mgr.acquire();
+        if (DEBUG) System.out.println("TEST: got reader=" + r);
+        try {
+          checkAllNumberDVs(r);
+          IndexSearcher s = newSearcher(r);
+          testNumericDVSort(s);
+          testRanges(s);
+        } finally {
+          reindexer.mgr.release(r);
+        }
+        refreshEveryNumDocs = (int) (1.25 * refreshEveryNumDocs);
+      }
+
+      if (i > 0 && random().nextInt(10) == 7) {
+        // Random delete:
+        reindexer.w.deleteDocuments(new Term("id", ""+random().nextInt(i)));
+      }
+
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println("TEST: commit @ " + (i+1) + " docs");
+        reindexer.commit();
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+
+      // Sometimes close & reopen writer/manager, to confirm the parallel segments persist:
+      if (random().nextInt(commitCloseNumDocs) == 17) {
+        if (DEBUG) System.out.println("TEST: close writer @ " + (i+1) + " docs");
+        reindexer.close();
+        reindexer.indexDir.close();
+        reindexer = null;
+        commitCloseNumDocs = (int) (1.25 * commitCloseNumDocs);
+      }
+    }
+    if (reindexer != null) {
+      reindexer.close();
+      reindexer.indexDir.close();
+    }
+  }
+
+  private static void checkAllNumberDVs(IndexReader r) throws IOException {
+    checkAllNumberDVs(r, "number", true, 1);
+  }
+
+  private static void checkAllNumberDVs(IndexReader r, String fieldName, boolean doThrow, int multiplier) throws IOException {
+    NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName);
+    int maxDoc = r.maxDoc();
+    boolean failed = false;
+    long t0 = System.currentTimeMillis();
+    for(int i=0;i<maxDoc;i++) {
+      Document oldDoc = r.document(i);
+      long value = multiplier * Long.parseLong(oldDoc.getString("text").split(" ")[1]);
+      if (value != numbers.get(i)) {
+        System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
+        failed = true;
+      } else if (failed) {
+        System.out.println("OK: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i));
+      }
+    }
+    if (failed) {
+      if (r instanceof LeafReader == false) {
+        System.out.println("TEST FAILED; check leaves");
+        for(LeafReaderContext ctx : r.leaves()) {
+          System.out.println("CHECK LEAF=" + ctx.reader());
+          checkAllNumberDVs(ctx.reader(), fieldName, false, 1);
+        }
+      }
+      if (doThrow) {
+        assertFalse("FAILED field=" + fieldName + " r=" + r, failed);
+      } else {
+        System.out.println("FAILED field=" + fieldName + " r=" + r);
+      }
+    }
+  }
+
+  private static void testNumericDVSort(IndexSearcher s) throws IOException {
+    // Confirm we can sort by the new DV field:
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 100, new Sort(new SortField("number", SortField.Type.LONG)));
+    NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
+    long last = Long.MIN_VALUE;
+    for(ScoreDoc scoreDoc : hits.scoreDocs) {
+      long value = Long.parseLong(s.doc(scoreDoc.doc).getString("text").split(" ")[1]);
+      assertTrue(value >= last);
+      assertEquals(value, numbers.get(scoreDoc.doc));
+      last = value;
+    }
+  }
+
+  private static void testRanges(IndexSearcher s) throws IOException {
+    NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
+    FieldTypes fieldTypes = s.getFieldTypes();
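+    // Hedged note: the searcher exposes the index's FieldTypes, whose
+    // newLongRangeFilter builds a range filter over a field indexed via addLong
+    // (API names as used in this branch).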
+    for(int i=0;i<100;i++) {
+      // Confirm we can range search by the new indexed (numeric) field:
+      long min = random().nextLong();
+      long max = random().nextLong();
+      if (min > max) {
+        long x = min;
+        min = max;
+        max = x;
+      }
+
+      TopDocs hits = s.search(new ConstantScoreQuery(fieldTypes.newLongRangeFilter("number", min, true, max, true)), 100);
+      for(ScoreDoc scoreDoc : hits.scoreDocs) {
+        long value = Long.parseLong(s.doc(scoreDoc.doc).getString("text").split(" ")[1]);
+        assertTrue(value >= min);
+        assertTrue(value <= max);
+        assertEquals(value, numbers.get(scoreDoc.doc));
+      }
+    }
+  }
+
+  // TODO: test exceptions
+
+  public void testSwitchToDocValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    // Test relies on doc order:
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    int numDocs = atLeast(1000);
+    long[] numbers = new long[numDocs];
+    if (VERBOSE) {
+      System.out.println("numDocs=" + numDocs);
+    }
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      long number = random().nextLong();
+      numbers[i] = number;
+      doc.addStoredLong("number", number);
+      w.addDocument(doc);
+      // Make sure we have at least 2 segments, else forceMerge won't do anything:
+      if (i == numDocs/2) {
+        w.commit();
+      }
+    }
+    w.close();
+
+    ReindexingReader r = new ReindexingReader(dir, createTempDir()) {
+        @Override
+        protected long getCurrentSchemaGen() {
+          return 0;
+        }
+
+        @Override
+        protected IndexWriterConfig getIndexWriterConfig() {
+          // Test relies on doc order:
+          return newIndexWriterConfig().setMergePolicy(new AlwaysForceMergePolicy(newLogMergePolicy()));
+        }
+
+        @Override
+        protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
+          assertEquals(0L, newSchemaGen);
+          assertEquals(-1L, oldSchemaGen);
+
+          IndexWriterConfig iwc = newIndexWriterConfig();
+          if (VERBOSE) {
+            System.out.println("TEST: build parallel " + reader);
+          }
+
+          // The order of our docIDs must precisely match the incoming reader:
+          iwc.setMergePolicy(new LogByteSizeMergePolicy());
+          IndexWriter w = new IndexWriter(parallelDir, iwc);
+          int maxDoc = reader.maxDoc();
+
+          // Slowly parse the stored field into a new doc values field:
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            newDoc.addLong("numberDV", oldDoc.getLong("number").longValue());
+            w.addDocument(newDoc);
+          }
+
+          if (random().nextBoolean()) {
+            w.forceMerge(1);
+          }
+
+          w.close();
+        }
+      };
+    
+    if (VERBOSE) {
+      System.out.println("TEST: now force merge");
+    }
+
+    r.w.forceMerge(1);
+    // Make sure main + parallel index was fully updated:
+    DirectoryReader dr = r.mgr.acquire();
+    if (VERBOSE) {
+      System.out.println("TEST: dr=" + dr);
+    }
+    try {
+      NumericDocValues dv = MultiDocValues.getNumericValues(dr, "numberDV");
+      for(int i=0;i<numDocs;i++) {
+        assertEquals(numbers[i], dv.get(i));
+      }
+    } finally {
+      r.mgr.release(dr);
+    }
+    r.close();
+
+    // Make sure main index was fully updated:
+    dr = DirectoryReader.open(dir);
+    if (VERBOSE) {
+      System.out.println("TEST: final reader " + dr);
+    }
+    NumericDocValues dv = MultiDocValues.getNumericValues(dr, "numberDV");
+    assertNotNull(dv);
+    for(int i=0;i<numDocs;i++) {
+      assertEquals("docID=" + i, numbers[i], dv.get(i));
+    }
+
+    dr.close();
+    dir.close();
+  }
+
+  public void testSwitchToSortedSetDocValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    // Test relies on doc order:
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    int numDocs = atLeast(1000);
+    String[] strings = new String[2*numDocs];
+    if (VERBOSE) {
+      System.out.println("numDocs=" + numDocs);
+    }
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      String s = TestUtil.randomRealisticUnicodeString(random());
+      String s2 = TestUtil.randomRealisticUnicodeString(random());
+      strings[2*i] = s;
+      strings[2*i+1] = s2;
+      doc.addStoredString("field", s);
+      doc.addStoredString("field", s2);
+      w.addDocument(doc);
+      // Make sure we have at least 2 segments, else forceMerge won't do anything:
+      if (i == numDocs/2) {
+        w.commit();
+      }
+    }
+    w.close();
+
+    ReindexingReader r = new ReindexingReader(dir, createTempDir()) {
+        @Override
+        protected long getCurrentSchemaGen() {
+          return 0;
+        }
+
+        @Override
+        protected IndexWriterConfig getIndexWriterConfig() {
+          // Test relies on doc order:
+          return newIndexWriterConfig().setMergePolicy(new AlwaysForceMergePolicy(newLogMergePolicy()));
+        }
+
+        @Override
+        protected void reindex(long oldSchemaGen, long newSchemaGen, LeafReader reader, Directory parallelDir) throws IOException {
+          assertEquals(0L, newSchemaGen);
+          assertEquals(-1L, oldSchemaGen);
+
+          IndexWriterConfig iwc = newIndexWriterConfig();
+          if (VERBOSE) {
+            System.out.println("TEST: build parallel " + reader);
+          }
+
+          // The order of our docIDs must precisely match the incoming reader:
+          iwc.setMergePolicy(new LogByteSizeMergePolicy());
+          IndexWriter w = new IndexWriter(parallelDir, iwc);
+          int maxDoc = reader.maxDoc();
+          FieldTypes fieldTypes = w.getFieldTypes();
+          fieldTypes.setMultiValued("fieldDV");
+
+          // Slowly parse the stored field into a new doc values field:
+          for(int i=0;i<maxDoc;i++) {
+            // TODO: is this still O(blockSize^2)?
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
+            for(String s : oldDoc.getStrings("field")) {
+              newDoc.addAtom("fieldDV", s);
+            }
+            w.addDocument(newDoc);
+          }
+
+          if (random().nextBoolean()) {
+            w.forceMerge(1);
+          }
+
+          w.close();
+        }
+      };
+    
+    if (VERBOSE) {
+      System.out.println("TEST: now force merge");
+    }
+
+    r.w.forceMerge(1);
+    // Make sure main + parallel index was fully updated:
+    DirectoryReader dr = r.mgr.acquire();
+    if (VERBOSE) {
+      System.out.println("TEST: dr=" + dr);
+    }
+    try {
+      SortedSetDocValues dv = MultiDocValues.getSortedSetValues(dr, "fieldDV");
+      assertNotNull(dv);
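+      // SortedSetDocValues returns each document's ords in sorted, de-duplicated
+      // order, so seeing a single ord means the doc's two stored strings were equal.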
+      for(int i=0;i<numDocs;i++) {
+        dv.setDocument(i);
+        long ord = dv.nextOrd();
+        assert ord != SortedSetDocValues.NO_MORE_ORDS;
+        String v = dv.lookupOrd(ord).utf8ToString();
+
+        long ord2 = dv.nextOrd();
+        if (ord2 == SortedSetDocValues.NO_MORE_ORDS) {
+          assertTrue(v.equals(strings[2*i]));
+          assertTrue(v.equals(strings[2*i+1]));
+        } else {
+          assert dv.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
+          String v2 = dv.lookupOrd(ord2).utf8ToString();
+          assertTrue((v.equals(strings[2*i]) && v2.equals(strings[2*i+1])) ||
+                     (v.equals(strings[2*i+1]) && v2.equals(strings[2*i])));
+        }
+      }
+    } finally {
+      r.mgr.release(dr);
+    }
+    r.close();
+
+    // Make sure main index was fully updated:
+    dr = DirectoryReader.open(dir);
+    if (VERBOSE) {
+      System.out.println("TEST: final reader " + dr);
+    }
+    SortedSetDocValues dv = MultiDocValues.getSortedSetValues(dr, "fieldDV");
+    assertNotNull(dv);
+    for(int i=0;i<numDocs;i++) {
+      dv.setDocument(i);
+      long ord = dv.nextOrd();
+      assert ord != SortedSetDocValues.NO_MORE_ORDS;
+      String v = dv.lookupOrd(ord).utf8ToString();
+
+      long ord2 = dv.nextOrd();
+      if (ord2 == SortedSetDocValues.NO_MORE_ORDS) {
+        assertTrue(v.equals(strings[2*i]));
+        assertTrue(v.equals(strings[2*i+1]));
+      } else {
+        assert dv.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
+        String v2 = dv.lookupOrd(ord2).utf8ToString();
+        assertTrue((v.equals(strings[2*i]) && v2.equals(strings[2*i+1])) ||
+                   (v.equals(strings[2*i+1]) && v2.equals(strings[2*i])));
+      }
+    }
+
+    dr.close();
+    dir.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestRollback.java b/lucene/core/src/test/org/apache/lucene/index/TestRollback.java
index 8232ee5..cf7ed11 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestRollback.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestRollback.java
@@ -19,7 +19,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -30,8 +29,8 @@
     Directory dir = newDirectory();
     RandomIndexWriter rw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 5; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("pk", Integer.toString(i), Field.Store.YES));
+      Document doc = rw.newDocument();
+      doc.addAtom("pk", Integer.toString(i));
       rw.addDocument(doc);
     }
     rw.close();
@@ -42,10 +41,10 @@
                                            .setOpenMode(IndexWriterConfig.OpenMode.APPEND));
 
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String value = Integer.toString(i);
-      doc.add(newStringField("pk", value, Field.Store.YES));
-      doc.add(newStringField("text", "foo", Field.Store.YES));
+      doc.addAtom("pk", value);
+      doc.addAtom("text", "foo");
       w.updateDocument(new Term("pk", value), doc);
     }
     w.rollback();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
index 9919af5..3656f2f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
@@ -44,8 +44,6 @@
       ((MockDirectoryWrapper)dir).setEnableVirusScanner(false);
     }
     
-    final LineFileDocs docs = new LineFileDocs(random, true);
-
     //provider.register(new MemoryCodec());
     if (random().nextBoolean()) {
       Codec.setDefault(TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random().nextBoolean(), random.nextFloat())));
@@ -66,7 +64,7 @@
     int updateCount = 0;
     // TODO: sometimes update ids not in order...
     for(int docIter=0;docIter<numUpdates;docIter++) {
-      final Document doc = docs.nextDoc();
+      final Document doc = w.newDocument();
       final String myID = Integer.toString(id);
       if (id == SIZE-1) {
         id = 0;
@@ -76,7 +74,7 @@
       if (VERBOSE) {
         System.out.println("  docIter=" + docIter + " id=" + id);
       }
-      doc.getField("docid").setStringValue(myID);
+      doc.addAtom("docid", myID);
 
       Term idTerm = new Term("docid", myID);
 
@@ -146,8 +144,6 @@
 
     TestIndexWriter.assertNoUnreferencedFiles(dir, "leftover files after rolling updates");
 
-    docs.close();
-    
     // LUCENE-4455:
     SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
     long totalBytes = 0;
@@ -168,10 +164,10 @@
   public void testUpdateSameDoc() throws Exception {
     final Directory dir = newDirectory();
 
-    final LineFileDocs docs = new LineFileDocs(random());
     for (int r = 0; r < 3; r++) {
       final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                    .setMaxBufferedDocs(2));
+      final LineFileDocs docs = new LineFileDocs(w, random());
       final int numUpdates = atLeast(20);
       int numThreads = TestUtil.nextInt(random(), 2, 6);
       IndexingThread[] threads = new IndexingThread[numThreads];
@@ -185,12 +181,12 @@
       }
 
       w.close();
+      docs.close();
     }
 
     IndexReader open = DirectoryReader.open(dir);
     assertEquals(1, open.numDocs());
     open.close();
-    docs.close();
     dir.close();
   }
   
@@ -211,8 +207,8 @@
       try {
         DirectoryReader open = null;
         for (int i = 0; i < num; i++) {
-          Document doc = new Document();// docs.nextDoc();
-          doc.add(newStringField("id", "test", Field.Store.NO));
+          Document doc = writer.newDocument();// docs.nextDoc();
+          doc.addAtom("id", "test");
           writer.updateDocument(new Term("id", "test"), doc);
           if (random().nextInt(3) == 0) {
             if (open == null) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
index 52b8ce8..2f9cf15 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -37,8 +36,8 @@
   public void test() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new TextField("eng", new BugReproTokenStream()));
+    Document doc = riw.newDocument();
+    doc.addLargeText("eng", new BugReproTokenStream());
     riw.addDocument(doc);
     riw.close();
     dir.close();
@@ -51,8 +50,8 @@
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
-      doc.add(new TextField("eng", new BugReproTokenStream()));
+      Document doc = riw.newDocument();
+      doc.addLargeText("eng", new BugReproTokenStream());
       riw.addDocument(doc);
     }
     riw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index d7b577f..1378a64 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -19,10 +19,14 @@
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
 import java.util.HashMap;
 
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -41,11 +45,9 @@
   private String mergedSegment = "test";
   //First segment to be merged
   private Directory merge1Dir;
-  private Document doc1 = new Document();
   private SegmentReader reader1 = null;
   //Second Segment to be merged
   private Directory merge2Dir;
-  private Document doc2 = new Document();
   private SegmentReader reader2 = null;
 
   @Override
@@ -54,12 +56,10 @@
     mergedDir = newDirectory();
     merge1Dir = newDirectory();
     merge2Dir = newDirectory();
-    DocHelper.setupDoc(doc1);
-    SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
-    DocHelper.setupDoc(doc2);
-    SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
-    reader1 = new SegmentReader(info1, newIOContext(random()));
-    reader2 = new SegmentReader(info2, newIOContext(random()));
+    SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir);
+    SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir);
+    reader1 = new SegmentReader(FieldTypes.getFieldTypes(merge1Dir, null), info1, newIOContext(random()));
+    reader2 = new SegmentReader(FieldTypes.getFieldTypes(merge2Dir, null), info2, newIOContext(random()));
   }
 
   @Override
@@ -84,27 +84,32 @@
     final Codec codec = Codec.getDefault();
     final SegmentInfo si = new SegmentInfo(mergedDir, Version.LATEST, mergedSegment, -1, false, codec, null, StringHelper.randomId(), new HashMap<>());
 
-    SegmentMerger merger = new SegmentMerger(Arrays.<CodecReader>asList(reader1, reader2),
-                                             si, InfoStream.getDefault(), mergedDir,
-                                             new FieldInfos.FieldNumbers(),
-                                             newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1))));
+    FieldTypes fieldTypes = FieldTypes.getFieldTypes(merge1Dir, new MockAnalyzer(random()));
+    SegmentMerger merger = new SegmentMerger(fieldTypes, Arrays.<CodecReader>asList(reader1, reader2),
+        si, InfoStream.getDefault(), mergedDir,
+        new FieldInfos.FieldNumbers(),
+        newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1))));
     MergeState mergeState = merger.merge();
     int docsMerged = mergeState.segmentInfo.getDocCount();
     assertTrue(docsMerged == 2);
     //Should be able to open a new SegmentReader against the new directory
-    SegmentReader mergedReader = new SegmentReader(new SegmentCommitInfo(
+    SegmentReader mergedReader = new SegmentReader(fieldTypes,
+                                                   new SegmentCommitInfo(
                                                          mergeState.segmentInfo,
                                                          0, -1L, -1L, -1L),
                                                    newIOContext(random()));
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
-    StoredDocument newDoc1 = mergedReader.document(0);
+    Document newDoc1 = mergedReader.document(0);
     assertTrue(newDoc1 != null);
+
+    Set<String> unstored = DocHelper.getUnstored(fieldTypes);
+
     //There are 2 unstored fields on the document
-    assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
-    StoredDocument newDoc2 = mergedReader.document(1);
+    assertEquals(DocHelper.numFields() - unstored.size() + 1, DocHelper.numFields(newDoc1));
+    Document newDoc2 = mergedReader.document(1);
     assertTrue(newDoc2 != null);
-    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+    assertEquals(DocHelper.numFields() - unstored.size() + 1, DocHelper.numFields(newDoc2));
 
     DocsEnum termDocs = TestUtil.docs(random(), mergedReader,
         DocHelper.TEXT_FIELD_2_KEY,
@@ -140,7 +145,7 @@
       i++;
     }
 
-    TestSegmentReader.checkNorms(mergedReader);
+    TestSegmentReader.checkNorms(fieldTypes, mergedReader);
     mergedReader.close();
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 918b915..bbe77b7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -21,8 +21,11 @@
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -32,17 +35,17 @@
 
 public class TestSegmentReader extends LuceneTestCase {
   private Directory dir;
-  private Document testDoc = new Document();
   private SegmentReader reader = null;
-  
+  private FieldTypes fieldTypes;
+
   //TODO: Setup the reader w/ multiple documents
   @Override
   public void setUp() throws Exception {
     super.setUp();
     dir = newDirectory();
-    DocHelper.setupDoc(testDoc);
-    SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
-    reader = new SegmentReader(info, IOContext.READ);
+    SegmentCommitInfo info = DocHelper.writeDoc(random(), dir);
+    fieldTypes = FieldTypes.getFieldTypes(dir, new MockAnalyzer(random()));
+    reader = new SegmentReader(fieldTypes, info, IOContext.READ);
   }
   
   @Override
@@ -55,22 +58,22 @@
   public void test() {
     assertTrue(dir != null);
     assertTrue(reader != null);
-    assertTrue(DocHelper.nameValues.size() > 0);
-    assertTrue(DocHelper.numFields(testDoc) == DocHelper.all.size());
+    assertEquals(DocHelper.getAll(fieldTypes).size(), DocHelper.numFields()+1);
   }
   
   public void testDocument() throws IOException {
     assertTrue(reader.numDocs() == 1);
     assertTrue(reader.maxDoc() >= 1);
-    StoredDocument result = reader.document(0);
+    Document result = reader.document(0);
     assertTrue(result != null);
     //There are 2 unstored fields on the document that are not preserved across writing
-    assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
+    assertEquals(DocHelper.numFields() - DocHelper.getUnstored(fieldTypes).size() + 1, DocHelper.numFields(result));
     
-    List<StorableField> fields = result.getFields();
-    for (final StorableField field : fields ) { 
+    List<IndexableField> fields = result.getFields();
+    Set<String> allFieldNames = DocHelper.getAll(fieldTypes);
+    for (final IndexableField field : fields ) { 
       assertTrue(field != null);
-      assertTrue(DocHelper.nameValues.containsKey(field.name()));
+      assertTrue(allFieldNames.contains(field.name()));
     }
   }
   
@@ -96,26 +99,19 @@
       }
     }
 
-    assertTrue(allFieldNames.size() == DocHelper.all.size());
-    for (String s : allFieldNames) {
-      assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
-    }                                                                               
-
-    assertTrue(indexedFieldNames.size() == DocHelper.indexed.size());
-    for (String s : indexedFieldNames) {
-      assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
-    }
-    
-    assertTrue(notIndexedFieldNames.size() == DocHelper.unindexed.size());
-    //Get all indexed fields that are storing term vectors
-    assertTrue(tvFieldNames.size() == DocHelper.termvector.size());
-
-    assertTrue(noTVFieldNames.size() == DocHelper.notermvector.size());
+    assertEquals(allFieldNames, DocHelper.getAll(fieldTypes));
+    assertEquals(indexedFieldNames, DocHelper.getIndexed(fieldTypes));
+    assertEquals(notIndexedFieldNames, DocHelper.getNotIndexed(fieldTypes));
+    assertEquals(tvFieldNames, DocHelper.getTermVectorFields(fieldTypes));
+    assertEquals(noTVFieldNames, DocHelper.getNoTermVectorFields(fieldTypes));
   } 
   
   public void testTerms() throws IOException {
     Fields fields = MultiFields.getFields(reader);
     for (String field : fields) {
+      if (field.equals(FieldTypes.FIELD_NAMES_FIELD)) {
+        continue;
+      }
       Terms terms = fields.terms(field);
       assertNotNull(terms);
       TermsEnum termsEnum = terms.iterator(null);
@@ -169,19 +165,17 @@
     }
 */
 
-    checkNorms(reader);
+    checkNorms(fieldTypes, reader);
   }
 
-  public static void checkNorms(LeafReader reader) throws IOException {
+  public static void checkNorms(FieldTypes fieldTypes, LeafReader reader) throws IOException {
     // test omit norms
-    for (int i=0; i<DocHelper.fields.length; i++) {
-      IndexableField f = DocHelper.fields[i];
-      if (f.fieldType().indexOptions() != IndexOptions.NONE) {
-        assertEquals(reader.getNormValues(f.name()) != null, !f.fieldType().omitNorms());
-        assertEquals(reader.getNormValues(f.name()) != null, !DocHelper.noNorms.containsKey(f.name()));
-        if (reader.getNormValues(f.name()) == null) {
+    for (String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getIndexOptions(fieldName) != IndexOptions.NONE) {
+        assertEquals("field " + fieldName, reader.getNormValues(fieldName) != null, fieldTypes.getNorms(fieldName));
+        if (reader.getNormValues(fieldName) == null) {
           // test for norms of null
-          NumericDocValues norms = MultiDocValues.getNormValues(reader, f.name());
+          NumericDocValues norms = MultiDocValues.getNormValues(reader, fieldName);
           assertNull(norms);
         }
       }
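
The TestSegmentReader hunks above show the new bootstrap path on this branch:
field-type metadata now lives in the index itself, so the test loads a
FieldTypes instance from the Directory and hands it to SegmentReader, and any
field iteration must skip the reserved FieldTypes.FIELD_NAMES_FIELD
bookkeeping field. A minimal sketch of that setup, assuming the branch API
exactly as exercised in this hunk (DocHelper and random() are Lucene test
utilities; openSegmentReader is a hypothetical helper name):

    // Hypothetical helper mirroring setUp() above:
    SegmentReader openSegmentReader(Directory dir) throws IOException {
      SegmentCommitInfo info = DocHelper.writeDoc(random(), dir);
      // The schema travels with the index; no per-Field FieldType needed:
      FieldTypes fieldTypes = FieldTypes.getFieldTypes(dir, new MockAnalyzer(random()));
      return new SegmentReader(fieldTypes, info, IOContext.READ);
    }
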
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 3f5686e..62e4c89 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -21,7 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -29,16 +29,16 @@
 import org.apache.lucene.util.TestUtil;
 
 public class TestSegmentTermDocs extends LuceneTestCase {
-  private Document testDoc = new Document();
   private Directory dir;
   private SegmentCommitInfo info;
+  private FieldTypes fieldTypes;
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
     dir = newDirectory();
-    DocHelper.setupDoc(testDoc);
-    info = DocHelper.writeDoc(random(), dir, testDoc);
+    info = DocHelper.writeDoc(random(), dir);
+    fieldTypes = FieldTypes.getFieldTypes(dir, null);
   }
   
   @Override
@@ -53,25 +53,24 @@
 
   public void testTermDocs() throws IOException {
     //After adding the document, we should be able to read it back in
-    SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(fieldTypes, info, newIOContext(random()));
     assertTrue(reader != null);
 
     TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null);
     terms.seekCeil(new BytesRef("field"));
     DocsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, DocsEnum.FLAG_FREQS);
-    if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)    {
-      int docId = termDocs.docID();
-      assertTrue(docId == 0);
-      int freq = termDocs.freq();
-      assertTrue(freq == 3);  
-    }
+    assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    int docId = termDocs.docID();
+    assertTrue(docId == 0);
+    int freq = termDocs.freq();
+    assertTrue(freq == 3);  
     reader.close();
   }  
   
   public void testBadSeek() throws IOException {
     {
       //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+      SegmentReader reader = new SegmentReader(fieldTypes, info, newIOContext(random()));
       assertTrue(reader != null);
       DocsEnum termDocs = TestUtil.docs(random(), reader,
           "textField2",
@@ -85,7 +84,7 @@
     }
     {
       //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, newIOContext(random()));
+      SegmentReader reader = new SegmentReader(fieldTypes, info, newIOContext(random()));
       assertTrue(reader != null);
       DocsEnum termDocs = TestUtil.docs(random(), reader,
           "junk",
@@ -257,10 +256,9 @@
   }
 
 
-  private void addDoc(IndexWriter writer, String value) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", value, Field.Store.NO));
-      writer.addDocument(doc);
+  private void addDoc(IndexWriter writer, String value) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", value);
+    writer.addDocument(doc);
   }
 }
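
The same one-line migration recurs in TestSegmentTermDocs and
TestSegmentTermEnum: documents are obtained from the writer, so they are bound
to its FieldTypes schema, and storage/analysis behavior is implied by the add
method rather than a Field.Store flag. A hedged before/after sketch of the
addDoc helper rewritten above:

    private void addDoc(IndexWriter writer, String value) throws IOException {
      // before: Document doc = new Document();
      //         doc.add(newTextField("content", value, Field.Store.NO));
      Document doc = writer.newDocument();  // schema-aware document
      doc.addLargeText("content", value);   // analyzed full-text field
      writer.addDocument(doc);
    }
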
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
index 701de34..c7ef348 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
@@ -19,14 +19,13 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 
 
 public class TestSegmentTermEnum extends LuceneTestCase {
@@ -133,10 +132,9 @@
     reader.close();
   }
 
-  private void addDoc(IndexWriter writer, String value) throws IOException
-  {
-    Document doc = new Document();
-    doc.add(newTextField("content", value, Field.Store.NO));
+  private void addDoc(IndexWriter writer, String value) throws IOException {
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", value);
     writer.addDocument(doc);
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
index b31c77b..f0f01c3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
@@ -20,8 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -34,9 +33,9 @@
 
   private void addDocs(IndexWriter writer, int numDocs, boolean withID) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       if (withID) {
-        doc.add(new StringField("id", "" + i, Field.Store.NO));
+        doc.addUniqueInt("id", i);
       }
       writer.addDocument(doc);
     }
@@ -281,13 +280,12 @@
     
     IndexWriterConfig conf = newWriterConfig();
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     addDocs(writer, 3);
     addDocs(writer, 5);
     addDocs(writer, 3);
     
-    // delete the last document, so that the last segment is merged.
-    writer.deleteDocuments(new Term("id", "10"));
     writer.close();
     
     conf = newWriterConfig();
@@ -334,12 +332,13 @@
     
     IndexWriterConfig conf = newWriterConfig();
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     addDocs(writer, 5, true);
     
     // delete the last document
     
-    writer.deleteDocuments(new Term("id", "4"));
+    writer.deleteDocuments(fieldTypes.newIntTerm("id", 4));
     writer.close();
     
     conf = newWriterConfig();
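
TestSizeBoundedForceMerge swaps string-typed IDs (StringField plus
new Term("id", "4")) for the schema's native int type; deletes then go through
a typed term built by FieldTypes. A minimal sketch, assuming the API as used
above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    Document doc = writer.newDocument();
    doc.addUniqueInt("id", 4);              // indexed as a unique int key
    writer.addDocument(doc);
    // typed delete-by-term; replaces new Term("id", "4"):
    writer.deleteDocuments(fieldTypes.newIntTerm("id", 4));
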
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
index dbfd7d9..b2fa769 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
@@ -25,8 +25,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -71,7 +70,7 @@
       throws RuntimeException, IOException {
     for (int i = 0; i < numSnapshots; i++) {
       // create dummy document to trigger commit.
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       snapshots.add(sdp.snapshot());
     }
@@ -123,16 +122,16 @@
     }
     dp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
     writer.commit();
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
+
     final Thread t = new Thread() {
         @Override
         public void run() {
-          Document doc = new Document();
-          FieldType customType = new FieldType(TextField.TYPE_STORED);
-          customType.setStoreTermVectors(true);
-          customType.setStoreTermVectorPositions(true);
-          customType.setStoreTermVectorOffsets(true);
-          doc.add(newField("content", "aaa", customType));
+          Document doc = writer.newDocument();
+          doc.addLargeText("content", "aaa");
           do {
             for(int i=0;i<27;i++) {
               try {
@@ -172,12 +171,8 @@
     // Add one more document to force writer to commit a
     // final segment, so deletion policy has a chance to
     // delete again:
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("content", "aaa", customType));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
 
     // Make sure we don't have any leftover files in the
@@ -299,7 +294,7 @@
         @Override
         public void run() {
           try {
-            writer.addDocument(new Document());
+            writer.addDocument(writer.newDocument());
             writer.commit();
             snapshots[finalI] = sdp.snapshot();
           } catch (Exception e) {
@@ -319,7 +314,7 @@
     }
 
     // Do one last commit, so that after we release all snapshots, we stay w/ one commit
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     for (int i=0;i<threads.length;i++) {
@@ -369,7 +364,7 @@
     
     // Create another commit - we must do that, because otherwise the "snapshot"
     // files will still remain in the index, since it's the last commit.
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     // Release
@@ -387,7 +382,7 @@
 
     IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy()));
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     IndexCommit s1 = sdp.snapshot();
@@ -395,7 +390,7 @@
     assertSame(s1, s2); // should be the same instance
     
     // create another commit
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     // release "s1" should not delete "s2"
@@ -418,12 +413,12 @@
     }
     IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy()));
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     IndexCommit s1 = sdp.snapshot();
 
     // create another commit, not snapshotted.
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.close();
 
     // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1"
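
In TestSnapshotDeletionPolicy the per-document FieldType plumbing
(setStoreTermVectors and friends on each Field) collapses into one-time,
per-field schema calls on the writer's FieldTypes; every later
addLargeText("content", ...) inherits those settings. A sketch of the pattern
as it appears in both rewritten spots:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.enableTermVectors("content");
    fieldTypes.enableTermVectorPositions("content");
    fieldTypes.enableTermVectorOffsets("content");

    // Every subsequent value written to "content" stores vectors automatically:
    Document doc = writer.newDocument();
    doc.addLargeText("content", "aaa");
    writer.addDocument(doc);
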
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
index bd35b50..951a49e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -36,23 +36,20 @@
       Directory dir = newDirectory();
       RandomIndexWriter w = new RandomIndexWriter(random(), dir);
       final Set<Integer> aDocs = new HashSet<>();
-      final Document doc = new Document();
-      final Field f = newStringField("field", "", Field.Store.NO);
-      doc.add(f);
-      final Field idField = newStringField("id", "", Field.Store.YES);
-      doc.add(idField);
       int num = atLeast(4097);
       if (VERBOSE) {
         System.out.println("\nTEST: numDocs=" + num);
       }
       for(int id=0;id<num;id++) {
+        Document doc = w.newDocument();
         if (random().nextInt(4) == 3) {
-          f.setStringValue("a");
+          doc.addAtom("field", "a");
           aDocs.add(id);
         } else {
-          f.setStringValue("b");
+          doc.addAtom("field", "b");
         }
-        idField.setStringValue(""+id);
+        
+        doc.addUniqueInt("id", id);
         w.addDocument(doc);
         if (VERBOSE) {
           System.out.println("\nTEST: doc upto " + id);
@@ -67,7 +64,7 @@
       final DirectoryReader r = w.getReader();
       final int[] idToDocID = new int[r.maxDoc()];
       for(int docID=0;docID<idToDocID.length;docID++) {
-        int id = Integer.parseInt(r.document(docID).get("id"));
+        int id = r.document(docID).getInt("id");
         if (aDocs.contains(id)) {
           aDocIDs.add(docID);
         } else {
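
TestStressAdvance likewise drops the reuse-a-single-Field idiom (setStringValue
on shared Field instances) in favor of a fresh schema-bound document per
iteration, and reads the ID back as an int rather than parsing a stored
string. Under the same API assumptions (useA stands in for the random choice
above):

    Document doc = w.newDocument();
    doc.addAtom("field", useA ? "a" : "b");  // un-analyzed single-token value
    doc.addUniqueInt("id", id);
    w.addDocument(doc);
    // later, typed retrieval; replaces Integer.parseInt(doc.get("id")):
    int got = r.document(docID).getInt("id");
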
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java
index c6c856b..aff3779 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java
@@ -22,9 +22,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -44,6 +43,8 @@
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter w = new IndexWriter(dir, iwc);
+    final FieldTypes fieldTypes = w.getFieldTypes();
+
     final int iters = atLeast(2000);
     final Map<Integer,Boolean> exists = new ConcurrentHashMap<>();
     Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 6)];
@@ -60,23 +61,23 @@
                 synchronized (locks[id]) {
                   Boolean v = exists.get(id);
                   if (v == null || v.booleanValue() == false) {
-                    Document doc = new Document();
-                    doc.add(newStringField("id", ""+id, Field.Store.NO));
+                    Document doc = w.newDocument();
+                    doc.addInt("id", id);
                     w.addDocument(doc);
                     exists.put(id, true);
                   } else {
                     if (deleteMode == 0) {
                       // Always delete by term
-                      w.deleteDocuments(new Term("id", ""+id));
+                      w.deleteDocuments(fieldTypes.newIntTerm("id", id));
                     } else if (deleteMode == 1) {
                       // Always delete by query
-                      w.deleteDocuments(new TermQuery(new Term("id", ""+id)));
+                      w.deleteDocuments(fieldTypes.newExactIntQuery("id", id));
                     } else {
                       // Mixed
                       if (random().nextBoolean()) {
-                        w.deleteDocuments(new Term("id", ""+id));
+                        w.deleteDocuments(fieldTypes.newIntTerm("id", id));
                       } else {
-                        w.deleteDocuments(new TermQuery(new Term("id", ""+id)));
+                        w.deleteDocuments(fieldTypes.newExactIntQuery("id", id));
                       }
                     }
                     exists.put(id, false);
@@ -106,7 +107,7 @@
     IndexSearcher s = newSearcher(r);
     for(Map.Entry<Integer,Boolean> ent : exists.entrySet()) {
       int id = ent.getKey();
-      TopDocs hits = s.search(new TermQuery(new Term("id", ""+id)), 1);
+      TopDocs hits = s.search(fieldTypes.newExactIntQuery("id", id), 1);
       if (ent.getValue()) {
         assertEquals(1, hits.totalHits);
       } else {
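
TestStressDeletes exercises both delete paths against the int field:
delete-by-term via fieldTypes.newIntTerm and delete-by-query via
fieldTypes.newExactIntQuery; the same exact-int query replaces
TermQuery(new Term(...)) on the search side. A sketch under the same API
assumptions:

    FieldTypes fieldTypes = w.getFieldTypes();
    w.deleteDocuments(fieldTypes.newIntTerm("id", id));        // by term
    w.deleteDocuments(fieldTypes.newExactIntQuery("id", id));  // by query
    TopDocs hits = searcher.search(fieldTypes.newExactIntQuery("id", id), 1);
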
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
index 1050991..609e816 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
@@ -74,19 +74,21 @@
 
     @Override
     public void doWork() throws Exception {
+      FieldTypes fieldTypes = writer.getFieldTypes();
+
       // Add 10 docs:
       for(int j=0; j<10; j++) {
-        Document d = new Document();
+        Document d = writer.newDocument();
         int n = random().nextInt();
-        d.add(newStringField("id", Integer.toString(nextID++), Field.Store.YES));
-        d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
+        d.addInt("id", nextID++);
+        d.addLargeText("contents", English.intToEnglish(n));
         writer.addDocument(d);
       }
 
       // Delete 5 docs:
       int deleteID = nextID-1;
       for(int j=0; j<5; j++) {
-        writer.deleteDocuments(new Term("id", ""+deleteID));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", deleteID));
         deleteID -= 2;
       }
     }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 926d8db..1e4a72e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -25,12 +25,11 @@
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.TermQuery;
@@ -64,7 +63,7 @@
     // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
     int maxThreadStates = 1+random().nextInt(10);
     boolean doReaderPooling = random().nextBoolean();
-    Map<String,Document> docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
+    Map<String,List<LowSchemaField>> docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
     indexSerial(random(), docs, dir2);
 
     // verifying verify
@@ -99,7 +98,7 @@
       if (VERBOSE) {
         System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor + " maxBufferedDocs=" + maxBufferedDocs);
       }
-      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
+      Map<String,List<LowSchemaField>> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
       if (VERBOSE) {
         System.out.println("TEST: index serial");
       }
@@ -116,9 +115,9 @@
 
   static Term idTerm = new Term("id","");
   IndexingThread[] threads;
-  static Comparator<GeneralField> fieldNameComparator = new Comparator<GeneralField>() {
+  static Comparator<IndexableField> fieldNameComparator = new Comparator<IndexableField>() {
     @Override
-    public int compare(GeneralField o1, GeneralField o2) {
+    public int compare(IndexableField o1, IndexableField o2) {
       return o1.name().compareTo(o2.name());
     }
   };
@@ -128,12 +127,12 @@
   // everything.
   
   public static class DocsAndWriter {
-    Map<String,Document> docs;
+    Map<String,List<LowSchemaField>> docs;
     IndexWriter writer;
   }
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map<String,Document> docs = new HashMap<>();
+    Map<String,List<LowSchemaField>> docs = new HashMap<>();
     IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
             .setOpenMode(OpenMode.CREATE)
             .setRAMBufferSizeMB(0.1)
@@ -184,9 +183,9 @@
     return dw;
   }
   
-  public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates,
-                                          boolean doReaderPooling) throws IOException, InterruptedException {
-    Map<String,Document> docs = new HashMap<>();
+  public Map<String,List<LowSchemaField>> indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates,
+                                                      boolean doReaderPooling) throws IOException, InterruptedException {
+    Map<String,List<LowSchemaField>> docs = new HashMap<>();
     IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setRAMBufferSizeMB(0.1)
@@ -232,23 +231,17 @@
   }
 
   
-  public static void indexSerial(Random random, Map<String,Document> docs, Directory dir) throws IOException {
+  public static void indexSerial(Random random, Map<String,List<LowSchemaField>> docs, Directory dir) throws IOException {
     IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
 
     // index all docs in a single thread
-    Iterator<Document> iter = docs.values().iterator();
+    Iterator<List<LowSchemaField>> iter = docs.values().iterator();
     while (iter.hasNext()) {
-      Document d = iter.next();
-      ArrayList<Field> fields = new ArrayList<>();
-      fields.addAll(d.getFields());
+      List<LowSchemaField> d = iter.next();
+      List<LowSchemaField> fields = new ArrayList<>(d);
       // put fields in same order each time
       Collections.sort(fields, fieldNameComparator);
-      
-      Document d1 = new Document();
-      for (int i=0; i<fields.size(); i++) {
-        d1.add(fields.get(i));
-      }
-      w.addDocument(d1);
+      w.addDocument(fields);
       // System.out.println("indexing "+d1);
     }
     
@@ -276,11 +269,11 @@
       Bits liveDocs = sub.getLiveDocs();
       System.out.println("  " + ((SegmentReader) sub).getSegmentInfo());
       for(int docID=0;docID<sub.maxDoc();docID++) {
-        StoredDocument doc = sub.document(docID);
+        Document doc = sub.document(docID);
         if (liveDocs == null || liveDocs.get(docID)) {
-          System.out.println("    docID=" + docID + " id:" + doc.get("id"));
+          System.out.println("    docID=" + docID + " id:" + doc.getString("id"));
         } else {
-          System.out.println("    DEL docID=" + docID + " id:" + doc.get("id"));
+          System.out.println("    DEL docID=" + docID + " id:" + doc.getString("id"));
         }
       }
     }
@@ -563,9 +556,9 @@
     }
   }
 
-  public static void verifyEquals(StoredDocument d1, StoredDocument d2) {
-    List<StorableField> ff1 = d1.getFields();
-    List<StorableField> ff2 = d2.getFields();
+  public static void verifyEquals(Document d1, Document d2) {
+    List<IndexableField> ff1 = d1.getFields();
+    List<IndexableField> ff2 = d2.getFields();
 
     Collections.sort(ff1, fieldNameComparator);
     Collections.sort(ff2, fieldNameComparator);
@@ -573,17 +566,17 @@
     assertEquals(ff1 + " : " + ff2, ff1.size(), ff2.size());
 
     for (int i=0; i<ff1.size(); i++) {
-      StorableField f1 = ff1.get(i);
-      StorableField f2 = ff2.get(i);
+      IndexableField f1 = ff1.get(i);
+      IndexableField f2 = ff2.get(i);
       if (f1.binaryValue() != null) {
         assert(f2.binaryValue() != null);
       } else {
         String s1 = f1.stringValue();
         String s2 = f2.stringValue();
         assertEquals(ff1 + " : " + ff2, s1,s2);
-        }
       }
     }
+  }
 
   public static void verifyEquals(Fields d1, Fields d2) throws IOException {
     if (d1 == null) {
@@ -683,7 +676,7 @@
     int base;
     int range;
     int iterations;
-    Map<String,Document> docs = new HashMap<>();
+    Map<String,List<LowSchemaField>> docs = new HashMap<>();
     Random r;
 
     public int nextInt(int lim) {
@@ -754,81 +747,66 @@
       return Integer.toString(base + nextInt(range));
     }
 
-    public void indexDoc() throws IOException {
-      Document d = new Document();
+    private void setTermVectors(LowSchemaField field, Map<String,LowSchemaField> prevFields) {
+      LowSchemaField prev = prevFields.get(field.name());
+      if (prev == null) {
+        // First time we see this field name in this doc: randomize TV settings:
+        switch (nextInt(4)) {
+        case 0:
+          break;
+        case 1:
+          field.enableTermVectors(false, false, false);
+          break;
+        case 2:
+          field.enableTermVectors(true, false, false);
+          break;
+        case 3:
+          field.enableTermVectors(false, true, false);
+          break;
+        }
+        prevFields.put(field.name(), field);
+      } else {
+        field.enableTermVectors(prev.storeTermVectorPositions(),
+                                prev.storeTermVectorOffsets(),
+                                prev.storeTermVectorPayloads());
+      }
+    }
 
-      FieldType customType1 = new FieldType(TextField.TYPE_STORED);
-      customType1.setTokenized(false);
-      customType1.setOmitNorms(true);
-      
-      ArrayList<Field> fields = new ArrayList<>();
+    public void indexDoc(Analyzer a) throws IOException {
+
+      List<LowSchemaField> fields = new ArrayList<>();
       String idString = getIdString();
-      Field idField =  newField("id", idString, customType1);
+      LowSchemaField idField = new LowSchemaField(a, "id", idString, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, false);
+      idField.disableNorms();
       fields.add(idField);
 
-      Map<String,FieldType> tvTypes = new HashMap<>();
+      Map<String,LowSchemaField> prevField = new HashMap<>();
 
       int nFields = nextInt(maxFields);
       for (int i=0; i<nFields; i++) {
 
         String fieldName = "f" + nextInt(100);
-        FieldType customType;
 
-        // Use the same term vector settings if we already
-        // added this field to the doc:
-        FieldType oldTVType = tvTypes.get(fieldName);
-        if (oldTVType != null) {
-          customType = new FieldType(oldTVType);
-        } else {
-          customType = new FieldType();
-          switch (nextInt(4)) {
-          case 0:
-            break;
-          case 1:
-            customType.setStoreTermVectors(true);
-            break;
-          case 2:
-            customType.setStoreTermVectors(true);
-            customType.setStoreTermVectorPositions(true);
-            break;
-          case 3:
-            customType.setStoreTermVectors(true);
-            customType.setStoreTermVectorOffsets(true);
-            break;
-          }
-          FieldType newType = new FieldType(customType);
-          newType.freeze();
-          tvTypes.put(fieldName, newType);
-        }
+        LowSchemaField field;
         
         switch (nextInt(4)) {
           case 0:
-            customType.setStored(true);
-            customType.setOmitNorms(true);
-            customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-            customType.freeze();
-            fields.add(newField(fieldName, getString(1), customType));
+            field = new LowSchemaField(a, fieldName, getString(1), IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+            field.disableNorms();
+            setTermVectors(field, prevField);
             break;
           case 1:
-            customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-            customType.setTokenized(true);
-            customType.freeze();
-            fields.add(newField(fieldName, getString(0), customType));
+            field = new LowSchemaField(a, fieldName, getString(0), IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+            field.doNotStore();
+            setTermVectors(field, prevField);
             break;
           case 2:
-            customType.setStored(true);
-            customType.setStoreTermVectors(false);
-            customType.setStoreTermVectorOffsets(false);
-            customType.setStoreTermVectorPositions(false);
-            customType.freeze();
-            fields.add(newField(fieldName, getString(0), customType));
+            field = new LowSchemaField(a, fieldName, getString(0), IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+            // no term vectors
             break;
           case 3:
-            customType.setStored(true);
-            customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-            customType.setTokenized(true);
-            customType.freeze();
-            fields.add(newField(fieldName, getString(bigFieldSize), customType));
+            field = new LowSchemaField(a, fieldName, getString(0), IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+            setTermVectors(field, prevField);
             break;
         }
       }
@@ -837,18 +815,15 @@
         Collections.sort(fields, fieldNameComparator);
       } else {
         // random placement of id field also
-        Collections.swap(fields,nextInt(fields.size()), 0);
+        Collections.swap(fields, nextInt(fields.size()), 0);
       }
 
-      for (int i=0; i<fields.size(); i++) {
-        d.add(fields.get(i));
-      }
       if (VERBOSE) {
         System.out.println(Thread.currentThread().getName() + ": indexing id:" + idString);
       }
-      w.updateDocument(new Term("id", idString), d);
+      w.updateDocument(new Term("id", idString), fields);
       //System.out.println(Thread.currentThread().getName() + ": indexing "+d);
-      docs.put(idString, d);
+      docs.put(idString, fields);
     }
 
     public void deleteDoc() throws IOException {
@@ -871,6 +846,7 @@
 
     @Override
     public void run() {
+      Analyzer a = new MockAnalyzer(random());
       try {
         r = new Random(base+range+seed);
         for (int i=0; i<iterations; i++) {
@@ -880,7 +856,7 @@
           } else if (what < 10) {
             deleteByQuery();
           } else {
-            indexDoc();
+            indexDoc(a);
           }
         }
       } catch (Throwable e) {
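
TestStressIndexing2 deliberately bypasses the high-level schema: it builds
List<LowSchemaField> directly so each document can carry its own randomized
per-field settings, and IndexWriter.updateDocument accepts the raw field list.
A hedged sketch of building one such field; the constructor argument order
(analyzer, name, value, index options, plus a final boolean flag whose meaning
is not visible in these hunks) follows the code above, and enableTermVectors
takes (positions, offsets, payloads) as the setTermVectors helper shows:

    LowSchemaField idField = new LowSchemaField(analyzer, "id", idString,
        IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, false);
    idField.disableNorms();
    List<LowSchemaField> fields = new ArrayList<>();
    fields.add(idField);
    w.updateDocument(new Term("id", idString), fields);
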
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
index 7637eea..4f03437 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -28,8 +28,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -82,9 +80,6 @@
     final int nReadThreads = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5);
     initModel(ndocs);
 
-    final FieldType storedOnlyType = new FieldType();
-    storedOnlyType.setStored(true);
-
     if (VERBOSE) {
       System.out.println("\n");
       System.out.println("TEST: commitPercent=" + commitPercent);
@@ -231,9 +226,9 @@
 
                     // add tombstone first
                     if (tombstones) {
-                      Document d = new Document();
-                      d.add(newStringField("id", "-"+Integer.toString(id), Field.Store.YES));
-                      d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                      Document d = writer.newDocument();
+                      d.addAtom("id", "-"+Integer.toString(id));
+                      d.addStoredString(field, Long.toString(nextVal));
                       writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                     }
 
@@ -247,9 +242,9 @@
 
                     // add tombstone first
                     if (tombstones) {
-                      Document d = new Document();
-                      d.add(newStringField("id", "-"+Integer.toString(id), Field.Store.YES));
-                      d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                      Document d = writer.newDocument();
+                      d.addAtom("id", "-"+Integer.toString(id));
+                      d.addStoredString(field, Long.toString(nextVal));
                       writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                     }
 
@@ -260,9 +255,9 @@
                     model.put(id, -nextVal);
                   } else {
                     // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
-                    Document d = new Document();
-                    d.add(newStringField("id", Integer.toString(id), Field.Store.YES));
-                    d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                    Document d = writer.newDocument();
+                    d.addAtom("id", Integer.toString(id));
+                    d.addStoredString(field, Long.toString(nextVal));
                     if (VERBOSE) {
                       System.out.println("TEST: " + Thread.currentThread().getName() + ": u id:" + id + " val=" + nextVal);
                     }
@@ -356,13 +351,13 @@
                 if (results.totalHits != 1) {
                   System.out.println("FAIL: hits id:" + id + " val=" + val);
                   for(ScoreDoc sd : results.scoreDocs) {
-                    final StoredDocument doc = r.document(sd.doc);
-                    System.out.println("  docID=" + sd.doc + " id:" + doc.get("id") + " foundVal=" + doc.get(field));
+                    final Document doc = r.document(sd.doc);
+                    System.out.println("  docID=" + sd.doc + " id:" + doc.get("id") + " foundVal=" + doc.getString(field));
                   }
                   fail("id=" + id + " reader=" + r + " totalHits=" + results.totalHits);
                 }
-                StoredDocument doc = searcher.doc(results.scoreDocs[0].doc);
-                long foundVal = Long.parseLong(doc.get(field));
+                Document doc = searcher.doc(results.scoreDocs[0].doc);
+                long foundVal = Long.parseLong(doc.getString(field));
                 if (foundVal < Math.abs(val)) {
                   fail("foundVal=" + foundVal + " val=" + val + " id=" + id + " reader=" + r);
                 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
index 223fa70..09928c1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
@@ -18,7 +18,7 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -34,22 +34,17 @@
     
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
-    Document doc = new Document();
-    Field id = newStringField("id", "", Field.Store.NO);
-    Field field1 = newTextField("foo", "", Field.Store.NO);
-    Field field2 = newTextField("bar", "", Field.Store.NO);
-    doc.add(id);
-    doc.add(field1);
-    doc.add(field2);
     for (int i = 0; i < numDocs; i++) {
-      id.setStringValue("" + i);
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
       char ch1 = (char) TestUtil.nextInt(random(), 'a', 'z');
       char ch2 = (char) TestUtil.nextInt(random(), 'a', 'z');
-      field1.setStringValue("" + ch1 + " " + ch2);
+      doc.addLargeText("foo", "" + ch1 + " " + ch2);
       ch1 = (char) TestUtil.nextInt(random(), 'a', 'z');
       ch2 = (char) TestUtil.nextInt(random(), 'a', 'z');
-      field2.setStringValue("" + ch1 + " " + ch2);
+      doc.addLargeText("bar", "" + ch1 + " " + ch2);
       writer.addDocument(doc);
     }
     
@@ -60,7 +55,7 @@
     
     int numDeletions = atLeast(20);
     for (int i = 0; i < numDeletions; i++) {
-      writer.deleteDocuments(new Term("id", "" + random().nextInt(numDocs)));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", random().nextInt(numDocs)));
     }
     writer.forceMerge(1);
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
index 356353e..59126d1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
@@ -22,9 +22,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -45,29 +43,30 @@
   public static void beforeClass() throws Exception {                  
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
     //writer.setNoCFSRatio(1.0);
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      FieldType ft = new FieldType(TextField.TYPE_STORED);
       int mod3 = i % 3;
       int mod2 = i % 2;
       if (mod2 == 0 && mod3 == 0) {
-        ft.setStoreTermVectors(true);
-        ft.setStoreTermVectorOffsets(true);
-        ft.setStoreTermVectorPositions(true);
+        fieldTypes.enableTermVectors("field");
+        fieldTypes.enableTermVectorOffsets("field");
+        fieldTypes.enableTermVectorPositions("field");
       } else if (mod2 == 0) {
-        ft.setStoreTermVectors(true);
-        ft.setStoreTermVectorPositions(true);
+        fieldTypes.enableTermVectors("field");
+        fieldTypes.enableTermVectorPositions("field");
       } else if (mod3 == 0) {
-        ft.setStoreTermVectors(true);
-        ft.setStoreTermVectorOffsets(true);
+        fieldTypes.enableTermVectors("field");
+        fieldTypes.enableTermVectorOffsets("field");
       } else {
-        ft.setStoreTermVectors(true);
+        fieldTypes.enableTermVectors("field");
       }
-      doc.add(new Field("field", English.intToEnglish(i), ft));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", English.intToEnglish(i));
       //test no term vectors too
-      doc.add(new TextField("noTV", English.intToEnglish(i), Field.Store.YES));
+      doc.addLargeText("noTV", English.intToEnglish(i));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
@@ -89,17 +88,18 @@
 
   private void createDir(Directory dir) throws IOException {
     IndexWriter writer = createWriter(dir);
-    writer.addDocument(createDoc());
+    writer.addDocument(createDoc(writer));
     writer.close();
   }
 
-  private Document createDoc() {
-    Document doc = new Document();
-    final FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorOffsets(true);
-    ft.setStoreTermVectorPositions(true);
-    doc.add(newField("c", "aaa", ft));
+  private Document createDoc(IndexWriter writer) {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("c");
+    fieldTypes.enableTermVectorOffsets("c");
+    fieldTypes.enableTermVectorPositions("c");
+
+    Document doc = writer.newDocument();
+    doc.addLargeText("c", "aaa");
     return doc;
   }
 
@@ -118,7 +118,7 @@
     // with maxBufferedDocs=2, this results in two segments, so that forceMerge
     // actually does something.
     for (int i = 0; i < 4; i++) {
-      writer.addDocument(createDoc());
+      writer.addDocument(createDoc(writer));
     }
     writer.forceMerge(1);
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index a8adfb7..caad85c8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -27,10 +27,7 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -96,28 +93,20 @@
             setMergePolicy(newLogMergePolicy(false, 10))
             .setUseCompoundFile(false)
     );
-
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
     for(int i=0;i<testFields.length;i++) {
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      if (testFieldsStorePos[i] && testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
-        customType.setStoreTermVectorOffsets(true);
+      fieldTypes.enableTermVectors(testFields[i]);
+      if (testFieldsStorePos[i]) {
+        fieldTypes.enableTermVectorPositions(testFields[i]);
       }
-      else if (testFieldsStorePos[i] && !testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
+      if (testFieldsStoreOff[i]) {
+        fieldTypes.enableTermVectorOffsets(testFields[i]);
       }
-      else if (!testFieldsStorePos[i] && testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
-        customType.setStoreTermVectorOffsets(true);
-      }
-      else {
-        customType.setStoreTermVectors(true);
-      }
-      doc.add(new Field(testFields[i], "", customType));
+    }
+
+    Document doc = writer.newDocument();
+    for(int i=0;i<testFields.length;i++) {
+      doc.addLargeText(testFields[i], testTerms[i]);
     }
 
     //Create 5 documents for testing, they all have the same
@@ -195,7 +184,7 @@
   }
 
   public void testReader() throws IOException {
-    TermVectorsReader reader = Codec.getDefault().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
+    TermVectorsReader reader = seg.info.getCodec().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
     for (int j = 0; j < 5; j++) {
       Terms vector = reader.get(j).terms(testFields[0]);
       assertNotNull(vector);
@@ -339,117 +328,59 @@
     MockAnalyzer a = new MockAnalyzer(random());
     a.setEnableChecks(false);
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, a);
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPayloads(true);
-    Document doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector payloads without term vector positions (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field\": cannot enable termVectorPayloads when termVectorPositions haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorOffsets(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorOffsets("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector offsets when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorOffsets when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorPositions(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPositions("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector positions when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorPositions when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector payloads when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorPayloads when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    fieldTypes.enableTermVectors("field3");
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field3");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector payloads without term vector positions (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field3\": cannot enable termVectorPayloads when termVectorPositions haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectors(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    fieldTypes.enableTermVectors("field4");
+    Document doc = w.newDocument();
     try {
+      doc.addStoredString("field4", "foo");
       w.addDocument(doc);
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot store term vectors for a field that is not indexed (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorPositions(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot store term vector positions for a field that is not indexed (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorOffsets(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot store term vector offsets for a field that is not indexed (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot store term vector payloads for a field that is not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field4\": cannot enable term vectors when indexOptions is NONE", ise.getMessage());
     }
 
     w.close();
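
The illegal-combination checks in TestTermVectorsReader move from addDocument
time (IllegalArgumentException raised per Field) to schema time: FieldTypes
itself rejects inconsistent term-vector settings with an IllegalStateException
the moment they are enabled. A sketch of the new failure mode, with the
message text as asserted above:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableTermVectors("field");
    fieldTypes.enableTermVectorOffsets("field");
    try {
      // payloads require positions, which were never enabled:
      fieldTypes.enableTermVectorPayloads("field");
      fail("did not hit exception");
    } catch (IllegalStateException ise) {
      // "field \"field\": cannot enable termVectorPayloads when
      //  termVectorPositions haven't been enabled"
    }
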
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
index edd1c9e..ff16e3d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -26,11 +26,7 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -43,18 +39,22 @@
   // LUCENE-1442
   public void testDoubleOffsetCounting() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
-    Field f2 = newField("field", "", customType);
-    doc.add(f2);
-    doc.add(f);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.setMultiValued("field");
+    fieldTypes.setAnalyzerOffsetGap("field", 0);
+    fieldTypes.setAnalyzerPositionGap("field", 0);
+    fieldTypes.setDocValuesType("field", DocValuesType.NONE);
+    fieldTypes.disableFastRanges("field");
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document doc = w.newDocument();
+    doc.addAtom("field", "abcd");
+    doc.addAtom("field", "abcd");
+    doc.addAtom("field", "");
+    doc.addAtom("field", "abcd");
     w.addDocument(doc);
     w.close();
 
@@ -102,15 +102,15 @@
   // LUCENE-1442
   public void testDoubleOffsetCounting2() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "abcd");
+    doc.addLargeText("field", "abcd");
     w.addDocument(doc);
     w.close();
 
@@ -137,15 +137,15 @@
   // LUCENE-1448
   public void testEndOffsetPositionCharAnalyzer() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd   ", customType);
-    doc.add(f);
-    doc.add(f);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "abcd   ");
+    doc.addLargeText("field", "abcd   ");
     w.addDocument(doc);
     w.close();
 
@@ -174,15 +174,15 @@
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
     try (TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", "abcd   "))) {
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      Field f = new Field("field", stream, customType);
-      doc.add(f);
-      doc.add(f);
+      doc.addLargeText("field", stream);
+      doc.addLargeText("field", stream);
       w.addDocument(doc);
     }
     w.close();
@@ -211,14 +211,14 @@
   public void testEndOffsetPositionStopFilter() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd the", customType);
-    doc.add(f);
-    doc.add(f);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "abcd the");
+    doc.addLargeText("field", "abcd the");
     w.addDocument(doc);
     w.close();
 
@@ -246,15 +246,14 @@
   public void testEndOffsetPositionStandard() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd the  ", customType);
-    Field f2 = newField("field", "crunch man", customType);
-    doc.add(f);
-    doc.add(f2);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "abcd the  ");
+    doc.addLargeText("field", "crunch man");
     w.addDocument(doc);
     w.close();
 
@@ -289,16 +288,15 @@
   // LUCENE-1448
   public void testEndOffsetPositionStandardEmptyField() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "", customType);
-    Field f2 = newField("field", "crunch man", customType);
-    doc.add(f);
-    doc.add(f2);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    doc.addLargeText("field", "");
+    doc.addLargeText("field", "crunch man");
     w.addDocument(doc);
     w.close();
 
@@ -328,19 +326,15 @@
   public void testEndOffsetPositionStandardEmptyField2() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(newField("field", "", customType));
-
-    Field f2 = newField("field", "crunch", customType);
-    doc.add(f2);
-
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.setMultiValued("field");
+    doc.addLargeText("field", "abcd");
+    doc.addLargeText("field", "");
+    doc.addLargeText("field", "crunch");
     w.addDocument(doc);
     w.close();
 
@@ -378,24 +372,19 @@
           .setMergeScheduler(new SerialMergeScheduler())
           .setMergePolicy(new LogDocMergePolicy()));
 
-      Document document = new Document();
-      FieldType customType = new FieldType();
-      customType.setStored(true);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("termVector");
+      fieldTypes.enableTermVectorOffsets("termVector");
+      fieldTypes.enableTermVectorPositions("termVector");
 
-      Field storedField = newField("stored", "stored", customType);
-      document.add(storedField);
+      Document document = writer.newDocument();
+      document.addStoredString("stored", "stored");
       writer.addDocument(document);
       writer.addDocument(document);
 
-      document = new Document();
-      document.add(storedField);
-      FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-      customType2.setStoreTermVectors(true);
-      customType2.setStoreTermVectorPositions(true);
-      customType2.setStoreTermVectorOffsets(true);
-      Field termVectorField = newField("termVector", "termVector", customType2);
-
-      document.add(termVectorField);
+      document = writer.newDocument();
+      document.addStoredString("stored", "stored");
+      document.addLargeText("termVector", "termVector");
       writer.addDocument(document);
       writer.forceMerge(1);
       writer.close();
@@ -431,24 +420,19 @@
           .setMergeScheduler(new SerialMergeScheduler())
           .setMergePolicy(new LogDocMergePolicy()));
 
-      Document document = new Document();
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("termVector");
+      fieldTypes.enableTermVectorOffsets("termVector");
+      fieldTypes.enableTermVectorPositions("termVector");
 
-      FieldType customType = new FieldType();
-      customType.setStored(true);
-
-      Field storedField = newField("stored", "stored", customType);
-      document.add(storedField);
+      Document document = writer.newDocument();
+      document.addStoredString("stored", "stored");
       writer.addDocument(document);
       writer.addDocument(document);
 
-      document = new Document();
-      document.add(storedField);
-      FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-      customType2.setStoreTermVectors(true);
-      customType2.setStoreTermVectorPositions(true);
-      customType2.setStoreTermVectorOffsets(true);
-      Field termVectorField = newField("termVector", "termVector", customType2);
-      document.add(termVectorField);
+      document = writer.newDocument();
+      document.addStoredString("stored", "stored");
+      document.addLargeText("termVector", "termVector");
       writer.addDocument(document);
       writer.forceMerge(1);
       writer.close();
@@ -470,21 +454,17 @@
         .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergeScheduler(new SerialMergeScheduler())
         .setMergePolicy(new LogDocMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termVector");
+    fieldTypes.enableTermVectorOffsets("termVector");
+    fieldTypes.enableTermVectorPositions("termVector");
 
-    Document document = new Document();
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-
-    Field storedField = newField("stored", "stored", customType);
-    document.add(storedField);
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    Field termVectorField = newField("termVector", "termVector", customType2);
-    document.add(termVectorField);
-    for(int i=0;i<10;i++)
+    Document document = writer.newDocument();
+    document.addStoredString("stored", "stored");
+    document.addLargeText("termVector", "termVector");
+    for(int i=0;i<10;i++) {
       writer.addDocument(document);
+    }
     writer.close();
 
     writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
@@ -492,9 +472,12 @@
         .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergeScheduler(new SerialMergeScheduler())
         .setMergePolicy(new LogDocMergePolicy()));
-    for(int i=0;i<6;i++)
+    document = writer.newDocument();
+    document.addStoredString("stored", "stored");
+    document.addLargeText("termVector", "termVector");
+    for(int i=0;i<6;i++) {
       writer.addDocument(document);
-
+    }
     writer.forceMerge(1);
     writer.close();
 
@@ -507,169 +490,22 @@
     dir.close();
   }
   
-  // LUCENE-1008
-  public void testNoTermVectorAfterTermVector() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document document = new Document();
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    document.add(newField("tvtest", "a b c", customType2));
-    iw.addDocument(document);
-    document = new Document();
-    document.add(newTextField("tvtest", "x y z", Field.Store.NO));
-    iw.addDocument(document);
-    // Make first segment
-    iw.commit();
-
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    document = new Document();
-    document.add(newField("tvtest", "a b c", customType));
-    iw.addDocument(document);
-    // Make 2nd segment
-    iw.commit();
-
-    iw.forceMerge(1);
-    iw.close();
-    dir.close();
-  }
-
-  // LUCENE-1010
-  public void testNoTermVectorAfterTermVectorMerge() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document document = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    document.add(newField("tvtest", "a b c", customType));
-    iw.addDocument(document);
-    iw.commit();
-
-    document = new Document();
-    document.add(newTextField("tvtest", "x y z", Field.Store.NO));
-    iw.addDocument(document);
-    // Make first segment
-    iw.commit();
-
-    iw.forceMerge(1);
-
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    document.add(newField("tvtest", "a b c", customType2));
-    document = new Document();
-    iw.addDocument(document);
-    // Make 2nd segment
-    iw.commit();
-    iw.forceMerge(1);
-
-    iw.close();
-    dir.close();
-  }
-  
-  /** 
-   * In a single doc, for the same field, mix the term vectors up 
-   */
-  public void testInconsistentTermVectorOptions() throws IOException {
-    FieldType a, b;
-    
-    // no vectors + vectors
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    doTestMixup(a, b);
-    
-    // vectors + vectors with pos
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    doTestMixup(a, b);
-    
-    // vectors + vectors with off
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorOffsets(true);
-    doTestMixup(a, b);
-    
-    // vectors with pos + vectors with pos + off
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    a.setStoreTermVectorPositions(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    b.setStoreTermVectorOffsets(true);
-    doTestMixup(a, b);
-    
-    // vectors with pos + vectors with pos + pay
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    a.setStoreTermVectorPositions(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    b.setStoreTermVectorPayloads(true);
-    doTestMixup(a, b);
-  }
-  
-  private void doTestMixup(FieldType ft1, FieldType ft2) throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    
-    // add 3 good docs
-    for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
-      iw.addDocument(doc);
-    }
-    
-    // add broken doc
-    Document doc = new Document();
-    doc.add(new Field("field", "value1", ft1));
-    doc.add(new Field("field", "value2", ft2));
-    
-    // ensure broken doc hits exception
-    try {
-      iw.addDocument(doc);
-      fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
-      assertNotNull(iae.getMessage());
-      assertTrue(iae.getMessage().startsWith("all instances of a given field name must have the same term vectors settings"));
-    }
-    
-    // ensure good docs are still ok
-    IndexReader ir = iw.getReader();
-    assertEquals(3, ir.numDocs());
-    
-    ir.close();
-    iw.close();
-    dir.close();
-  }
-
   // LUCENE-5611: don't abort segment when term vector settings are wrong
   public void testNoAbortOnBadTVSettings() throws Exception {
     Directory dir = newDirectory();
     // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter iw = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
 
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     iw.addDocument(doc);
-    FieldType ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectors(true);
-    ft.freeze();
-    doc.add(new Field("field", "value", ft));
     try {
-      iw.addDocument(doc);
+      doc.addStoredString("field", "value");
       fail("should have hit exc");
-    } catch (IllegalArgumentException iae) {
-      // expected
+    } catch (IllegalStateException ise) {
+      assertEquals("field \"field\": cannot enable term vectors when indexOptions is NONE", ise.getMessage());
     }
     IndexReader r = DirectoryReader.open(iw, true);
 
@@ -679,4 +515,5 @@
     r.close();
     dir.close();
   }
+
 }
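Taken together, the hunks above all apply one migration: per-Field FieldType flags become writer-level FieldTypes schema calls, documents are obtained from the writer itself, and a field that will hold several values must be declared multi-valued up front. A condensed sketch of the new idiom, assuming the LuceneTestCase helpers and imports this file already uses:

  public void testTermVectorSchemaSketch() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableTermVectors("field");         // was customType.setStoreTermVectors(true)
    fieldTypes.enableTermVectorOffsets("field");   // was setStoreTermVectorOffsets(true)
    fieldTypes.enableTermVectorPositions("field"); // was setStoreTermVectorPositions(true)
    fieldTypes.setMultiValued("field");            // required before adding "field" twice
    Document doc = w.newDocument();                // documents now come from the writer
    doc.addLargeText("field", "abcd");
    doc.addLargeText("field", "abcd");             // legal: field is multi-valued
    w.addDocument(doc);
    w.close();
    dir.close();
  }

Note the sharper failure mode in testNoAbortOnBadTVSettings above: an invalid combination now throws IllegalStateException, with a concrete message, at doc.addStoredString time, before the document ever reaches the writer, instead of IllegalArgumentException out of addDocument.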
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
index a1d3a77..41fc820 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -84,9 +83,6 @@
       }
     };
 
-    Document doc = new Document();
-    
-    doc.add(newStringField(field, val, Field.Store.NO));
     IndexWriter writer = new IndexWriter(
         dir,
         newIndexWriterConfig(analyzer)
@@ -94,7 +90,8 @@
           .setMaxBufferedDocs(100)
           .setMergePolicy(newLogMergePolicy(100))
     );
-
+    Document doc = writer.newDocument();
+    doc.addAtom(field, val);
     for (int i=0; i<ndocs; i++) {
       writer.addDocument(doc);
     }
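The reordering in this hunk is forced by the API rather than stylistic: a Document comes from IndexWriter.newDocument(), so the document/field setup that used to precede writer construction has to move below it. A single writer-created document can still be re-added many times, as condensed here from the hunk above:

    Document doc = writer.newDocument();
    doc.addAtom(field, val);   // replaces newStringField(field, val, Field.Store.NO)
    for (int i = 0; i < ndocs; i++) {
      writer.addDocument(doc);
    }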
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
index ac39b1a..b57df8e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
@@ -21,12 +21,6 @@
 
 import org.apache.lucene.analysis.CannedBinaryTokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -38,8 +32,8 @@
   public void testTermMinMaxBasic() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c cc ddd", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addShortText("field", "a b c cc ddd");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
@@ -57,9 +51,6 @@
     BytesRef minTerm = null;
     BytesRef maxTerm = null;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      Field field = new TextField("field", "", Field.Store.NO);
-      doc.add(field);
       //System.out.println("  doc " + i);
       CannedBinaryTokenStream.BinaryToken[] tokens = new CannedBinaryTokenStream.BinaryToken[atLeast(10)];
       for(int j=0;j<tokens.length;j++) {
@@ -77,7 +68,9 @@
         }
         tokens[j] = new CannedBinaryTokenStream.BinaryToken(tokenBytes);
       }
-      field.setTokenStream(new CannedBinaryTokenStream(tokens));
+
+      Document doc = w.newDocument();
+      doc.addLargeText("field", new CannedBinaryTokenStream(tokens));
       w.addDocument(doc);
     }
 
@@ -98,18 +91,18 @@
     int minValue = Integer.MAX_VALUE;
     int maxValue = Integer.MIN_VALUE;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document doc = w.newDocument();
       int num = random().nextInt();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new IntField("field", num, Field.Store.NO));
+      doc.addInt("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.getMinInt(terms));
-    assertEquals(maxValue, NumericUtils.getMaxInt(terms));
+    assertEquals(minValue, NumericUtils.bytesToInt(terms.getMin()));
+    assertEquals(maxValue, NumericUtils.bytesToInt(terms.getMax()));
 
     r.close();
     w.close();
@@ -123,19 +116,19 @@
     long minValue = Long.MAX_VALUE;
     long maxValue = Long.MIN_VALUE;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document doc = w.newDocument();
       long num = random().nextLong();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new LongField("field", num, Field.Store.NO));
+      doc.addLong("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
 
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.getMinLong(terms));
-    assertEquals(maxValue, NumericUtils.getMaxLong(terms));
+    assertEquals(minValue, NumericUtils.bytesToLong(terms.getMin()));
+    assertEquals(maxValue, NumericUtils.bytesToLong(terms.getMax()));
 
     r.close();
     w.close();
@@ -149,18 +142,18 @@
     float minValue = Float.POSITIVE_INFINITY;
     float maxValue = Float.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document doc = w.newDocument();
       float num = random().nextFloat();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new FloatField("field", num, Field.Store.NO));
+      doc.addFloat("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms)), 0.0f);
-    assertEquals(maxValue, NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms)), 0.0f);
+    assertEquals(minValue, NumericUtils.bytesToFloat(terms.getMin()), 0.0f);
+    assertEquals(maxValue, NumericUtils.bytesToFloat(terms.getMax()), 0.0f);
 
     r.close();
     w.close();
@@ -174,11 +167,11 @@
     double minValue = Double.POSITIVE_INFINITY;
     double maxValue = Double.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document doc = w.newDocument();
       double num = random().nextDouble();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new DoubleField("field", num, Field.Store.NO));
+      doc.addDouble("field", num);
       w.addDocument(doc);
     }
     
@@ -186,8 +179,8 @@
 
     Terms terms = MultiFields.getTerms(r, "field");
 
-    assertEquals(minValue, NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)), 0.0);
-    assertEquals(maxValue, NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)), 0.0);
+    assertEquals(minValue, NumericUtils.bytesToDouble(terms.getMin()), 0.0);
+    assertEquals(maxValue, NumericUtils.bytesToDouble(terms.getMax()), 0.0);
 
     r.close();
     w.close();
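The assertion changes in TestTerms all follow one rule: the removed NumericUtils.getMin*/getMax* helpers give way to taking the raw term bounds from Terms.getMin()/getMax() and decoding the BytesRef per numeric type. Condensed from the hunks above:

    Terms terms = MultiFields.getTerms(r, "field");
    // int    fields: NumericUtils.bytesToInt(terms.getMin()) / bytesToInt(terms.getMax())
    // long   fields: NumericUtils.bytesToLong(...)
    // float  fields: NumericUtils.bytesToFloat(...)   (was sortableIntToFloat(getMinInt(terms)))
    // double fields: NumericUtils.bytesToDouble(...)  (was sortableLongToDouble(getMinLong(terms)))
    assertEquals(minValue, NumericUtils.bytesToInt(terms.getMin()));
    assertEquals(maxValue, NumericUtils.bytesToInt(terms.getMax()));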
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
index 0644f0f..839f1f3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
@@ -22,15 +22,13 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
@@ -42,11 +40,11 @@
 
   public void test() throws Exception {
     Random random = new Random(random().nextLong());
-    final LineFileDocs docs = new LineFileDocs(random, true);
     final Directory d = newDirectory();
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
     final RandomIndexWriter w = new RandomIndexWriter(random(), d, analyzer);
+    final LineFileDocs docs = new LineFileDocs(w.w, random);
     final int numDocs = atLeast(10);
     for(int docCount=0;docCount<numDocs;docCount++) {
       w.addDocument(docs.nextDoc());
@@ -157,14 +155,13 @@
   }
 
   private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
-    Document doc = new Document();
-    doc.add(new IntField("id", id, Field.Store.YES));
-    doc.add(new NumericDocValuesField("id", id));
+    Document doc = w.newDocument();
+    doc.addUniqueInt("id", id);
     if (VERBOSE) {
       System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
     }
     for (String s2 : terms) {
-      doc.add(newStringField("f", s2, Field.Store.NO));
+      doc.addAtom("f", s2);
       termToID.put(new BytesRef(s2), id);
     }
     w.addDocument(doc);
@@ -184,6 +181,8 @@
   public void testIntersectRandom() throws IOException {
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("f");
 
     final int numTerms = atLeast(300);
     //final int numTerms = 50;
@@ -357,9 +356,8 @@
 
     final RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
     for(String term : terms) {
-      Document doc = new Document();
-      Field f = newStringField(FIELD, term, Field.Store.NO);
-      doc.add(f);
+      Document doc = w.newDocument();
+      doc.addAtom(FIELD, term);
       w.addDocument(doc);
     }
     if (r != null) {
@@ -496,10 +494,10 @@
   public void testZeroTerms() throws Exception {
     d = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(newTextField("field", "one two three", Field.Store.NO));
-    doc = new Document();
-    doc.add(newTextField("field2", "one two three", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "one two three");
+    doc = w.newDocument();
+    doc.addLargeText("field2", "one two three");
     w.addDocument(doc);
     w.commit();
     w.deleteDocuments(new Term("field", "one"));
@@ -719,16 +717,16 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newTextField("field", "aaa", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "aaa");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "bbb", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "bbb");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "ccc", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "ccc");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -769,20 +767,20 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("field", "abc", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "abc");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "abd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "abd");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "acd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "acd");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "bcd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "bcd");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -826,17 +824,21 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("field", "", Field.Store.NO));
-    doc.add(newStringField("field", "abc", Field.Store.NO));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
+
+    Document doc = w.newDocument();
+    doc.addAtom("field", "");
+    doc.addAtom("field", "abc");
     w.addDocument(doc);
 
-    doc = new Document();
+    doc = w.newDocument();
     // add empty string to both documents, so that singletonDocID == -1.
     // For an FST-based term dict, we'll expect the first arc to be
     // flagged with HAS_FINAL_OUTPUT
-    doc.add(newStringField("field", "abc", Field.Store.NO));
-    doc.add(newStringField("field", "", Field.Store.NO));
+    doc.addAtom("field", "abc");
+    doc.addAtom("field", "");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -893,8 +895,8 @@
       terms.add(prefix + TestUtil.randomRealisticUnicodeString(random(), 1, 20));
     }
     for(String term : terms) {
-      Document doc = new Document();
-      doc.add(newStringField("id", term, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addUniqueAtom("id", term);
       w.addDocument(doc);
     }
     IndexReader r = w.getReader();
@@ -933,8 +935,8 @@
         int docID = docsEnum.nextDoc();
         assertTrue(docID != DocsEnum.NO_MORE_DOCS);
         assertEquals(docID, pkLookup.lookup(termBytesRef));
-        StoredDocument doc = r.document(docID);
-        assertEquals(term, doc.get("id"));
+        Document doc = r.document(docID);
+        assertEquals(term, doc.getString("id"));
 
         if (random().nextInt(7) == 1) {
           termsEnum.next();
@@ -978,8 +980,8 @@
       IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
       iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
       RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-      Document doc = new Document();
-      doc.add(newTextField("field", sb.toString(), Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("field", sb.toString());
       w.addDocument(doc);
       IndexReader r = w.getReader();
       assertEquals(1, r.leaves().size());
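Two retrieval-side changes ride along in TestTermsEnum: a single addUniqueInt or addUniqueAtom call replaces the old pair of an indexed field plus a NumericDocValuesField for the id, and stored fields come back as a Document with typed getters rather than a StoredDocument. A fragment condensed from the hunks above, assuming an open writer w and reader r:

    Document doc = w.newDocument();
    doc.addUniqueAtom("id", term);       // was: newStringField("id", term, Field.Store.YES)
    w.addDocument(doc);
    // ... later, with a matching docID:
    Document hit = r.document(docID);    // was StoredDocument
    assertEquals(term, hit.getString("id"));  // typed getter replaces StoredDocument.get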
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
index 32afd40..2ea742c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
@@ -26,7 +26,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.AutomatonQuery;
 import org.apache.lucene.search.CheckHits;
@@ -56,15 +55,13 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
             .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    Document doc = new Document();
-    Field field = newStringField("field", "", Field.Store.YES);
-    doc.add(field);
     terms = new TreeSet<>();
  
     int num = atLeast(200);
     for (int i = 0; i < num; i++) {
       String s = TestUtil.randomUnicodeString(random());
-      field.setStringValue(s);
+      Document doc = writer.newDocument();
+      doc.addAtom("field", s);
       terms.add(new BytesRef(s));
       writer.addDocument(doc);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
index 6c8235d..74b6a1a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
@@ -17,21 +17,18 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.util.English;
+import java.util.Random;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.BeforeClass;
 
-import java.util.Random;
-
 public class TestThreadedForceMerge extends LuceneTestCase {
 
   private static Analyzer ANALYZER;
@@ -69,13 +66,10 @@
 
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(1000);
 
-      final FieldType customType = new FieldType(StringField.TYPE_STORED);
-      customType.setOmitNorms(true);
-      
       for(int i=0;i<200;i++) {
-        Document d = new Document();
-        d.add(newField("id", Integer.toString(i), customType));
-        d.add(newField("contents", English.intToEnglish(i), customType));
+        Document d = writer.newDocument();
+        d.addAtom("id", Integer.toString(i));
+        d.addAtom("contents", English.intToEnglish(i));
         writer.addDocument(d);
       }
 
@@ -93,13 +87,14 @@
               for(int j=0;j<NUM_ITER2;j++) {
                 writerFinal.forceMerge(1, false);
                 for(int k=0;k<17*(1+iFinal);k++) {
-                  Document d = new Document();
-                  d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, customType));
-                  d.add(newField("contents", English.intToEnglish(iFinal+k), customType));
+                  Document d = writerFinal.newDocument();
+                  d.addAtom("id", iterFinal + "_" + iFinal + "_" + j + "_" + k);
+                  d.addAtom("contents", English.intToEnglish(iFinal+k));
                   writerFinal.addDocument(d);
                 }
-                for(int k=0;k<9*(1+iFinal);k++)
+                for(int k=0;k<9*(1+iFinal);k++) {
                   writerFinal.deleteDocuments(new Term("id", iterFinal + "_" + iFinal + "_" + j + "_" + k));
+                }
                 writerFinal.forceMerge(1);
               }
             } catch (Throwable t) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
index b237f6d..f35bb88 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
@@ -19,7 +19,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.TestUtil;
 
@@ -40,8 +40,8 @@
     tmp.setForceMergeDeletesPctAllowed(30.0);
     IndexWriter w = new IndexWriter(dir, conf);
     for(int i=0;i<80;i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa " + (i%4), Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("content", "aaa " + (i%4));
       w.addDocument(doc);
     }
     assertEquals(80, w.maxDoc());
@@ -86,8 +86,8 @@
       int maxCount = 0;
       final int numDocs = TestUtil.nextInt(random(), 20, 100);
       for(int i=0;i<numDocs;i++) {
-        Document doc = new Document();
-        doc.add(newTextField("content", "aaa " + (i%4), Field.Store.NO));
+        Document doc = w.newDocument();
+        doc.addLargeText("content", "aaa " + (i%4));
         w.addDocument(doc);
         int count = w.getSegmentCount();
         maxCount = Math.max(count, maxCount);
@@ -117,13 +117,14 @@
     tmp.setForceMergeDeletesPctAllowed(0.0);
     conf.setMergePolicy(tmp);
 
-    final IndexWriter w = new IndexWriter(dir, conf);
+    IndexWriter w = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
     final int numDocs = atLeast(200);
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + i, Field.Store.NO));
-      doc.add(newTextField("content", "aaa " + i, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
+      doc.addLargeText("content", "aaa " + i);
       w.addDocument(doc);
     }
 
@@ -137,7 +138,7 @@
       System.out.println("\nTEST: delete doc");
     }
 
-    w.deleteDocuments(new Term("id", ""+(42+17)));
+    w.deleteDocuments(fieldTypes.newIntTerm("id", 42+17));
 
     r = w.getReader();
     assertEquals(numDocs, r.maxDoc());
@@ -224,9 +225,10 @@
     iwc.setMaxBufferedDocs(100);
     iwc.setRAMBufferSizeMB(-1);
     IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
     for(int i=0;i<15000*RANDOM_MULTIPLIER;i++) {
-      Document doc = new Document();
-      doc.add(newTextField("id", random().nextLong() + "" + random().nextLong(), Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addAtom("id", random().nextLong() + "" + random().nextLong());
       w.addDocument(doc);
     }
     IndexReader r = DirectoryReader.open(w, true);
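In TestTieredMergePolicy the id moves from a stringified number to addUniqueInt, so delete-by-term must match the schema's int encoding; FieldTypes constructs such terms. A fragment under the same assumptions as the hunk above:

    FieldTypes fieldTypes = w.getFieldTypes();
    Document doc = w.newDocument();
    doc.addUniqueInt("id", 42 + 17);
    w.addDocument(doc);
    w.deleteDocuments(fieldTypes.newIntTerm("id", 42 + 17));  // was new Term("id", "" + (42+17))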
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
index afc3dfd..8dd5e13 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
@@ -28,7 +28,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.LuceneTestCase;
@@ -43,7 +42,6 @@
 public class TestTransactionRollback extends LuceneTestCase {
 
   private static final String FIELD_RECORD_ID = "record_id";
-  private Directory dir;
 
   //Rolls back index to a chosen ID
   private void rollBackLast(int id) throws Exception {
@@ -96,11 +94,10 @@
     final Bits liveDocs = MultiFields.getLiveDocs(r);
     for (int i = 0; i < r.maxDoc(); i++) {
       if (liveDocs == null || liveDocs.get(i)) {
-        String sval=r.document(i).get(FIELD_RECORD_ID);
-        if(sval!=null) {
-          int val=Integer.parseInt(sval);
+        Integer val = r.document(i).getInt(FIELD_RECORD_ID);
+        if (val != null) {
           assertTrue("Did not expect document #"+val, expecteds.get(val));
-          expecteds.set(val,false);
+          expecteds.set(val, false);
         }
       }
     }
@@ -127,16 +124,16 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    dir = newDirectory();
 
-    //Build index, of records 1 to 100, committing after each batch of 10
-    IndexDeletionPolicy sdp=new KeepAllDeletionPolicy();
-    IndexWriter w=new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                          .setIndexDeletionPolicy(sdp));
+    // Build an index of records 1 to 100, committing after each batch of 10
+    IndexDeletionPolicy sdp = new KeepAllDeletionPolicy();
+    IndexWriter w = new IndexWriter(dir,
+                                    newIndexWriterConfig(new MockAnalyzer(random()))
+                                     .setIndexDeletionPolicy(sdp));
 
     for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
-      Document doc=new Document();
-      doc.add(newTextField(FIELD_RECORD_ID, ""+currentRecordId, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addUniqueInt(FIELD_RECORD_ID, currentRecordId);
       w.addDocument(doc);
 
       if (currentRecordId%10 == 0) {
@@ -150,12 +147,6 @@
     w.close();
   }
   
-  @Override
-  public void tearDown() throws Exception {
-    dir.close();
-    super.tearDown();
-  }
-
   // Rolls back to previous commit point
   class RollbackDeletionPolicy extends IndexDeletionPolicy {
     private int rollbackPoint;
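The setUp and verification changes in TestTransactionRollback pair up: records are now indexed with addUniqueInt, so the reader side drops the get-string-then-Integer.parseInt dance in favor of a typed getter that returns null when a document lacks the field. Condensed from the hunks above:

    // write side (setUp):
    Document doc = w.newDocument();
    doc.addUniqueInt(FIELD_RECORD_ID, currentRecordId);
    // read side: typed getter, null when the field is absent
    Integer val = r.document(i).getInt(FIELD_RECORD_ID);  // was Integer.parseInt(r.document(i).get(...))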
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
index 8228e27..79c5588 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
@@ -21,9 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -152,20 +150,21 @@
 
     public void update(IndexWriter writer) throws IOException {
       // Add 10 docs:
-      FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("id");
+
       for(int j=0; j<10; j++) {
-        Document d = new Document();
+        Document d = writer.newDocument();
         int n = random().nextInt();
-        d.add(newField("id", Integer.toString(nextID++), customType));
-        d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
+        d.addUniqueInt("id", nextID++);
+        d.addLargeText("contents", English.intToEnglish(n));
         writer.addDocument(d);
       }
 
       // Delete 5 docs:
       int deleteID = nextID-1;
       for(int j=0; j<5; j++) {
-        writer.deleteDocuments(new Term("id", ""+deleteID));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", deleteID));
         deleteID -= 2;
       }
     }
@@ -214,9 +213,9 @@
   public void initIndex(Directory dir) throws Throwable {
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
     for(int j=0; j<7; j++) {
-      Document d = new Document();
+      Document d = writer.newDocument();
       int n = random().nextInt();
-      d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
+      d.addLargeText("contents", English.intToEnglish(n));
       writer.addDocument(d);
     }
     writer.close();
@@ -259,8 +258,9 @@
     for(int i=0;i<numThread;i++)
       threads[i].join();
 
-    for(int i=0;i<numThread;i++)
+    for(int i=0;i<numThread;i++) {
       assertTrue(!threads[i].failed);
+    }
     dir1.close();
     dir2.close();
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
index 4ad2475..87edd87 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
@@ -21,14 +21,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.search.SearcherFactory;
 import org.apache.lucene.search.SearcherManager;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -58,8 +56,8 @@
     IndexWriter writer = getWriter(directory);
 
     for (int i = 0; i < 10; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("foo", String.valueOf(i), Store.YES));
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("foo", i);
       writer.addDocument(doc);
     }
 
@@ -81,11 +79,11 @@
                                                               new SearcherFactory());
 
     TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
+    FieldTypes fieldTypes = mgrWriter.getFieldTypes();
 
     IndexSearcher searcher = mgr.acquire();
 
-    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
-                                      100);
+    TopDocs topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
     assertEquals(1, topDocs.totalHits);
 
     long result;
@@ -112,7 +110,7 @@
 
     searcher = mgr.acquire();
 
-    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+    topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
 
     assertEquals(0, topDocs.totalHits);
   }
@@ -123,6 +121,7 @@
     Directory directory = createIndex();
 
     IndexWriter writer = getWriter(directory);
+    FieldTypes fieldTypes = writer.getFieldTypes();
 
     ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer,
                                                               true,
@@ -130,8 +129,7 @@
 
     IndexSearcher searcher = mgr.acquire();
 
-    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
-                                      100);
+    TopDocs topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
     assertEquals(1, topDocs.totalHits);
 
     TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
@@ -148,7 +146,7 @@
 
     searcher = mgr.acquire();
 
-    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+    topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
 
     assertEquals(0, topDocs.totalHits);
 
@@ -156,7 +154,7 @@
 
     searcher = new IndexSearcher(DirectoryReader.open(directory));
 
-    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+    topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
 
     assertEquals(0, topDocs.totalHits);
 
@@ -168,6 +166,7 @@
     Directory directory = createIndex();
 
     IndexWriter writer = getWriter(directory);
+    FieldTypes fieldTypes = writer.getFieldTypes();
 
     ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer,
                                                               true,
@@ -175,13 +174,11 @@
 
     IndexSearcher searcher = mgr.acquire();
 
-    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
-                                      100);
+    TopDocs topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
     assertEquals(1, topDocs.totalHits);
 
     TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
-    long result = mgrWriter.deleteDocuments(new TermQuery(new Term("foo",
-                                                                   "0")));
+    long result = mgrWriter.deleteDocuments(fieldTypes.newExactIntQuery("foo", 0));
 
     assertEquals(1, result);
 
@@ -193,7 +190,7 @@
 
     searcher = mgr.acquire();
 
-    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+    topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
 
     assertEquals(0, topDocs.totalHits);
   }
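Every TestTryDelete change is the same substitution: a hand-built TermQuery over the string "0" becomes a FieldTypes-constructed exact-int query against the unique int "foo" field. A fragment condensed from the hunks above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    TopDocs topDocs = searcher.search(fieldTypes.newExactIntQuery("foo", 0), 100);
    // was: searcher.search(new TermQuery(new Term("foo", "0")), 100)
    assertEquals(1, topDocs.totalHits);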
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java
new file mode 100644
index 0000000..8432e0d
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java
@@ -0,0 +1,563 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.document.LowSchemaField;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestUniqueFields extends LuceneTestCase {
+
+  public void testBasic1() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    shouldFail(() -> w.addDocument(doc),
+               "field \"field\" must be unique, but value=[6f 6e 65] appears more than once");
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, hitCount(s, fieldTypes.newExactBinaryQuery("field", new BytesRef("one"))));
+    assertEquals(1, r.numDocs());
+    r.close();
+    w.close();
+  }
+
+  public void testBasic1Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    shouldFail(() -> w.addDocument(doc),
+               "field \"field\" must be unique, but value=[80 0 0 11] appears more than once");
+    DirectoryReader r = DirectoryReader.open(w, true);
+    assertEquals(1, r.numDocs());
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, hitCount(s, fieldTypes.newExactIntQuery("field", 17)));
+    r.close();
+    w.close();
+  }
+
+  public void testBasic2() throws Exception {
+    IndexWriter w = newIndexWriter();
+    final Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    shouldFail(() -> w.addDocument(doc),
+               "field \"field\" must be unique, but value=[6f 6e 65] appears more than once");
+
+    Document doc2 = w.newDocument();
+    doc2.addUniqueAtom("field", new BytesRef("two"));
+    w.addDocument(doc2);
+
+    mgr.maybeRefresh();
+
+    r = mgr.acquire();
+    try {
+      assertEquals(2, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
+  public void testBasic2Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    final Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    shouldFail(() -> w.addDocument(doc),
+               "field \"field\" must be unique, but value=[80 0 0 11] appears more than once");
+
+    Document doc2 = w.newDocument();
+    doc2.addUniqueInt("field", 22);
+    w.addDocument(doc2);
+
+    mgr.maybeRefresh();
+
+    r = mgr.acquire();
+    try {
+      assertEquals(2, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
+  public void testExcInvalidChange1() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueAtom("field", new BytesRef("two")),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+  }
+
+  public void testExcInvalidChange1Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueInt("field", 22),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+  }
+
+  public void testExcInvalidChange2() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addAtom("field", new BytesRef("two")),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+  }
+
+  public void testExcInvalidChange2Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("field", 22),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+  }
+
+  public void testExcInvalidChange3() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addAtom("field", "one");
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueAtom("field", "two"),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+  }
+
+  public void testExcInvalidChange3Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueInt("field", 22),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+  }
+
+  public void testExcInvalidChange4() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", "one");
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addAtom("field", "two"),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+  }
+
+  public void testExcInvalidChange4Int() throws Exception {
+    IndexWriter w = newIndexWriter();
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("field", 22),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+  }
+
+  public void testDeletes() throws Exception {
+    IndexWriter w = newIndexWriter();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.deleteDocuments(new Term("field", new BytesRef("one")));
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
+  public void testDeletesInt() throws Exception {
+    IndexWriter w = newIndexWriter();
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.deleteDocuments(fieldTypes.newIntTerm("field", 17));
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
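+  /** updateDocument on the unique term replaces the existing document rather
+   *  than throwing NotUniqueException. */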
+  public void testUpdates() throws Exception {
+    IndexWriter w = newIndexWriter();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(new Term("field", new BytesRef("one")), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(new Term("field", new BytesRef("one")), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
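+  /** Same as testUpdates, but keyed on a unique int field. */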
+  public void testUpdatesInt() throws Exception {
+    IndexWriter w = newIndexWriter();
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(fieldTypes.newIntTerm("field", 17), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(fieldTypes.newIntTerm("field", 17), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+  }
+
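+  /** Multi-threaded stress test: concurrent adds, updates and deletes over a
+   *  fixed term set; at the end every term must be in exactly one live document. */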
+  public void testRandom() throws Exception {
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+
+    Set<BytesRef> terms = new HashSet<>();
+    final int numTerms = atLeast(1000);
+    while (terms.size() < numTerms) {
+      terms.add(new BytesRef(TestUtil.randomRealisticUnicodeString(random())));
+    }
+    final List<BytesRef> termsList = new ArrayList<>(terms);
+    final CountDownLatch startingGun = new CountDownLatch(1);
+    Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 5)];
+    for(int i=0;i<threads.length;i++) {
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              startingGun.await();
+
+              // First add randomly for a while:
+              for(int iter=0;iter<3*numTerms;iter++) {
+                BytesRef term = termsList.get(random().nextInt(termsList.size()));
+                if (random().nextInt(4) == 1) {
+                  w.deleteDocuments(new Term("field", term));
+                } else {
+                  Document doc = w.newDocument();
+                  doc.addUniqueAtom("field", term);
+                  if (random().nextBoolean()) {
+                    w.updateDocument(new Term("field", term), doc);
+                  } else {
+                    try {
+                      w.addDocument(doc);
+                    } catch (NotUniqueException nue) {
+                      // OK
+                    }
+                  }
+                }
+              }
+
+              // Then add every single term, so we know all will be added:
+              for(BytesRef term : termsList) {
+                Document doc = w.newDocument();
+                doc.addUniqueAtom("field", term);
+                if (random().nextBoolean()) {
+                  w.updateDocument(new Term("field", term), doc);
+                } else {
+                  try {
+                    w.addDocument(doc);
+                  } catch (NotUniqueException nue) {
+                    // OK
+                  }
+                }
+              }
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+      threads[i].start();
+    }
+    startingGun.countDown();
+    for(Thread thread : threads) {
+      thread.join();
+    }
+    w.forceMerge(1);
+    IndexReader r = w.getReader();
+    assertEquals(terms.size(), r.maxDoc());
+    assertEquals(terms.size(), MultiFields.getTerms(r, "field").size());
+    r.close();
+    w.close();
+  }
+
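+  /** Int variant of testRandom, driving updates and deletes through
+   *  FieldTypes.newIntTerm. */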
+  public void testRandomInt() throws Exception {
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    final FieldTypes fieldTypes = w.getFieldTypes();
+
+    Set<Integer> terms = new HashSet<>();
+    final int numTerms = atLeast(1000);
+    while (terms.size() < numTerms) {
+      terms.add(random().nextInt());
+    }
+
+    final List<Integer> termsList = new ArrayList<>(terms);
+    final CountDownLatch startingGun = new CountDownLatch(1);
+    Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 5)];
+    for(int i=0;i<threads.length;i++) {
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              startingGun.await();
+
+              // First add randomly for a while:
+              for(int iter=0;iter<3*numTerms;iter++) {
+                Integer term = termsList.get(random().nextInt(termsList.size()));
+                if (iter > 0 && random().nextInt(4) == 1) {
+                  w.deleteDocuments(fieldTypes.newIntTerm("field", term.intValue()));
+                } else {
+                  Document doc = w.newDocument();
+                  doc.addUniqueInt("field", term.intValue());
+                  if (random().nextBoolean()) {
+                    w.updateDocument(fieldTypes.newIntTerm("field", term.intValue()), doc);
+                  } else {
+                    try {
+                      w.addDocument(doc);
+                    } catch (NotUniqueException nue) {
+                      // OK
+                    }
+                  }
+                }
+              }
+
+              // Then add every single term, so we know all will be added:
+              for(Integer term : termsList) {
+                Document doc = w.newDocument();
+                doc.addUniqueInt("field", term.intValue());
+                if (random().nextBoolean()) {
+                  w.updateDocument(fieldTypes.newIntTerm("field", term.intValue()), doc);
+                } else {
+                  try {
+                    w.addDocument(doc);
+                  } catch (NotUniqueException nue) {
+                    // OK
+                  }
+                }
+              }
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+      threads[i].start();
+    }
+    startingGun.countDown();
+    for(Thread thread : threads) {
+      thread.join();
+    }
+    w.forceMerge(1);
+    IndexReader r = w.getReader();
+    assertEquals(terms.size(), r.maxDoc());
+    assertEquals(terms.size(), MultiFields.getTerms(r, "field").size());
+    r.close();
+    w.close();
+  }
+
+  /** Make sure CheckIndex detects a violation of the unique constraint, and that -exorcise properly repairs it. */
+  public void testExcCheckIndex() throws Exception {
+    IndexWriter w = newIndexWriter();
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+    Document doc2 = w.newDocument();
+    doc2.addUniqueAtom("field", "one");
+
+    w.addDocument(doc2);
+    mgr.maybeRefresh();
+
+    try {
+      w.addDocument(doc2);
+      fail("did not hit exception");
+    } catch (NotUniqueException nue) {
+      // expected
+    }
+    DirectoryReader r = mgr.acquire();
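+    // addIndexes side-steps the unique-term check, so this deliberately
+    // produces a duplicate for CheckIndex to detect: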
+    TestUtil.addIndexesSlowly(w, r);
+    r.close();
+    w.close();
+
+    try (CheckIndex checker = new CheckIndex(dir)) {
+      checker.setCrossCheckTermVectors(true);
+      ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+      checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);
+      CheckIndex.Status status = checker.checkIndex(null);
+      assertFalse(status.clean);
+      assertEquals(1, status.nonUniqueCount);
+      checker.exorciseIndex(status);
+      assertTrue(bos.toString(IOUtils.UTF_8).contains("field=\"field\" is supposed to be unique, but isn't: e.g. term=[6f 6e 65] matches both docID=0 and docID=2; total 1 non-unique documents would be deleted"));
+    }
+
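+    // after -exorcise only the original document should remain searchable: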
+    r = DirectoryReader.open(dir);
+    assertEquals(1, r.numDocs());
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, hitCount(s, new TermQuery(new Term("field", "one"))));
+    r.close();
+  }
+
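+  /** A multi-valued field may hold several unique values within a single document. */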
+  public void testMultiValuedUnique() throws Exception {
+    IndexWriter w = newIndexWriter();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", "foo");
+    doc.addUniqueAtom("field", "bar");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "foo"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newExactStringQuery("field", "bar"), 1).totalHits);
+    r.close();
+    w.close();
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
index eab5f11..21d058f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.Similarity;
@@ -50,11 +49,9 @@
     config.setMergePolicy(newLogMergePolicy());
     config.setSimilarity(new TestSimilarity());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    Document doc = new Document();
-    Field foo = newTextField("foo", "", Field.Store.NO);
-    doc.add(foo);
     for (int i = 0; i < 100; i++) {
-      foo.setStringValue(addValue());
+      Document doc = writer.newDocument();
+      doc.addLargeText("foo", addValue());
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
index 46d2bfd..291566b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -22,19 +22,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -121,40 +113,12 @@
   private static IndexReader build(Random random, TestIndex index) throws IOException {
     /* build an index */
     
-    Document doc = new Document();
-    Field idField = newStringField(random, "id", "", Field.Store.YES);
-    Field idDVField = new SortedDocValuesField("id", new BytesRef());
-    Field intIdField = new IntField("id_int", 0, Store.YES);
-    Field intDVField = new NumericDocValuesField("id_int", 0);
-    Field floatIdField = new FloatField("id_float", 0, Store.YES);
-    Field floatDVField = new NumericDocValuesField("id_float", 0);
-    Field longIdField = new LongField("id_long", 0, Store.YES);
-    Field longDVField = new NumericDocValuesField("id_long", 0);
-    Field doubleIdField = new DoubleField("id_double", 0, Store.YES);
-    Field doubleDVField = new NumericDocValuesField("id_double", 0);
-    Field randField = newStringField(random, "rand", "", Field.Store.YES);
-    Field randDVField = new SortedDocValuesField("rand", new BytesRef());
-    Field bodyField = newStringField(random, "body", "", Field.Store.NO);
-    Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
-    doc.add(idField);
-    doc.add(idDVField);
-    doc.add(intIdField);
-    doc.add(intDVField);
-    doc.add(floatIdField);
-    doc.add(floatDVField);
-    doc.add(longIdField);
-    doc.add(longDVField);
-    doc.add(doubleIdField);
-    doc.add(doubleDVField);
-    doc.add(randField);
-    doc.add(randDVField);
-    doc.add(bodyField);
-    doc.add(bodyDVField);
-
     RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
                                                      newIndexWriterConfig(random, new MockAnalyzer(random))
                                                      .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
     TestUtil.reduceOpenFiles(writer.w);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableSorting("id");
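+    // FieldTypes now supplies the doc values that used to be added by hand;
+    // only sorting on "id" needs to be enabled explicitly.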
 
     while(true) {
 
@@ -162,18 +126,13 @@
       int maxCount = 0;
 
       for (int d = minId; d <= maxId; d++) {
-        idField.setStringValue(pad(d));
-        idDVField.setBytesValue(new BytesRef(pad(d)));
-        intIdField.setIntValue(d);
-        intDVField.setLongValue(d);
-        floatIdField.setFloatValue(d);
-        floatDVField.setLongValue(Float.floatToRawIntBits(d));
-        longIdField.setLongValue(d);
-        longDVField.setLongValue(d);
-        doubleIdField.setDoubleValue(d);
-        doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
-        int r = index.allowNegativeRandomInts ? random.nextInt() : random
-          .nextInt(Integer.MAX_VALUE);
+        Document doc = writer.newDocument();
+        doc.addAtom("id", pad(d));
+        doc.addInt("id_int", d);
+        doc.addFloat("id_float", d);
+        doc.addLong("id_long", d);
+        doc.addDouble("id_double", d);
+        int r = index.allowNegativeRandomInts ? random.nextInt() : random.nextInt(Integer.MAX_VALUE);
         if (index.maxR < r) {
           index.maxR = r;
           maxCount = 1;
@@ -187,10 +146,8 @@
         } else if (r == index.minR) {
           minCount++;
         }
-        randField.setStringValue(pad(r));
-        randDVField.setBytesValue(new BytesRef(pad(r)));
-        bodyField.setStringValue("body");
-        bodyDVField.setBytesValue(new BytesRef("body"));
+        doc.addAtom("rand", pad(r));
+        doc.addShortText("body", "body");
         writer.addDocument(doc);
       }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java b/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java
index 427888b..b14ce76 100644
--- a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java
@@ -16,14 +16,12 @@
  */
 package org.apache.lucene.search;
 
-
 import java.io.IOException;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -33,7 +31,6 @@
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
-
 public class FuzzyTermOnShortTermsTest extends LuceneTestCase {
    private final static String FIELD = "field";
    
@@ -87,10 +84,9 @@
           .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000)).setMergePolicy(newLogMergePolicy()));
 
       for (String s : vals){
-         Document d = new Document();
-         d.add(newTextField(FIELD, s, Field.Store.YES));
+         Document d = writer.newDocument();
+         d.addLargeText(FIELD, s);
          writer.addDocument(d);
-            
       }
       writer.close();
       return directory;
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java b/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java
similarity index 89%
rename from lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java
rename to lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java
index 7f0da76..ce6a8db 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.queries;
+package org.apache.lucene.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,10 +17,14 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
@@ -33,14 +37,10 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
 public class TermFilterTest extends LuceneTestCase {
 
   public void testCachability() throws Exception {
@@ -56,8 +56,8 @@
     String fieldName = "field1";
     Directory rd = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), rd);
-    Document doc = new Document();
-    doc.add(newStringField(fieldName, "value1", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom(fieldName, "value1");
     w.addDocument(doc);
     IndexReader reader = SlowCompositeReaderWrapper.wrap(w.getReader());
     assertTrue(reader.getContext() instanceof LeafReaderContext);
@@ -89,8 +89,8 @@
       String field = "field" + i;
       String string = TestUtil.randomRealisticUnicodeString(random());
       terms.add(new Term(field, string));
-      Document doc = new Document();
-      doc.add(newStringField(field, string, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom(field, string);
       w.addDocument(doc);
     }
     IndexReader reader = w.getReader();
@@ -179,4 +179,18 @@
     return new TermFilter(term);
   }
 
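+  /** A filter on a term that occurs in every document must match all of them. */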
+  public void testAllMatches() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    int numDocs = atLeast(10000);
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", "foo");
+      w.addDocument(doc);
+    }
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+    assertEquals(numDocs, s.search(new ConstantScoreQuery(new TermFilter(new Term("field", "foo"))), 1).totalHits);
+    IOUtils.close(r, w, dir);
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index 9f46ab2..6da3280 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -24,7 +24,6 @@
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -56,19 +55,26 @@
     super.setUp();
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    Field titleField = newTextField("title", "some title", Field.Store.NO);
-    Field field = newTextField(FN, "this is document one 2345", Field.Store.NO);
-    Field footerField = newTextField("footer", "a footer", Field.Store.NO);
-    doc.add(titleField);
-    doc.add(field);
-    doc.add(footerField);
+
+    Document doc = writer.newDocument();
+    doc.addLargeText(FN, "this is document one 2345");
+    doc.addShortText("title", "some title");
+    doc.addShortText("footer", "a footer");
     writer.addDocument(doc);
-    field.setStringValue("some text from doc two a short piece 5678.91");
+
+    doc = writer.newDocument();
+    doc.addLargeText(FN, "some text from doc two a short piece 5678.91");
+    doc.addShortText("title", "some title");
+    doc.addShortText("footer", "a footer");
     writer.addDocument(doc);
-    field.setStringValue("doc three has some different stuff"
+
+    doc = writer.newDocument();
+    doc.addLargeText(FN, "doc three has some different stuff"
         + " with numbers 1234 5678.9 and letter b");
+    doc.addShortText("title", "some title");
+    doc.addShortText("footer", "a footer");
     writer.addDocument(doc);
+
     reader = writer.getReader();
     searcher = newSearcher(reader);
     writer.close();
@@ -117,8 +123,8 @@
     assertAutomatonHits(2, Automata.makeString("doc"));
     assertAutomatonHits(1, Automata.makeChar('a'));
     assertAutomatonHits(2, Automata.makeCharRange('a', 'b'));
-    assertAutomatonHits(2, Automata.makeInterval(1233, 2346, 0));
-    assertAutomatonHits(1, Automata.makeInterval(0, 2000, 0));
+    assertAutomatonHits(2, Automata.makeDecimalInterval(1233, 2346, 0));
+    assertAutomatonHits(1, Automata.makeDecimalInterval(0, 2000, 0));
     assertAutomatonHits(2, Operations.union(Automata.makeChar('a'),
         Automata.makeChar('b')));
     assertAutomatonHits(0, Operations.intersection(Automata
@@ -194,8 +200,6 @@
     Automaton pfx = Automata.makeString("do");
     Automaton prefixAutomaton = Operations.concatenate(pfx, Automata.makeAnyString());
     AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), prefixAutomaton);
-    Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
-    assertTrue(aq.getTermsEnum(terms) instanceof PrefixTermsEnum);
     assertEquals(3, automatonQueryNrHits(aq));
   }
   
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
index 6553ea3..72c779a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -46,38 +45,28 @@
     super.setUp();
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    Field titleField = newTextField("title", "some title", Field.Store.NO);
-    Field field = newTextField(FN, "", Field.Store.NO);
-    Field footerField = newTextField("footer", "a footer", Field.Store.NO);
-    doc.add(titleField);
-    doc.add(field);
-    doc.add(footerField);
-    field.setStringValue("\uD866\uDF05abcdef");
-    writer.addDocument(doc);
-    field.setStringValue("\uD866\uDF06ghijkl");
-    writer.addDocument(doc);
-    // this sorts before the previous two in UTF-8/UTF-32, but after in UTF-16!!!
-    field.setStringValue("\uFB94mnopqr"); 
-    writer.addDocument(doc);
-    field.setStringValue("\uFB95stuvwx"); // this one too.
-    writer.addDocument(doc);
-    field.setStringValue("a\uFFFCbc");
-    writer.addDocument(doc);
-    field.setStringValue("a\uFFFDbc");
-    writer.addDocument(doc);
-    field.setStringValue("a\uFFFEbc");
-    writer.addDocument(doc);
-    field.setStringValue("a\uFB94bc");
-    writer.addDocument(doc);
-    field.setStringValue("bacadaba");
-    writer.addDocument(doc);
-    field.setStringValue("\uFFFD");
-    writer.addDocument(doc);
-    field.setStringValue("\uFFFD\uD866\uDF05");
-    writer.addDocument(doc);
-    field.setStringValue("\uFFFD\uFFFD");
-    writer.addDocument(doc);
+
+    for(String body : new String[] {
+        "\uD866\uDF05abcdef",
+        "\uD866\uDF06ghijkl",
+        "\uFB94mnopqr", // this sorts before the previous two in UTF-8/UTF-32, but after in UTF-16!!!
+        "\uFB95stuvwx", // this one too.
+        "a\uFFFCbc",
+        "a\uFFFDbc",
+        "a\uFFFEbc",
+        "a\uFB94bc",
+        "bacadaba",
+        "\uFFFD",
+        "\uFFFD\uD866\uDF05",
+        "\uFFFD\uFFFD" }) {
+
+      Document doc = writer.newDocument();
+      doc.addLargeText(FN, body);
+      doc.addShortText("title", "some title");
+      doc.addShortText("footer", "a footer");
+      writer.addDocument(doc);
+    }
+
     reader = writer.getReader();
     searcher = newSearcher(reader);
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
index 9b2e4f0..fb4f134 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -22,11 +22,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -59,8 +58,8 @@
     directory = newDirectory();
     RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < docFields.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField(field, docFields[i], Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText(field, docFields[i]);
       writer.addDocument(doc);
     }
     writer.close();
@@ -93,13 +92,13 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), dir2, 
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    Document doc = new Document();
-    doc.add(newTextField("field2", "xxx", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field2", "xxx");
     for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
       w.addDocument(doc);
     }
-    doc = new Document();
-    doc.add(newTextField("field2", "big bad bug", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field2", "big bad bug");
     for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
       w.addDocument(doc);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanCoord.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanCoord.java
index 0265af9..d4ff9b5 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanCoord.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanCoord.java
@@ -21,13 +21,12 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -48,22 +47,24 @@
   public static void beforeClass() throws Exception {
     dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    
+    FieldTypes fieldTypes = iw.getFieldTypes();
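+    // each test document puts several atoms in "field", so mark it multi-valued: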
+    fieldTypes.setMultiValued("field");
+
     // we only add two documents for testing:
     // the first document has 3 terms A,B,C (for positive matching). we test scores against this.
     // the second document has 3 negative terms 1,2,3 that exist in the segment (for non-null scorers)
     // to test terms that don't exist (null scorers), we use X,Y,Z
     
-    Document doc = new Document();
-    doc.add(new StringField("field", "A", Field.Store.NO));
-    doc.add(new StringField("field", "B", Field.Store.NO));
-    doc.add(new StringField("field", "C", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addAtom("field", "A");
+    doc.addAtom("field", "B");
+    doc.addAtom("field", "C");
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new StringField("field", "1", Field.Store.NO));
-    doc.add(new StringField("field", "2", Field.Store.NO));
-    doc.add(new StringField("field", "3", Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addAtom("field", "1");
+    doc.addAtom("field", "2");
+    doc.addAtom("field", "3");
     iw.addDocument(doc);
 
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
index 083075e..a13b2dd 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
@@ -17,305 +17,302 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.similarities.DefaultSimilarity;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.store.Directory;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
 import java.text.DecimalFormat;
 import java.text.DecimalFormatSymbols;
 import java.util.Locale;
 import java.util.Random;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
 /** Test that BooleanQuery.setMinimumNumberShouldMatch works.
  */
 public class TestBooleanMinShouldMatch extends LuceneTestCase {
 
-    private static Directory index;
-    private static IndexReader r;
-    private static IndexSearcher s;
+  private static Directory index;
+  private static IndexReader r;
+  private static IndexSearcher s;
 
-    @BeforeClass
-    public static void beforeClass() throws Exception {
-        String[] data = new String [] {
-            "A 1 2 3 4 5 6",
-            "Z       4 5 6",
-            null,
-            "B   2   4 5 6",
-            "Y     3   5 6",
-            null,
-            "C     3     6",
-            "X       4 5 6"
-        };
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    String[] data = new String [] {
+      "A 1 2 3 4 5 6",
+      "Z       4 5 6",
+      null,
+      "B   2   4 5 6",
+      "Y     3   5 6",
+      null,
+      "C     3     6",
+      "X       4 5 6"
+    };
 
-        index = newDirectory();
-        RandomIndexWriter w = new RandomIndexWriter(random(), index);
+    index = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), index);
 
-        for (int i = 0; i < data.length; i++) {
-            Document doc = new Document();
-            doc.add(newStringField("id", String.valueOf(i), Field.Store.YES));//Field.Keyword("id",String.valueOf(i)));
-            doc.add(newStringField("all", "all", Field.Store.YES));//Field.Keyword("all","all"));
-            if (null != data[i]) {
-                doc.add(newTextField("data", data[i], Field.Store.YES));//Field.Text("data",data[i]));
-            }
-            w.addDocument(doc);
-        }
-
-        r = w.getReader();
-        s = newSearcher(r);
-        w.close();
-//System.out.println("Set up " + getName());
+    for (int i = 0; i < data.length; i++) {
+      Document doc = w.newDocument();
+      doc.addUniqueAtom("id", String.valueOf(i));//Field.Keyword("id",String.valueOf(i)));
+      doc.addLargeText("all", "all");//Field.Keyword("all","all"));
+      if (null != data[i]) {
+        doc.addLargeText("data", data[i]);//Field.Text("data",data[i]));
+      }
+      w.addDocument(doc);
     }
+
+    r = w.getReader();
+    s = newSearcher(r);
+    w.close();
+    //System.out.println("Set up " + getName());
+  }
     
-    @AfterClass
-    public static void afterClass() throws Exception {
-      s = null;
-      r.close();
-      r = null;
-      index.close();
-      index = null;
+  @AfterClass
+  public static void afterClass() throws Exception {
+    s = null;
+    r.close();
+    r = null;
+    index.close();
+    index = null;
+  }
+
+  public void verifyNrHits(Query q, int expected) throws Exception {
+    // bs1
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    if (expected != h.length) {
+      printHits(getTestName(), h, s);
     }
-
-
-    public void verifyNrHits(Query q, int expected) throws Exception {
-        // bs1
-        ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-        if (expected != h.length) {
-            printHits(getTestName(), h, s);
-        }
-        assertEquals("result count", expected, h.length);
-        //System.out.println("TEST: now check");
-        // bs2
-        TopScoreDocCollector collector = TopScoreDocCollector.create(1000);
-        s.search(q, collector);
-        ScoreDoc[] h2 = collector.topDocs().scoreDocs;
-        if (expected != h2.length) {
-          printHits(getTestName(), h2, s);
-        }
-        assertEquals("result count (bs2)", expected, h2.length);
-
-        QueryUtils.check(random(), q,s);
+    assertEquals("result count", expected, h.length);
+    //System.out.println("TEST: now check");
+    // bs2
+    TopScoreDocCollector collector = TopScoreDocCollector.create(1000);
+    s.search(q, collector);
+    ScoreDoc[] h2 = collector.topDocs().scoreDocs;
+    if (expected != h2.length) {
+      printHits(getTestName(), h2, s);
     }
+    assertEquals("result count (bs2)", expected, h2.length);
 
-    public void testAllOptional() throws Exception {
+    QueryUtils.check(random(), q,s);
+  }
 
-        BooleanQuery q = new BooleanQuery();
-        for (int i = 1; i <=4; i++) {
-            q.add(new TermQuery(new Term("data",""+i)), BooleanClause.Occur.SHOULD);//false, false);
-        }
-        q.setMinimumNumberShouldMatch(2); // match at least two of 4
-        verifyNrHits(q, 2);
+  public void testAllOptional() throws Exception {
+
+    BooleanQuery q = new BooleanQuery();
+    for (int i = 1; i <=4; i++) {
+      q.add(new TermQuery(new Term("data",""+i)), BooleanClause.Occur.SHOULD);//false, false);
     }
+    q.setMinimumNumberShouldMatch(2); // match at least two of 4
+    verifyNrHits(q, 2);
+  }
 
-    public void testOneReqAndSomeOptional() throws Exception {
+  public void testOneReqAndSomeOptional() throws Exception {
 
-        /* one required, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* one required, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+    q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
 
-        verifyNrHits(q, 5);
-    }
+    verifyNrHits(q, 5);
+  }
 
-    public void testSomeReqAndSomeOptional() throws Exception {
+  public void testSomeReqAndSomeOptional() throws Exception {
 
-        /* two required, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* two required, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+    q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
 
-        verifyNrHits(q, 5);
-    }
+    verifyNrHits(q, 5);
+  }
 
-    public void testOneProhibAndSomeOptional() throws Exception {
+  public void testOneProhibAndSomeOptional() throws Exception {
 
-        /* one prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* one prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+    q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testSomeProhibAndSomeOptional() throws Exception {
+  public void testSomeProhibAndSomeOptional() throws Exception {
 
-        /* two prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    /* two prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
 
-        q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
+    q.setMinimumNumberShouldMatch(2); // 2 of 3 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testOneReqOneProhibAndSomeOptional() throws Exception {
+  public void testOneReqOneProhibAndSomeOptional() throws Exception {
 
-        /* one required, one prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);// true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* one required, one prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);// true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+    q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testSomeReqOneProhibAndSomeOptional() throws Exception {
+  public void testSomeReqOneProhibAndSomeOptional() throws Exception {
 
-        /* two required, one prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* two required, one prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+    q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testOneReqSomeProhibAndSomeOptional() throws Exception {
+  public void testOneReqSomeProhibAndSomeOptional() throws Exception {
 
-        /* one required, two prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    /* one required, two prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
 
-        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+    q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testSomeReqSomeProhibAndSomeOptional() throws Exception {
+  public void testSomeReqSomeProhibAndSomeOptional() throws Exception {
 
-        /* two required, two prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    /* two required, two prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
 
-        q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
+    q.setMinimumNumberShouldMatch(3); // 3 of 4 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testMinHigherThenNumOptional() throws Exception {
+  public void testMinHigherThenNumOptional() throws Exception {
 
-        /* two required, two prohibited, some optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    /* two required, two prohibited, some optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all",  "all")), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "5"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "4"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST_NOT);//false, true );
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "1"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "C"  )), BooleanClause.Occur.MUST_NOT);//false, true );
 
-        q.setMinimumNumberShouldMatch(90); // 90 of 4 optional ?!?!?!
+    q.setMinimumNumberShouldMatch(90); // 90 of 4 optional ?!?!?!
 
-        verifyNrHits(q, 0);
-    }
+    verifyNrHits(q, 0);
+  }
 
-    public void testMinEqualToNumOptional() throws Exception {
+  public void testMinEqualToNumOptional() throws Exception {
 
-        /* two required, two optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
+    /* two required, two optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "6"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.SHOULD);//false, false);
 
-        q.setMinimumNumberShouldMatch(2); // 2 of 2 optional 
+    q.setMinimumNumberShouldMatch(2); // 2 of 2 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testOneOptionalEqualToMin() throws Exception {
+  public void testOneOptionalEqualToMin() throws Exception {
 
-        /* two required, one optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
+    /* two required, one optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "3"  )), BooleanClause.Occur.SHOULD);//false, false);
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
 
-        q.setMinimumNumberShouldMatch(1); // 1 of 1 optional 
+    q.setMinimumNumberShouldMatch(1); // 1 of 1 optional 
 
-        verifyNrHits(q, 1);
-    }
+    verifyNrHits(q, 1);
+  }
 
-    public void testNoOptionalButMin() throws Exception {
+  public void testNoOptionalButMin() throws Exception {
 
-        /* two required, no optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
-        q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
+    /* two required, no optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+    q.add(new TermQuery(new Term("data", "2"  )), BooleanClause.Occur.MUST);//true,  false);
 
-        q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
+    q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
 
-        verifyNrHits(q, 0);
-    }
+    verifyNrHits(q, 0);
+  }
 
-    public void testNoOptionalButMin2() throws Exception {
+  public void testNoOptionalButMin2() throws Exception {
 
-        /* one required, no optional */
-        BooleanQuery q = new BooleanQuery();
-        q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
+    /* one required, no optional */
+    BooleanQuery q = new BooleanQuery();
+    q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true,  false);
 
-        q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
+    q.setMinimumNumberShouldMatch(1); // 1 of 0 optional 
 
-        verifyNrHits(q, 0);
-    }
+    verifyNrHits(q, 0);
+  }
 
-    public void testRandomQueries() throws Exception {
-      final String field="data";
-      final String[] vals = {"1","2","3","4","5","6","A","Z","B","Y","Z","X","foo"};
-      int maxLev=4;
+  public void testRandomQueries() throws Exception {
+    final String field="data";
+    final String[] vals = {"1","2","3","4","5","6","A","Z","B","Y","Z","X","foo"};
+    int maxLev=4;
 
-      // callback object to set a random setMinimumNumberShouldMatch
-      TestBoolean2.Callback minNrCB = new TestBoolean2.Callback() {
+    // callback object to set a random setMinimumNumberShouldMatch
+    TestBoolean2.Callback minNrCB = new TestBoolean2.Callback() {
         @Override
         public void postCreate(BooleanQuery q) {
           BooleanClause[] c =q.getClauses();
@@ -334,123 +331,123 @@
 
 
 
-      // increase number of iterations for more complete testing      
-      int num = atLeast(20);
-      for (int i=0; i<num; i++) {
-        int lev = random().nextInt(maxLev);
-        final long seed = random().nextLong();
-        BooleanQuery q1 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
-        // BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), lev, field, vals, minNrCB);
-        BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
-        // only set minimumNumberShouldMatch on the top level query since setting
-        // at a lower level can change the score.
-        minNrCB.postCreate(q2);
+    // increase number of iterations for more complete testing      
+    int num = atLeast(20);
+    for (int i=0; i<num; i++) {
+      int lev = random().nextInt(maxLev);
+      final long seed = random().nextLong();
+      BooleanQuery q1 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
+      // BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), lev, field, vals, minNrCB);
+      BooleanQuery q2 = TestBoolean2.randBoolQuery(new Random(seed), true, lev, field, vals, null);
+      // only set minimumNumberShouldMatch on the top level query since setting
+      // at a lower level can change the score.
+      minNrCB.postCreate(q2);
 
-        // Can't use Hits because normalized scores will mess things
-        // up.  The non-sorting version of search() that returns TopDocs
-        // will not normalize scores.
-        TopDocs top1 = s.search(q1,null,100);
-        TopDocs top2 = s.search(q2,null,100);
-        if (i < 100) {
-          QueryUtils.check(random(), q1,s);
-          QueryUtils.check(random(), q2,s);
-        }
-        assertSubsetOfSameScores(q2, top1, top2);
+      // Can't use Hits because normalized scores will mess things
+      // up.  The non-sorting version of search() that returns TopDocs
+      // will not normalize scores.
+      TopDocs top1 = s.search(q1,null,100);
+      TopDocs top2 = s.search(q2,null,100);
+      if (i < 100) {
+        QueryUtils.check(random(), q1,s);
+        QueryUtils.check(random(), q2,s);
       }
-      // System.out.println("Total hits:"+tot);
+      assertSubsetOfSameScores(q2, top1, top2);
     }
+    // System.out.println("Total hits:"+tot);
+  }
     
-    private void assertSubsetOfSameScores(Query q, TopDocs top1, TopDocs top2) {
-      // The constrained query
-      // should be a subset to the unconstrained query.
-      if (top2.totalHits > top1.totalHits) {
-        fail("Constrained results not a subset:\n"
-                      + CheckHits.topdocsString(top1,0,0)
-                      + CheckHits.topdocsString(top2,0,0)
-                      + "for query:" + q.toString());
-      }
-
-      for (int hit=0; hit<top2.totalHits; hit++) {
-        int id = top2.scoreDocs[hit].doc;
-        float score = top2.scoreDocs[hit].score;
-        boolean found=false;
-        // find this doc in other hits
-        for (int other=0; other<top1.totalHits; other++) {
-          if (top1.scoreDocs[other].doc == id) {
-            found=true;
-            float otherScore = top1.scoreDocs[other].score;
-            // check if scores match
-            assertEquals("Doc " + id + " scores don't match\n"
-                + CheckHits.topdocsString(top1,0,0)
-                + CheckHits.topdocsString(top2,0,0)
-                + "for query:" + q.toString(),
-                score, otherScore, CheckHits.explainToleranceDelta(score, otherScore));
-          }
-        }
-
-        // check if subset
-        if (!found) fail("Doc " + id + " not found\n"
-              + CheckHits.topdocsString(top1,0,0)
-              + CheckHits.topdocsString(top2,0,0)
-              + "for query:" + q.toString());
-      }
+  private void assertSubsetOfSameScores(Query q, TopDocs top1, TopDocs top2) {
+    // The constrained query
+    // should be a subset of the unconstrained query.
+    if (top2.totalHits > top1.totalHits) {
+      fail("Constrained results not a subset:\n"
+           + CheckHits.topdocsString(top1,0,0)
+           + CheckHits.topdocsString(top2,0,0)
+           + "for query:" + q.toString());
     }
 
-    public void testRewriteCoord1() throws Exception {
-      final Similarity oldSimilarity = s.getSimilarity();
-      try {
-        s.setSimilarity(new DefaultSimilarity() {
+    for (int hit=0; hit<top2.totalHits; hit++) {
+      int id = top2.scoreDocs[hit].doc;
+      float score = top2.scoreDocs[hit].score;
+      boolean found=false;
+      // find this doc in other hits
+      for (int other=0; other<top1.totalHits; other++) {
+        if (top1.scoreDocs[other].doc == id) {
+          found=true;
+          float otherScore = top1.scoreDocs[other].score;
+          // check if scores match
+          assertEquals("Doc " + id + " scores don't match\n"
+                       + CheckHits.topdocsString(top1,0,0)
+                       + CheckHits.topdocsString(top2,0,0)
+                       + "for query:" + q.toString(),
+                       score, otherScore, CheckHits.explainToleranceDelta(score, otherScore));
+        }
+      }
+
+      // check if subset
+      if (!found) fail("Doc " + id + " not found\n"
+                       + CheckHits.topdocsString(top1,0,0)
+                       + CheckHits.topdocsString(top2,0,0)
+                       + "for query:" + q.toString());
+    }
+  }
+
+  public void testRewriteCoord1() throws Exception {
+    final Similarity oldSimilarity = s.getSimilarity();
+    try {
+      s.setSimilarity(new DefaultSimilarity() {
           @Override
           public float coord(int overlap, int maxOverlap) {
             return overlap / ((float)maxOverlap + 1);
           }
         });
-        BooleanQuery q1 = new BooleanQuery();
-        q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
-        BooleanQuery q2 = new BooleanQuery();
-        q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
-        q2.setMinimumNumberShouldMatch(1);
-        TopDocs top1 = s.search(q1,null,100);
-        TopDocs top2 = s.search(q2,null,100);
-        assertSubsetOfSameScores(q2, top1, top2);
-      } finally {
-        s.setSimilarity(oldSimilarity);
-      }
+      BooleanQuery q1 = new BooleanQuery();
+      q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+      BooleanQuery q2 = new BooleanQuery();
+      q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+      q2.setMinimumNumberShouldMatch(1);
+      TopDocs top1 = s.search(q1,null,100);
+      TopDocs top2 = s.search(q2,null,100);
+      assertSubsetOfSameScores(q2, top1, top2);
+    } finally {
+      s.setSimilarity(oldSimilarity);
     }
+  }
     
-    public void testRewriteNegate() throws Exception {
-      final Similarity oldSimilarity = s.getSimilarity();
-      try {
-        s.setSimilarity(new DefaultSimilarity() {
+  public void testRewriteNegate() throws Exception {
+    final Similarity oldSimilarity = s.getSimilarity();
+    try {
+      s.setSimilarity(new DefaultSimilarity() {
           @Override
           public float coord(int overlap, int maxOverlap) {
             return overlap / ((float)maxOverlap + 1);
           }
         });
-        BooleanQuery q1 = new BooleanQuery();
-        q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
-        BooleanQuery q2 = new BooleanQuery();
-        q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
-        q2.add(new TermQuery(new Term("data", "Z")), BooleanClause.Occur.MUST_NOT);
-        TopDocs top1 = s.search(q1,null,100);
-        TopDocs top2 = s.search(q2,null,100);
-        assertSubsetOfSameScores(q2, top1, top2);
-      } finally {
-        s.setSimilarity(oldSimilarity);
-      }
+      BooleanQuery q1 = new BooleanQuery();
+      q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+      BooleanQuery q2 = new BooleanQuery();
+      q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+      q2.add(new TermQuery(new Term("data", "Z")), BooleanClause.Occur.MUST_NOT);
+      TopDocs top1 = s.search(q1,null,100);
+      TopDocs top2 = s.search(q2,null,100);
+      assertSubsetOfSameScores(q2, top1, top2);
+    } finally {
+      s.setSimilarity(oldSimilarity);
     }
+  }
 
-    protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) throws Exception {
+  protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) throws Exception {
 
-        System.err.println("------- " + test + " -------");
+    System.err.println("------- " + test + " -------");
 
-        DecimalFormat f = new DecimalFormat("0.000000", DecimalFormatSymbols.getInstance(Locale.ROOT));
+    DecimalFormat f = new DecimalFormat("0.000000", DecimalFormatSymbols.getInstance(Locale.ROOT));
 
-        for (int i = 0; i < h.length; i++) {
-            StoredDocument d = searcher.doc(h[i].doc);
-            float score = h[i].score;
-            System.err.println("#" + i + ": " + f.format(score) + " - " +
-                               d.get("id") + " - " + d.get("data"));
-        }
+    for (int i = 0; i < h.length; i++) {
+      Document d = searcher.doc(h[i].doc);
+      float score = h[i].score;
+      System.err.println("#" + i + ": " + f.format(score) + " - " +
+                         d.get("id") + " - " + d.get("data"));
     }
+  }
 }
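[Editor's note] The assertSubsetOfSameScores helper above encodes the invariant these tests rely on: adding setMinimumNumberShouldMatch to a query may only drop hits, never change the scores of the hits that survive. A minimal sketch of that invariant, assuming the same test fixture (the IndexSearcher field `s` and the pre-rewrite BooleanQuery API used throughout this file):

    BooleanQuery unconstrained = new BooleanQuery();
    unconstrained.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
    unconstrained.add(new TermQuery(new Term("data", "2")), BooleanClause.Occur.SHOULD);

    BooleanQuery constrained = unconstrained.clone();
    constrained.setMinimumNumberShouldMatch(2);   // both SHOULD clauses must now match

    TopDocs all  = s.search(unconstrained, null, 100);
    TopDocs some = s.search(constrained, null, 100);
    // `some` must be a subset of `all`, with identical scores for shared docs.
    assert some.totalHits <= all.totalHits;
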
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
index 4dcddb7..2e19829 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
@@ -25,7 +25,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -139,15 +138,13 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
     //
-    Document d = new Document();
-    d.add(newField(
+    Document d = writer.newDocument();
+    d.addLargeText(
         FIELD_T,
-        "Optimize not deleting all files",
-        TextField.TYPE_STORED));
-    d.add(newField(
+        "Optimize not deleting all files");
+    d.addLargeText(
         FIELD_C,
-        "Deleted When I run an optimize in our production environment.",
-        TextField.TYPE_STORED));
+        "Deleted When I run an optimize in our production environment.");
 
     //
     writer.addDocument(d);
@@ -172,8 +169,8 @@
     int docCount = atLeast(10000);
 
     for(int i=0;i<docCount;i++) {
-      Document doc = new Document();
-      doc.add(newField("field", "a", TextField.TYPE_NOT_STORED));
+      Document doc = riw.newDocument();
+      doc.addLargeText("field", "a");
       riw.addDocument(doc);
     }
 
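[Editor's note] The recurring change across these test files is mechanical: documents are no longer built free-standing with new Document() plus Field instances; they are obtained from the writer, which owns the schema. A hedged before/after sketch, assuming only the branch API visible in the hunks (RandomIndexWriter.newDocument(), Document.addLargeText()):

    // Before (trunk): the caller supplies the field type per Field instance.
    Document doc = new Document();
    doc.add(new TextField("field", "a b c d", Field.Store.NO));
    w.addDocument(doc);

    // After (this branch): the writer hands out schema-aware documents.
    Document doc2 = w.newDocument();        // writer tracks the FieldTypes
    doc2.addLargeText("field", "a b c d");  // tokenized large-text field
    w.addDocument(doc2);
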
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
index 668114a..07fcb18 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
@@ -28,8 +28,6 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -81,8 +79,8 @@
   public void testNullOrSubScorer() throws Throwable {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c d", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c d");
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -145,16 +143,16 @@
   public void testDeMorgan() throws Exception {
     Directory dir1 = newDirectory();
     RandomIndexWriter iw1 = new RandomIndexWriter(random(), dir1);
-    Document doc1 = new Document();
-    doc1.add(newTextField("field", "foo bar", Field.Store.NO));
+    Document doc1 = iw1.newDocument();
+    doc1.addLargeText("field", "foo bar");
     iw1.addDocument(doc1);
     IndexReader reader1 = iw1.getReader();
     iw1.close();
     
     Directory dir2 = newDirectory();
     RandomIndexWriter iw2 = new RandomIndexWriter(random(), dir2);
-    Document doc2 = new Document();
-    doc2.add(newTextField("field", "foo baz", Field.Store.NO));
+    Document doc2 = iw2.newDocument();
+    doc2.addLargeText("field", "foo baz");
     iw2.addDocument(doc2);
     IndexReader reader2 = iw2.getReader();
     iw2.close();
@@ -205,8 +203,8 @@
       if (random().nextInt(20) <= 1) {
         contents += " f";
       }
-      Document doc = new Document();
-      doc.add(new TextField("field", contents, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("field", contents);
       w.addDocument(doc);
     }
     w.forceMerge(1);
@@ -301,8 +299,8 @@
     IndexWriterConfig config = new IndexWriterConfig(indexerAnalyzer);
     IndexWriter writer = new IndexWriter(directory, config);
     String FIELD = "content";
-    Document d = new Document();
-    d.add(new TextField(FIELD, "clockwork orange", Field.Store.YES));
+    Document d = writer.newDocument();
+    d.addLargeText(FIELD, "clockwork orange");
     writer.addDocument(d);
     writer.close();
 
@@ -367,8 +365,8 @@
   public void testMinShouldMatchLeniency() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c d", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c d");
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 8b9e0bb..dd73c5b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -28,11 +28,9 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -63,9 +61,9 @@
     IndexWriterConfig config = newIndexWriterConfig(analyzer);
     config.setMergePolicy(newLogMergePolicy()); // we will use docids to validate
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    writer.addDocument(doc("lucene", "lucene is a very popular search engine library"));
-    writer.addDocument(doc("solr", "solr is a very popular search server and is using lucene"));
-    writer.addDocument(doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
+    writer.addDocument(doc(writer, "lucene", "lucene is a very popular search engine library"));
+    writer.addDocument(doc(writer, "solr", "solr is a very popular search server and is using lucene"));
+    writer.addDocument(doc(writer, "nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
     reader = writer.getReader();
     writer.close();
     // we do not use newSearcher because the assertingXXX layers break
@@ -117,10 +115,10 @@
     assertEquals(3, tfs.get(2).intValue()); // f2:is + f2:is + f2:lucene
   }
   
-  static Document doc(String v1, String v2) {
-    Document doc = new Document();
-    doc.add(new TextField(F1, v1, Store.YES));
-    doc.add(new TextField(F2, v2, Store.YES));
+  static Document doc(RandomIndexWriter w, String v1, String v2) {
+    Document doc = w.newDocument();
+    doc.addLargeText(F1, v1);
+    doc.addLargeText(F2, v2);
     return doc;
   }
   
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 58f4f38..4a10afa 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -39,8 +38,8 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     for (int i = 0; i < values.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField(FIELD, values[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addAtom(FIELD, values[i]);
       writer.addDocument(doc);
     }
     IndexReader ir = writer.getReader();
@@ -118,8 +117,8 @@
   public void testEmbeddedBooleanScorer() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "doctors are people who prescribe medicines of which they know little, to cure diseases of which they know less, in human beings of whom they know nothing", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "doctors are people who prescribe medicines of which they know little, to cure diseases of which they know less, in human beings of whom they know nothing");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
index d75a58f..d161bc8 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
@@ -21,8 +21,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -31,8 +30,8 @@
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
@@ -49,17 +48,17 @@
     super.setUp();
     dir = newDirectory();
     iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    doc.add(idField);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+
     // add 500 docs with id 0..499
     for (int i = 0; i < 500; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = iw.newDocument();
+      doc.addUniqueInt("id", i);
       iw.addDocument(doc);
     }
     // delete 20 of them
     for (int i = 0; i < 20; i++) {
-      iw.deleteDocuments(new Term("id", Integer.toString(random().nextInt(iw.maxDoc()))));
+      iw.deleteDocuments(fieldTypes.newIntTerm("id", random().nextInt(iw.maxDoc())));
     }
     ir = iw.getReader();
     is = newSearcher(ir);
@@ -250,17 +249,20 @@
   public void testIsCacheAble() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    writer.addDocument(new Document());
+    Document doc = writer.newDocument();
+    doc.addInt("test", 17);
+    writer.addDocument(doc);
     writer.close();
 
     IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
+    FieldTypes fieldTypes = reader.getFieldTypes();
 
     // not cacheable:
     assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
     // returns default empty docidset, always cacheable:
-    assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
+    assertDocIdSetCacheable(reader, fieldTypes.newIntRangeFilter("test", Integer.valueOf(10000), true, Integer.valueOf(-10000), true), true);
     // is cacheable:
-    assertDocIdSetCacheable(reader, DocValuesRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), false);
+    assertDocIdSetCacheable(reader, fieldTypes.newIntDocValuesRangeFilter("test", Integer.valueOf(10), true, Integer.valueOf(20), true), false);
     // a fixedbitset filter is always cacheable
     assertDocIdSetCacheable(reader, new Filter() {
       @Override
@@ -293,8 +295,8 @@
     IndexSearcher searcher = newSearcher(reader, false);
 
     // add a doc, refresh the reader, and check that it's there
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", "1");
     writer.addDocument(doc);
 
     reader = refreshReader(reader);
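[Editor's note] With the schema living in FieldTypes, typed lookups replace hand-built terms and the static NumericRangeFilter/DocValuesRangeFilter factories. A sketch of the pattern used above, assuming the branch API exactly as shown in the hunk (`iw` and `reader` as in the test):

    FieldTypes fieldTypes = reader.getFieldTypes();

    // Typed delete term, instead of new Term("id", Integer.toString(42)):
    iw.deleteDocuments(fieldTypes.newIntTerm("id", 42));

    // Typed range filters, instead of NumericRangeFilter.newIntRange(...)
    // and DocValuesRangeFilter.newIntRange(...):
    Filter indexed   = fieldTypes.newIntRangeFilter("test", 10, true, 20, true);
    Filter docValues = fieldTypes.newIntDocValuesRangeFilter("test", 10, true, 20, true);
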
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java b/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
index fdde65b..35928e3 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
@@ -22,13 +22,10 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.Similarity;
@@ -53,19 +50,19 @@
     IndexWriterConfig config = newIndexWriterConfig(analyzer);
     config.setMergePolicy(newLogMergePolicy()); // we will use docids to validate
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
-    writer.addDocument(doc("lucene", "lucene is a very popular search engine library"));
-    writer.addDocument(doc("solr", "solr is a very popular search server and is using lucene"));
-    writer.addDocument(doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
+    writer.addDocument(doc(writer, "lucene", "lucene is a very popular search engine library"));
+    writer.addDocument(doc(writer, "solr", "solr is a very popular search server and is using lucene"));
+    writer.addDocument(doc(writer, "nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
     reader = writer.getReader();
     writer.close();
     searcher = newSearcher(reader);
     searcher.setSimilarity(new TFSimilarity());
   }
   
-  static Document doc(String v1, String v2) {
-    Document doc = new Document();
-    doc.add(new StringField(F1, v1, Store.YES));
-    doc.add(new TextField(F2, v2, Store.YES));
+  static Document doc(RandomIndexWriter writer, String v1, String v2) {
+    Document doc = writer.newDocument();
+    doc.addAtom(F1, v1);
+    doc.addLargeText(F2, v2);
     return doc;
   }
   
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 2cb8f52..b3d1498 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -17,8 +17,9 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -26,8 +27,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-
 /** This class only tests some basic functionality in CSQ, the main parts are mostly
  * tested by MultiTermQuery tests, explanations seems to be tested in TestExplanations! */
 public class TestConstantScoreQuery extends LuceneTestCase {
@@ -79,8 +78,8 @@
       directory = newDirectory();
       RandomIndexWriter writer = new RandomIndexWriter (random(), directory);
 
-      Document doc = new Document();
-      doc.add(newStringField("field", "term", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("field", "term");
       writer.addDocument(doc);
 
       reader = writer.getReader();
@@ -124,11 +123,11 @@
   public void testConstantScoreQueryAndFilter() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(newStringField("field", "a", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "a");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("field", "b", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "b");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -153,8 +152,8 @@
   public void testQueryWrapperFilter() throws IOException {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(newStringField("field", "a", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("field", "a");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
index 37bc44b..b9857f6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
@@ -24,17 +24,14 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDocument;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -94,7 +91,7 @@
   }
 
   @Override
-  protected void updateDocuments(Term id, List<? extends IndexDocument> docs) throws Exception {
+  protected void updateDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
     final long gen = genWriter.updateDocuments(id, docs);
 
     // Randomly verify the update "took":
@@ -118,7 +115,7 @@
   }
 
   @Override
-  protected void addDocuments(Term id, List<? extends IndexDocument> docs) throws Exception {
+  protected void addDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
     final long gen = genWriter.addDocuments(docs);
     // Randomly verify the add "took":
     if (random().nextInt(20) == 2) {
@@ -140,7 +137,7 @@
   }
 
   @Override
-  protected void addDocument(Term id, IndexDocument doc) throws Exception {
+  protected void addDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
     final long gen = genWriter.addDocument(doc);
 
     // Randomly verify the add "took":
@@ -163,7 +160,7 @@
   }
 
   @Override
-  protected void updateDocument(Term id, IndexDocument doc) throws Exception {
+  protected void updateDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
     final long gen = genWriter.updateDocument(id, doc);
     // Randomly verify the update "took":
     if (random().nextInt(20) == 2) {
@@ -310,8 +307,8 @@
     LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
     final TrackingIndexWriter writer = new TrackingIndexWriter(_writer);
     final SearcherManager manager = new SearcherManager(_writer, false, null);
-    Document doc = new Document();
-    doc.add(newTextField("test", "test", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("test", "test");
     writer.addDocument(doc);
     manager.maybeRefresh();
     Thread t = new Thread() {
@@ -390,7 +387,7 @@
 
     @Override
     public void updateDocument(Term term,
-        IndexDocument doc)
+        Iterable<? extends IndexableField> doc)
         throws IOException {
       super.updateDocument(term, doc);
       try {
@@ -445,7 +442,7 @@
         }
       }
     });
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     assertFalse(afterRefreshCalled.get());
     sm.maybeRefreshBlocking();
@@ -511,9 +508,9 @@
         commitThread.start();
         commitThreads.add(commitThread);
       }
-      Document d = new Document();
-      d.add(new TextField("count", i + "", Field.Store.NO));
-      d.add(new TextField("content", content, Field.Store.YES));
+      Document d = iw.newDocument();
+      d.addLargeText("count", i + "");
+      d.addLargeText("content", content);
       long start = System.currentTimeMillis();
       long l = tiw.addDocument(d);
       controlledRealTimeReopenThread.waitForGeneration(l);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
index 7f638e1..2e7b0a0 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
@@ -27,13 +27,10 @@
 
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
 /** Unit test for sorting code. */
@@ -57,16 +54,16 @@
     RandomGen random = new RandomGen(random());
     for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if too low the
                                            // problem doesn't show up
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       if ((i % 5) != 0) { // some documents must not have an entry in the first
                           // sort field
-        doc.add(new SortedDocValuesField("publicationDate_", new BytesRef(random.getLuceneDate())));
+        doc.addAtom("publicationDate_", random.getLuceneDate());
       }
       if ((i % 7) == 0) { // some documents to match the query (see below)
-        doc.add(newTextField("content", "test", Field.Store.YES));
+        doc.addLargeText("content", "test");
       }
       // every document has a defined 'mandant' field
-      doc.add(newStringField("mandant", Integer.toString(i % 3), Field.Store.YES));
+      doc.addAtom("mandant", Integer.toString(i % 3));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java
index 2226d8a..7ac3f7c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java
@@ -17,16 +17,15 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-
-import java.io.IOException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * DateFilter JUnit tests.
@@ -45,10 +44,10 @@
     
     long now = System.currentTimeMillis();
     
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     // add time that is in the past
-    doc.add(newStringField("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES));
-    doc.add(newTextField("body", "Today is a very sunny day in New York City", Field.Store.YES));
+    doc.addAtom("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND));
+    doc.addLargeText("body", "Today is a very sunny day in New York City");
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
@@ -108,10 +107,10 @@
     
     long now = System.currentTimeMillis();
     
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     // add time that is in the future
-    doc.add(newStringField("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND), Field.Store.YES));
-    doc.add(newTextField("body", "Today is a very sunny day in New York City", Field.Store.YES));
+    doc.addAtom("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND));
+    doc.addLargeText("body", "Today is a very sunny day in New York City");
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
index 6182d93..d86b402 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDateSort.java
@@ -19,18 +19,13 @@
 
 import java.util.Arrays;
 
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Test date sorting, i.e. auto-sorting of fields with type "long".
@@ -53,16 +48,16 @@
 
     // oldest doc:
     // Add the first document.  text = "Document 1"  dateTime = Oct 10 03:25:22 EDT 2007
-    writer.addDocument(createDocument("Document 1", 1192001122000L));
+    writer.addDocument(createDocument(writer, "Document 1", 1192001122000L));
     // Add the second document.  text = "Document 2"  dateTime = Oct 10 03:25:26 EDT 2007 
-    writer.addDocument(createDocument("Document 2", 1192001126000L));
+    writer.addDocument(createDocument(writer, "Document 2", 1192001126000L));
     // Add the third document.  text = "Document 3"  dateTime = Oct 11 07:12:13 EDT 2007 
-    writer.addDocument(createDocument("Document 3", 1192101133000L));
+    writer.addDocument(createDocument(writer, "Document 3", 1192101133000L));
     // Add the fourth document.  text = "Document 4"  dateTime = Oct 11 08:02:09 EDT 2007
-    writer.addDocument(createDocument("Document 4", 1192104129000L));
+    writer.addDocument(createDocument(writer, "Document 4", 1192104129000L));
     // latest doc:
     // Add the fifth document.  text = "Document 5"  dateTime = Oct 12 13:25:43 EDT 2007
-    writer.addDocument(createDocument("Document 5", 1192209943000L));
+    writer.addDocument(createDocument(writer, "Document 5", 1192209943000L));
 
     reader = writer.getReader();
     writer.close();
@@ -85,8 +80,8 @@
     String[] actualOrder = new String[5];
     ScoreDoc[] hits = searcher.search(query, null, 1000, sort).scoreDocs;
     for (int i = 0; i < hits.length; i++) {
-      StoredDocument document = searcher.doc(hits[i].doc);
-      String text = document.get(TEXT_FIELD);
+      Document document = searcher.doc(hits[i].doc);
+      String text = document.getString(TEXT_FIELD);
       actualOrder[i] = text;
     }
 
@@ -101,18 +96,15 @@
     assertEquals(Arrays.asList(expectedOrder), Arrays.asList(actualOrder));
   }
 
-  private Document createDocument(String text, long time) {
-    Document document = new Document();
+  private Document createDocument(RandomIndexWriter writer, String text, long time) {
+    Document document = writer.newDocument();
 
     // Add the text field.
-    Field textField = newTextField(TEXT_FIELD, text, Field.Store.YES);
-    document.add(textField);
+    document.addLargeText(TEXT_FIELD, text);
 
     // Add the date/time field.
-    String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
-    Field dateTimeField = newStringField(DATE_TIME_FIELD, dateTimeString, Field.Store.YES);
-    document.add(dateTimeField);
-    document.add(new SortedDocValuesField(DATE_TIME_FIELD, new BytesRef(dateTimeString)));
+    document.addAtom(DATE_TIME_FIELD,
+                     DateTools.timeToString(time, DateTools.Resolution.SECOND));
 
     return document;
   }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 34923e3..8833be9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -17,33 +17,30 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+import java.util.Locale;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
-
-import java.text.DecimalFormat;
-import java.text.DecimalFormatSymbols;
-import java.util.Locale;
-import java.io.IOException;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Test of the DisjunctionMaxQuery.
@@ -90,11 +87,6 @@
   public IndexReader r;
   public IndexSearcher s;
   
-  private static final FieldType nonAnalyzedType = new FieldType(TextField.TYPE_STORED);
-  static {
-    nonAnalyzedType.setTokenized(false);
-  }
-  
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -104,57 +96,47 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
                              .setSimilarity(sim).setMergePolicy(newLogMergePolicy()));
     
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dek");
+    fieldTypes.setMultiValued("hed");
+
     // hed is the most important field, dek is secondary
     
     // d1 is an "ok" match for: albino elephant
     {
-      Document d1 = new Document();
-      d1.add(newField("id", "d1", nonAnalyzedType));// Field.Keyword("id",
-                                                                               // "d1"));
-      d1
-          .add(newTextField("hed", "elephant", Field.Store.YES));// Field.Text("hed", "elephant"));
-      d1
-          .add(newTextField("dek", "elephant", Field.Store.YES));// Field.Text("dek", "elephant"));
+      Document d1 = writer.newDocument();
+      d1.addUniqueAtom("id", "d1");
+      d1.addLargeText("hed", "elephant");
+      d1.addLargeText("dek", "elephant");
       writer.addDocument(d1);
     }
     
     // d2 is a "good" match for: albino elephant
     {
-      Document d2 = new Document();
-      d2.add(newField("id", "d2", nonAnalyzedType));// Field.Keyword("id",
-                                                                               // "d2"));
-      d2
-          .add(newTextField("hed", "elephant", Field.Store.YES));// Field.Text("hed", "elephant"));
-      d2.add(newTextField("dek", "albino", Field.Store.YES));// Field.Text("dek",
-                                                                                // "albino"));
-      d2
-          .add(newTextField("dek", "elephant", Field.Store.YES));// Field.Text("dek", "elephant"));
+      Document d2 = writer.newDocument();
+      d2.addUniqueAtom("id", "d2");
+      d2.addLargeText("hed", "elephant");
+      d2.addLargeText("dek", "albino");
+      d2.addLargeText("dek", "elephant");
       writer.addDocument(d2);
     }
     
     // d3 is a "better" match for: albino elephant
     {
-      Document d3 = new Document();
-      d3.add(newField("id", "d3", nonAnalyzedType));// Field.Keyword("id",
-                                                                               // "d3"));
-      d3.add(newTextField("hed", "albino", Field.Store.YES));// Field.Text("hed",
-                                                                                // "albino"));
-      d3
-          .add(newTextField("hed", "elephant", Field.Store.YES));// Field.Text("hed", "elephant"));
+      Document d3 = writer.newDocument();
+      d3.addUniqueAtom("id", "d3");
+      d3.addLargeText("hed", "albino");
+      d3.addLargeText("hed", "elephant");
       writer.addDocument(d3);
     }
     
     // d4 is the "best" match for: albino elephant
     {
-      Document d4 = new Document();
-      d4.add(newField("id", "d4", nonAnalyzedType));// Field.Keyword("id",
-                                                                               // "d4"));
-      d4.add(newTextField("hed", "albino", Field.Store.YES));// Field.Text("hed",
-                                                                                // "albino"));
-      d4
-          .add(newField("hed", "elephant", nonAnalyzedType));// Field.Text("hed", "elephant"));
-      d4.add(newTextField("dek", "albino", Field.Store.YES));// Field.Text("dek",
-                                                                                // "albino"));
+      Document d4 = writer.newDocument();
+      d4.addUniqueAtom("id", "d4");
+      d4.addLargeText("hed", "albino");
+      d4.addLargeText("hed", "elephant");
+      d4.addLargeText("dek", "albino");
       writer.addDocument(d4);
     }
     
@@ -399,10 +381,10 @@
       float score2 = h[2].score;
       float score3 = h[3].score;
       
-      String doc0 = s.doc(h[0].doc).get("id");
-      String doc1 = s.doc(h[1].doc).get("id");
-      String doc2 = s.doc(h[2].doc).get("id");
-      String doc3 = s.doc(h[3].doc).get("id");
+      String doc0 = s.doc(h[0].doc).getString("id");
+      String doc1 = s.doc(h[1].doc).getString("id");
+      String doc2 = s.doc(h[2].doc).getString("id");
+      String doc3 = s.doc(h[3].doc).getString("id");
       
       assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2")
           || doc0.equals("d4"));
@@ -453,10 +435,10 @@
       float score2 = h[2].score;
       float score3 = h[3].score;
       
-      String doc0 = s.doc(h[0].doc).get("id");
-      String doc1 = s.doc(h[1].doc).get("id");
-      String doc2 = s.doc(h[2].doc).get("id");
-      String doc3 = s.doc(h[3].doc).get("id");
+      String doc0 = s.doc(h[0].doc).getString("id");
+      String doc1 = s.doc(h[1].doc).getString("id");
+      String doc2 = s.doc(h[2].doc).getString("id");
+      String doc3 = s.doc(h[3].doc).getString("id");
       
       assertEquals("doc0 should be d4: ", "d4", doc0);
       assertEquals("doc1 should be d3: ", "d3", doc1);
@@ -485,8 +467,8 @@
     IndexWriterConfig config = new IndexWriterConfig(indexerAnalyzer);
     IndexWriter writer = new IndexWriter(directory, config);
     String FIELD = "content";
-    Document d = new Document();
-    d.add(new TextField(FIELD, "clockwork orange", Field.Store.YES));
+    Document d = writer.newDocument();
+    d.addLargeText(FIELD, "clockwork orange");
     writer.addDocument(d);
     writer.close();
 
@@ -529,10 +511,9 @@
     DecimalFormat f = new DecimalFormat("0.000000000", DecimalFormatSymbols.getInstance(Locale.ROOT));
     
     for (int i = 0; i < h.length; i++) {
-      StoredDocument d = searcher.doc(h[i].doc);
+      Document d = searcher.doc(h[i].doc);
       float score = h[i].score;
-      System.err
-          .println("#" + i + ": " + f.format(score) + " - " + d.get("id"));
+      System.err.println("#" + i + ": " + f.format(score) + " - " + d.get("id"));
     }
   }
 }
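[Editor's note] One non-mechanical detail in this file: fields that hold more than one value per document ("hed", "dek") are declared multi-valued up front via FieldTypes.setMultiValued, presumably because the schema would otherwise reject a second add for the same field name. A sketch under that assumption, using only the branch API from the hunk above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("hed");       // declare before adding two values

    Document d = writer.newDocument();
    d.addLargeText("hed", "albino");
    d.addLargeText("hed", "elephant");      // second value for the same field
    writer.addDocument(d);
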
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
index f1e4b41..3a516d8 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java
@@ -21,8 +21,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.*;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
@@ -38,17 +38,12 @@
     Directory store = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), store, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
-    Field f1 = newTextField("field", "word", Field.Store.YES);
-    Field f2 = newTextField("field", "word", Field.Store.YES);
-    f2.setBoost(2.0f);
-
-    Document d1 = new Document();
-    Document d2 = new Document();
-
-    d1.add(f1);                                 // boost = 1
-    d2.add(f2);                                 // boost = 2
-
+    Document d1 = writer.newDocument();
+    d1.addLargeText("field", "word");           // boost = 1
     writer.addDocument(d1);
+
+    Document d2 = writer.newDocument();
+    d2.addLargeText("field", "word", 2.0f);     // boost = 2
     writer.addDocument(d2);
 
     IndexReader reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java
index 35c80f3..b20c1fc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java
@@ -22,17 +22,16 @@
 import java.util.Arrays;
 import java.util.Iterator;
 
-import junit.framework.Assert;
-
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.LuceneTestCase;
 
+import junit.framework.Assert;
+
 public class TestDocIdSet extends LuceneTestCase {
   public void testFilteredDocIdSet() throws Exception {
     final int maxdoc=10;
@@ -111,8 +110,8 @@
     // IndexSearcher, everything works fine. This came up in LUCENE-1754.
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("c", "val", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addAtom("c", "val");
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
@@ -137,8 +136,8 @@
   public void testNullIteratorFilteredDocIdSet() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("c", "val", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addAtom("c", "val");
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
index 7f393fa..d155fff 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java
@@ -25,8 +25,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -54,16 +53,17 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, 
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued(fieldName);
     List<String> terms = new ArrayList<>();
     int num = atLeast(200);
     for (int i = 0; i < num; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("id", Integer.toString(i));
       int numTerms = random().nextInt(4);
       for (int j = 0; j < numTerms; j++) {
         String s = TestUtil.randomUnicodeString(random());
-        doc.add(newStringField(fieldName, s, Field.Store.NO));
-        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
+        doc.addAtom(fieldName, s);
         terms.add(s);
       }
       writer.addDocument(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
index 2b8857e..fc8893c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java
@@ -25,18 +25,16 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.RegExp;
-import org.apache.lucene.util.UnicodeUtil;
 
 /**
  * Tests the DocTermOrdsRewriteMethod
@@ -56,16 +54,17 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, 
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued(fieldName);
     List<String> terms = new ArrayList<>();
     int num = atLeast(200);
     for (int i = 0; i < num; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("id", Integer.toString(i));
       int numTerms = random().nextInt(4);
       for (int j = 0; j < numTerms; j++) {
         String s = TestUtil.randomUnicodeString(random());
-        doc.add(newStringField(fieldName, s, Field.Store.NO));
-        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
+        doc.addAtom(fieldName, s);
         terms.add(s);
       }
       writer.addDocument(doc);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java
new file mode 100644
index 0000000..776beb0
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java
@@ -0,0 +1,480 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.junit.Test;
+
+/**
+ * A basic 'positive' unit test class for the DocValues range filters.
+ *
+ * <p>
+ * NOTE: at the moment, this class only tests for 'positive' results;
+ * it does not verify the results to ensure there are no 'false positives',
+ * nor does it adequately test 'negative' results.  It also does not test
+ * that garbage input results in an Exception.
+ */
+public class TestDocValuesRangeFilter extends BaseTestRangeFilter {
+
+  @Test
+  public void testRangeFilterId() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int medId = ((maxId - minId) / 2);
+        
+    String minIP = pad(minId);
+    String maxIP = pad(maxId);
+    String medIP = pad(medId);
+    
+    int numDocs = reader.numDocs();
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+    FieldTypes fieldTypes = search.getFieldTypes();
+
+    // test id, bounded on both ends
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,maxIP,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,maxIP,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,maxIP,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,maxIP,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,maxIP,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,medIP,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+
+    // unbounded id
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",(String)null,T,null,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,null,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,maxIP,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,null,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,maxIP,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,maxIP,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,medIP,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,minIP,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,F,medIP,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,F,maxIP,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,minIP,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,minIP,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,T,maxIP,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,T,null,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,medIP,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+  }
+
+  @Test
+  public void testFieldCacheRangeFilterRand() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    String minRP = pad(signedIndexDir.minR);
+    String maxRP = pad(signedIndexDir.maxR);
+    
+    int numDocs = reader.numDocs();
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test extremes, bounded on both ends
+        
+    FieldTypes fieldTypes = search.getFieldTypes();
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,maxRP,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,maxRP,F), numDocs).scoreDocs;
+    assertEquals("all but biggest", numDocs-1, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,maxRP,T), numDocs).scoreDocs;
+    assertEquals("all but smallest", numDocs-1, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,maxRP,F), numDocs).scoreDocs;
+    assertEquals("all but extremes", numDocs-2, result.length);
+    
+    // unbounded
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,null,F), numDocs).scoreDocs;
+    assertEquals("smallest and up", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,maxRP,T), numDocs).scoreDocs;
+    assertEquals("biggest and down", numDocs, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,null,F), numDocs).scoreDocs;
+    assertEquals("not smallest, but up", numDocs-1, result.length);
+        
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,maxRP,F), numDocs).scoreDocs;
+    assertEquals("not biggest, but down", numDocs-1, result.length);
+        
+    // very small sets
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,minRP,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,F,maxRP,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,minRP,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,minRP,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,T,maxRP,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,T,null,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterInts() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    int medId = ((maxId - minId) / 2);
+    Integer minIdO = Integer.valueOf(minId);
+    Integer maxIdO = Integer.valueOf(maxId);
+    Integer medIdO = Integer.valueOf(medId);
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+        
+    FieldTypes fieldTypes = search.getFieldTypes();
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+    
+    // unbounded id
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",(Integer) null,T,null,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,null,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,null,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,medIdO,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,minIdO,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,F,medIdO,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,minIdO,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,minIdO,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,null,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    // special cases
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",Integer.valueOf(Integer.MAX_VALUE),F,null,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,Integer.valueOf(Integer.MIN_VALUE),F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,minIdO,T), numDocs).scoreDocs;
+    assertEquals("inverse range", 0, result.length);
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterLongs() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    int medId = ((maxId - minId) / 2);
+    Long minIdO = Long.valueOf(minId);
+    Long maxIdO = Long.valueOf(maxId);
+    Long medIdO = Long.valueOf(medId);
+        
+    assertEquals("num of docs", numDocs, 1+ maxId - minId);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    // test id, bounded on both ends
+        
+    FieldTypes fieldTypes = search.getFieldTypes();
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("all but last", numDocs-1, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("all but first", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("all but ends", numDocs-2, result.length);
+    
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("med and up", 1+ maxId-medId, result.length);
+        
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("up to med", 1+ medId-minId, result.length);
+    
+    // unbounded id
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",(Long) null,T,null,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,null,F), numDocs).scoreDocs;
+    assertEquals("min and up", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("max and down", numDocs, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,null,F), numDocs).scoreDocs;
+    assertEquals("not min, but up", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("not max, but down", numDocs-1, result.length);
+        
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("med and up, not max", maxId-medId, result.length);
+        
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,medIdO,T), numDocs).scoreDocs;
+    assertEquals("not min, up to med", medId-minId, result.length);
+
+    // very small sets
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,minIdO,F), numDocs).scoreDocs;
+    assertEquals("min,min,F,F", 0, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,F,medIdO,F), numDocs).scoreDocs;
+    assertEquals("med,med,F,F", 0, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
+    assertEquals("max,max,F,F", 0, result.length);
+                     
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,minIdO,T), numDocs).scoreDocs;
+    assertEquals("min,min,T,T", 1, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,minIdO,T), numDocs).scoreDocs;
+    assertEquals("nul,min,F,T", 1, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
+    assertEquals("max,max,T,T", 1, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,null,F), numDocs).scoreDocs;
+    assertEquals("max,nul,T,T", 1, result.length);
+
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("med,med,T,T", 1, result.length);
+    
+    // special cases
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",Long.valueOf(Long.MAX_VALUE),F,null,F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,Long.valueOf(Long.MIN_VALUE),F), numDocs).scoreDocs;
+    assertEquals("overflow special case", 0, result.length);
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,minIdO,T), numDocs).scoreDocs;
+    assertEquals("inverse range", 0, result.length);
+  }
+  
+  // float and double tests are a bit minimalistic, but it's complicated because of missing precision
+  
+  @Test
+  public void testFieldCacheRangeFilterFloats() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    Float minIdO = Float.valueOf(minId + .5f);
+    Float medIdO = Float.valueOf(minIdO.floatValue() + (maxId - minId) / 2.0f);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    FieldTypes fieldTypes = search.getFieldTypes();
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs/2, result.length);
+    int count = 0;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",null,F,medIdO,T), numDocs).scoreDocs;
+    count += result.length;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",medIdO,F,null,F), numDocs).scoreDocs;
+    count += result.length;
+    assertEquals("sum of two concenatted ranges", numDocs, count);
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",(Float) null,T,null,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",Float.valueOf(Float.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",null,F,Float.valueOf(Float.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+  }
+  
+  @Test
+  public void testFieldCacheRangeFilterDoubles() throws IOException {
+
+    IndexReader reader = signedIndexReader;
+    IndexSearcher search = newSearcher(reader);
+
+    int numDocs = reader.numDocs();
+    Double minIdO = Double.valueOf(minId + .5);
+    Double medIdO = Double.valueOf(minIdO.doubleValue() + (maxId - minId) / 2.0);
+        
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    FieldTypes fieldTypes = search.getFieldTypes();
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs/2, result.length);
+    int count = 0;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",null,F,medIdO,T), numDocs).scoreDocs;
+    count += result.length;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",medIdO,F,null,F), numDocs).scoreDocs;
+    count += result.length;
+    assertEquals("sum of two concenatted ranges", numDocs, count);
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",(Double) null,T,null,T), numDocs).scoreDocs;
+    assertEquals("find all", numDocs, result.length);
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",Double.valueOf(Double.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",null,F,Double.valueOf(Double.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
+    assertEquals("infinity special case", 0, result.length);
+  }
+  
+  // test using a sparse index (with deleted docs).
+  @Test
+  public void testSparseIndex() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    for (int d = -20; d <= 20; d++) {
+      Document doc = writer.newDocument();
+      doc.addInt("id_int", d);
+      doc.addAtom("body", "body");
+      writer.addDocument(doc);
+    }
+    
+    writer.forceMerge(1);
+    writer.deleteDocuments(fieldTypes.newIntTerm("id_int", 0));
+    writer.close();
+
+    IndexReader reader = DirectoryReader.open(dir);
+    IndexSearcher search = newSearcher(reader);
+    assertTrue(reader.hasDeletions());
+
+    ScoreDoc[] result;
+    Query q = new TermQuery(new Term("body","body"));
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,20,T), 100).scoreDocs;
+    assertEquals("find all", 40, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",0,T,20,T), 100).scoreDocs;
+    assertEquals("find all", 20, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,0,T), 100).scoreDocs;
+    assertEquals("find all", 20, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",10,T,20,T), 100).scoreDocs;
+    assertEquals("find all", 11, result.length);
+
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,-10,T), 100).scoreDocs;
+    assertEquals("find all", 11, result.length);
+    reader.close();
+    dir.close();
+  }
+  
+}
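All of the new FieldTypes.new*DocValuesRangeFilter factories above interleave each bound with its inclusive flag — (field, min, minInclusive, max, maxInclusive) — where T and F are BaseTestRangeFilter's boolean constants and a null bound means that end is open. A small usage sketch under that reading (the factories themselves are branch-only API; the searcher is assumed already open):

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;

    class RangeFilterSketch {
      // Matches docs whose id_int falls in [lo, hi]; pass null for an open end.
      static TopDocs idRange(IndexSearcher searcher, Integer lo, Integer hi) throws Exception {
        FieldTypes fieldTypes = searcher.getFieldTypes();
        Filter range = fieldTypes.newIntDocValuesRangeFilter("id_int", lo, true, hi, true);
        Query q = new TermQuery(new Term("body", "body"));
        return searcher.search(q, range, 100);
      }
    }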
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
index 89821c5..3df959c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
@@ -20,12 +20,10 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -47,21 +45,17 @@
   public void testSimple() throws Exception {    
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    Field field = newTextField("foo", "", Field.Store.NO);
-    doc.add(field);
-    Field dvField = new FloatDocValuesField("foo_boost", 0.0F);
-    doc.add(dvField);
-    Field field2 = newTextField("bar", "", Field.Store.NO);
-    doc.add(field2);
     
-    field.setStringValue("quick brown fox");
-    field2.setStringValue("quick brown fox");
-    dvField.setFloatValue(2f); // boost x2
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "quick brown fox");
+    doc.addLargeText("bar", "quick brown fox");
+    doc.addFloat("foo_boost", 2f);
     iw.addDocument(doc);
-    field.setStringValue("jumps over lazy brown dog");
-    field2.setStringValue("jumps over lazy brown dog");
-    dvField.setFloatValue(4f); // boost x4
+
+    doc = iw.newDocument();
+    doc.addLargeText("foo", "jumps over lazy brown dog");
+    doc.addLargeText("bar", "jumps over lazy brown dog");
+    doc.addFloat("foo_boost", 4f); // boost x4
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
index cfee9db..81831a3 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTermination.java
@@ -19,9 +19,8 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -38,7 +37,7 @@
     writer = new RandomIndexWriter(random(), dir);
     final int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; i++) {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       if (rarely()) {
         writer.commit();
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
index bb5887e..f818f34 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -17,20 +17,18 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.FieldValueHitQueue.Entry;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.store.*;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class TestElevationComparator extends LuceneTestCase {
 
@@ -40,18 +38,18 @@
   public void testSorting() throws Throwable {
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(
-        directory,
-        newIndexWriterConfig(new MockAnalyzer(random())).
-            setMaxBufferedDocs(2).
-            setMergePolicy(newLogMergePolicy(1000)).
-            setSimilarity(new DefaultSimilarity())
-    );
-    writer.addDocument(adoc(new String[] {"id", "a", "title", "ipod", "str_s", "a"}));
-    writer.addDocument(adoc(new String[] {"id", "b", "title", "ipod ipod", "str_s", "b"}));
-    writer.addDocument(adoc(new String[] {"id", "c", "title", "ipod ipod ipod", "str_s","c"}));
-    writer.addDocument(adoc(new String[] {"id", "x", "title", "boosted", "str_s", "x"}));
-    writer.addDocument(adoc(new String[] {"id", "y", "title", "boosted boosted", "str_s","y"}));
-    writer.addDocument(adoc(new String[] {"id", "z", "title", "boosted boosted boosted","str_s", "z"}));
+                                         directory,
+                                         newIndexWriterConfig(new MockAnalyzer(random())).
+                                         setMaxBufferedDocs(2).
+                                         setMergePolicy(newLogMergePolicy(1000)).
+                                         setSimilarity(new DefaultSimilarity())
+                                         );
+    adoc(writer, new String[] {"id", "a", "title", "ipod", "str_s", "a"});
+    adoc(writer, new String[] {"id", "b", "title", "ipod ipod", "str_s", "b"});
+    adoc(writer, new String[] {"id", "c", "title", "ipod ipod ipod", "str_s","c"});
+    adoc(writer, new String[] {"id", "x", "title", "boosted", "str_s", "x"});
+    adoc(writer, new String[] {"id", "y", "title", "boosted boosted", "str_s","y"});
+    adoc(writer, new String[] {"id", "z", "title", "boosted boosted boosted","str_s", "z"});
 
     IndexReader r = DirectoryReader.open(writer, true);
     writer.close();
@@ -75,9 +73,9 @@
     newq.add(getElevatedQuery(new String[] {"id", "a", "id", "x"}), BooleanClause.Occur.SHOULD);
 
     Sort sort = new Sort(
-        new SortField("id", new ElevationComparatorSource(priority), false),
-        new SortField(null, SortField.Type.SCORE, reversed)
-      );
+                         new SortField("id", new ElevationComparatorSource(priority), false),
+                         new SortField(null, SortField.Type.SCORE, reversed)
+                         );
 
     TopDocsCollector<Entry> topCollector = TopFieldCollector.create(sort, 50, false, true, true);
     searcher.search(newq, null, topCollector);
@@ -100,40 +98,41 @@
     }
 
     /*
-    for (int i = 0; i < nDocsReturned; i++) {
-     ScoreDoc scoreDoc = topDocs.scoreDocs[i];
-     ids[i] = scoreDoc.doc;
-     scores[i] = scoreDoc.score;
-     documents[i] = searcher.doc(ids[i]);
-     System.out.println("ids[i] = " + ids[i]);
-     System.out.println("documents[i] = " + documents[i]);
-     System.out.println("scores[i] = " + scores[i]);
-   }
+      for (int i = 0; i < nDocsReturned; i++) {
+      ScoreDoc scoreDoc = topDocs.scoreDocs[i];
+      ids[i] = scoreDoc.doc;
+      scores[i] = scoreDoc.score;
+      documents[i] = searcher.doc(ids[i]);
+      System.out.println("ids[i] = " + ids[i]);
+      System.out.println("documents[i] = " + documents[i]);
+      System.out.println("scores[i] = " + scores[i]);
+      }
     */
- }
+  }
 
- private Query getElevatedQuery(String[] vals) {
-   BooleanQuery q = new BooleanQuery(false);
-   q.setBoost(0);
-   int max = (vals.length / 2) + 5;
-   for (int i = 0; i < vals.length - 1; i += 2) {
-     q.add(new TermQuery(new Term(vals[i], vals[i + 1])), BooleanClause.Occur.SHOULD);
-     priority.put(new BytesRef(vals[i + 1]), Integer.valueOf(max--));
-     // System.out.println(" pri doc=" + vals[i+1] + " pri=" + (1+max));
-   }
-   return q;
- }
+  private Query getElevatedQuery(String[] vals) {
+    BooleanQuery q = new BooleanQuery(false);
+    q.setBoost(0);
+    int max = (vals.length / 2) + 5;
+    for (int i = 0; i < vals.length - 1; i += 2) {
+      q.add(new TermQuery(new Term(vals[i], vals[i + 1])), BooleanClause.Occur.SHOULD);
+      priority.put(new BytesRef(vals[i + 1]), Integer.valueOf(max--));
+      // System.out.println(" pri doc=" + vals[i+1] + " pri=" + (1+max));
+    }
+    return q;
+  }
 
- private Document adoc(String[] vals) {
-   Document doc = new Document();
-   for (int i = 0; i < vals.length - 2; i += 2) {
-     doc.add(newTextField(vals[i], vals[i + 1], Field.Store.YES));
-     if (vals[i].equals("id")) {
-       doc.add(new SortedDocValuesField(vals[i], new BytesRef(vals[i+1])));
-     }
-   }
-   return doc;
- }
+  private void adoc(IndexWriter w, String[] vals) throws IOException {
+    Document doc = w.newDocument();
+    for (int i = 0; i < vals.length - 2; i += 2) {
+      if (vals[i].equals("id")) {
+        doc.addUniqueAtom(vals[i], new BytesRef(vals[i+1]));
+      } else {
+        doc.addLargeText(vals[i], vals[i + 1]);
+      }
+    }
+    w.addDocument(doc);
+  }
 }
 
 class ElevationComparatorSource extends FieldComparatorSource {
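In the reworked adoc helper above, documents must be obtained from the writer (they carry its FieldTypes schema), so the helper now adds the document itself instead of returning one. The single addUniqueAtom(name, BytesRef) call also subsumes the old two-field pairing whose sorted doc values the elevation comparator below still depends on. Roughly, under the branch API shown in the hunk (writer and BytesRef assumed in scope):

    // Old trunk pairing, kept in sync by hand:
    //   doc.add(newTextField("id", "a", Field.Store.YES));
    //   doc.add(new SortedDocValuesField("id", new BytesRef("a")));
    // Branch equivalent: one call yields both the indexed term and the
    // sorted doc values the elevation comparator sorts on.
    Document doc = writer.newDocument();
    doc.addUniqueAtom("id", new BytesRef("a"));
    doc.addLargeText("title", "ipod");
    writer.addDocument(doc);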
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
deleted file mode 100644
index 098f599..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
+++ /dev/null
@@ -1,480 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
-import org.junit.Test;
-
-/**
- * A basic 'positive' Unit test class for the FieldCacheRangeFilter class.
- *
- * <p>
- * NOTE: at the moment, this class only tests for 'positive' results,
- * it does not verify the results to ensure there are no 'false positives',
- * nor does it adequately test 'negative' results.  It also does not test
- * that garbage in results in an Exception.
- */
-public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
-
-  @Test
-  public void testRangeFilterId() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int medId = ((maxId - minId) / 2);
-        
-    String minIP = pad(minId);
-    String maxIP = pad(maxId);
-    String medIP = pad(medId);
-    
-    int numDocs = reader.numDocs();
-        
-    assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    // test id, bounded on both ends
-    result = search.search(q, DocValuesRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
-    assertEquals("all but last", numDocs-1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,F,T), numDocs).scoreDocs;
-    assertEquals("all but first", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,maxIP,F,F), numDocs).scoreDocs;
-    assertEquals("all but ends", numDocs-2, result.length);
-    
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,maxIP,T,T), numDocs).scoreDocs;
-    assertEquals("med and up", 1+ maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,medIP,T,T), numDocs).scoreDocs;
-    assertEquals("up to med", 1+ medId-minId, result.length);
-
-    // unbounded id
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,null,T,F), numDocs).scoreDocs;
-    assertEquals("min and up", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,maxIP,F,T), numDocs).scoreDocs;
-    assertEquals("max and down", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,null,F,F), numDocs).scoreDocs;
-    assertEquals("not min, but up", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,maxIP,F,F), numDocs).scoreDocs;
-    assertEquals("not max, but down", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,maxIP,T,F), numDocs).scoreDocs;
-    assertEquals("med and up, not max", maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,medIP,F,T), numDocs).scoreDocs;
-    assertEquals("not min, up to med", medId-minId, result.length);
-
-    // very small sets
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,minIP,F,F), numDocs).scoreDocs;
-    assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,medIP,F,F), numDocs).scoreDocs;
-    assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
-    assertEquals("max,max,F,F", 0, result.length);
-                     
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",minIP,minIP,T,T), numDocs).scoreDocs;
-    assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",null,minIP,F,T), numDocs).scoreDocs;
-    assertEquals("nul,min,F,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
-    assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",maxIP,null,T,F), numDocs).scoreDocs;
-    assertEquals("max,nul,T,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs;
-    assertEquals("med,med,T,T", 1, result.length);
-  }
-
-  @Test
-  public void testFieldCacheRangeFilterRand() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    String minRP = pad(signedIndexDir.minR);
-    String maxRP = pad(signedIndexDir.maxR);
-    
-    int numDocs = reader.numDocs();
-        
-    assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    // test extremes, bounded on both ends
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
-    assertEquals("all but biggest", numDocs-1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
-    assertEquals("all but smallest", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
-    assertEquals("all but extremes", numDocs-2, result.length);
-    
-    // unbounded
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,null,T,F), numDocs).scoreDocs;
-    assertEquals("smallest and up", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,maxRP,F,T), numDocs).scoreDocs;
-    assertEquals("biggest and down", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,null,F,F), numDocs).scoreDocs;
-    assertEquals("not smallest, but up", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,maxRP,F,F), numDocs).scoreDocs;
-    assertEquals("not biggest, but down", numDocs-1, result.length);
-        
-    // very small sets
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,minRP,F,F), numDocs).scoreDocs;
-    assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
-    assertEquals("max,max,F,F", 0, result.length);
-                     
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",minRP,minRP,T,T), numDocs).scoreDocs;
-    assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",null,minRP,F,T), numDocs).scoreDocs;
-    assertEquals("nul,min,F,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
-    assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs;
-    assertEquals("max,nul,T,T", 1, result.length);
-  }
-  
-  @Test
-  public void testFieldCacheRangeFilterInts() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int numDocs = reader.numDocs();
-    int medId = ((maxId - minId) / 2);
-    Integer minIdO = Integer.valueOf(minId);
-    Integer maxIdO = Integer.valueOf(maxId);
-    Integer medIdO = Integer.valueOf(medId);
-        
-    assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    // test id, bounded on both ends
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("all but last", numDocs-1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("all but first", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("all but ends", numDocs-2, result.length);
-    
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med and up", 1+ maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("up to med", 1+ medId-minId, result.length);
-    
-    // unbounded id
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("min and up", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("max and down", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,null,F,F), numDocs).scoreDocs;
-    assertEquals("not min, but up", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("not max, but down", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("med and up, not max", maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,medIdO,F,T), numDocs).scoreDocs;
-    assertEquals("not min, up to med", medId-minId, result.length);
-
-    // very small sets
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,minIdO,F,F), numDocs).scoreDocs;
-    assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,medIdO,F,F), numDocs).scoreDocs;
-    assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("max,max,F,F", 0, result.length);
-                     
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",minIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,minIdO,F,T), numDocs).scoreDocs;
-    assertEquals("nul,min,F,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("max,nul,T,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",medIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med,med,T,T", 1, result.length);
-    
-    // special cases
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",maxIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("inverse range", 0, result.length);
-  }
-  
-  @Test
-  public void testFieldCacheRangeFilterLongs() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int numDocs = reader.numDocs();
-    int medId = ((maxId - minId) / 2);
-    Long minIdO = Long.valueOf(minId);
-    Long maxIdO = Long.valueOf(maxId);
-    Long medIdO = Long.valueOf(medId);
-        
-    assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    // test id, bounded on both ends
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("all but last", numDocs-1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("all but first", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("all but ends", numDocs-2, result.length);
-    
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med and up", 1+ maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("up to med", 1+ medId-minId, result.length);
-    
-    // unbounded id
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("min and up", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("max and down", numDocs, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,null,F,F), numDocs).scoreDocs;
-    assertEquals("not min, but up", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("not max, but down", numDocs-1, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("med and up, not max", maxId-medId, result.length);
-        
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,medIdO,F,T), numDocs).scoreDocs;
-    assertEquals("not min, up to med", medId-minId, result.length);
-
-    // very small sets
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,minIdO,F,F), numDocs).scoreDocs;
-    assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,medIdO,F,F), numDocs).scoreDocs;
-    assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("max,max,F,F", 0, result.length);
-                     
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",minIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,minIdO,F,T), numDocs).scoreDocs;
-    assertEquals("nul,min,F,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("max,nul,T,T", 1, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",medIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med,med,T,T", 1, result.length);
-    
-    // special cases
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newLongRange("id_long",maxIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("inverse range", 0, result.length);
-  }
-  
-  // float and double tests are a bit minimalistic, but it's complicated, because missing precision
-  
-  @Test
-  public void testFieldCacheRangeFilterFloats() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int numDocs = reader.numDocs();
-    Float minIdO = Float.valueOf(minId + .5f);
-    Float medIdO = Float.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0f);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",minIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs/2, result.length);
-    int count = 0;
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,medIdO,F,T), numDocs).scoreDocs;
-    count += result.length;
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",medIdO,null,F,F), numDocs).scoreDocs;
-    count += result.length;
-    assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
-    assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newFloatRange("id_float",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
-    assertEquals("infinity special case", 0, result.length);
-  }
-  
-  @Test
-  public void testFieldCacheRangeFilterDoubles() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int numDocs = reader.numDocs();
-    Double minIdO = Double.valueOf(minId + .5);
-    Double medIdO = Double.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",minIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs/2, result.length);
-    int count = 0;
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null,medIdO,F,T), numDocs).scoreDocs;
-    count += result.length;
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",medIdO,null,F,F), numDocs).scoreDocs;
-    count += result.length;
-    assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
-    assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,DocValuesRangeFilter.newDoubleRange("id_double",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
-    assertEquals("infinity special case", 0, result.length);
-  }
-  
-  // test using a sparse index (with deleted docs).
-  @Test
-  public void testSparseIndex() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-
-    for (int d = -20; d <= 20; d++) {
-      Document doc = new Document();
-      doc.add(new IntField("id_int", d, Field.Store.NO));
-      doc.add(new NumericDocValuesField("id_int", d));
-      doc.add(newStringField("body", "body", Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.forceMerge(1);
-    BytesRefBuilder term0 = new BytesRefBuilder();
-    NumericUtils.intToPrefixCoded(0, 0, term0);
-    writer.deleteDocuments(new Term("id_int", term0.get()));
-    writer.close();
-
-    IndexReader reader = DirectoryReader.open(dir);
-    IndexSearcher search = newSearcher(reader);
-    assertTrue(reader.hasDeletions());
-
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,20,T,T), 100).scoreDocs;
-    assertEquals("find all", 40, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",0,20,T,T), 100).scoreDocs;
-    assertEquals("find all", 20, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,0,T,T), 100).scoreDocs;
-    assertEquals("find all", 20, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",10,20,T,T), 100).scoreDocs;
-    assertEquals("find all", 11, result.length);
-
-    result = search.search(q,DocValuesRangeFilter.newIntRange("id_int",-20,-10,T,T), 100).scoreDocs;
-    assertEquals("find all", 11, result.length);
-    reader.close();
-    dir.close();
-  }
-  
-}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
index 7dac515..eec5849 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
@@ -17,18 +17,14 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
-
-import java.util.ArrayList;
-import java.util.List;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * A basic unit test for FieldCacheTermsFilter
@@ -41,10 +37,9 @@
     Directory rd = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), rd);
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       int term = i * 10; // terms are multiples of 10
-      doc.add(newStringField(fieldName, "" + term, Field.Store.YES));
-      doc.add(new SortedDocValuesField(fieldName, new BytesRef("" + term)));
+      doc.addAtom(fieldName, "" + term);
       w.addDocument(doc);
     }
     IndexReader reader = w.getReader();
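
The hunk above is the migration pattern that recurs throughout this change: instead of building a bare Document and attaching an inverted StringField plus a parallel SortedDocValuesField by hand, tests obtain a schema-bound document from the writer and add a single atom, letting the writer's FieldTypes drive both the postings and the doc values. A minimal sketch of the new indexing pattern, assuming the LUCENE-6005 branch APIs used in this diff (IndexWriter.newDocument(), Document.addAtom()); the RAMDirectory and the null analyzer are illustrative assumptions, not part of the change:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class AtomIndexingSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        // Atoms are not tokenized, so no analyzer is needed for this sketch.
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
        for (int i = 0; i < 100; i++) {
          // One call covers what previously took two field instances: the atom
          // is indexed as a single token and also recorded as sorted doc values,
          // per the writer's FieldTypes schema.
          Document doc = w.newDocument();
          doc.addAtom("field", Integer.toString(i * 10));
          w.addDocument(doc);
        }
        w.close();
        dir.close();
      }
    }
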
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
index 052bd43..dddc198 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
@@ -20,20 +20,17 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.SparseFixedBitSet;
@@ -142,25 +139,23 @@
 
   private int[] buildIndex(RandomIndexWriter writer, int docs)
       throws IOException {
+    FieldTypes fieldTypes = writer.getFieldTypes();
     int[] docStates = new int[docs];
     for (int i = 0; i < docs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       if (random().nextBoolean()) {
         docStates[i] = 1;
-        doc.add(newTextField("some", "value", Field.Store.YES));
-        doc.add(new SortedDocValuesField("some", new BytesRef("value")));
+        doc.addShortText("some", "value");
       }
-      doc.add(newTextField("all", "test", Field.Store.NO));
-      doc.add(new SortedDocValuesField("all", new BytesRef("test")));
-      doc.add(newTextField("id", "" + i, Field.Store.YES));
-      doc.add(new SortedDocValuesField("id", new BytesRef("" + i)));
+      doc.addShortText("all", "test");
+      doc.addUniqueInt("id", i);
       writer.addDocument(doc);
     }
     writer.commit();
     int numDeletes = random().nextInt(docs);
     for (int i = 0; i < numDeletes; i++) {
       int docID = random().nextInt(docs);
-      writer.deleteDocuments(new Term("id", "" + docID));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", docID));
       docStates[docID] = 2;
     }
     writer.close();
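
Worth calling out in the hunk above: once an id is indexed with addUniqueInt, it is stored in a numeric term encoding, so the old new Term("id", "" + docID) would no longer match anything; deletes must go through the schema-aware term factory. A short sketch of that delete path, assuming the branch's FieldTypes.newIntTerm() as used above; the helper class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;

    class IntTermDeleteSketch {
      static void indexThenDelete(IndexWriter w, int id) throws IOException {
        FieldTypes fieldTypes = w.getFieldTypes();
        Document doc = w.newDocument();
        doc.addUniqueInt("id", id); // unique int: usable as a primary key
        w.addDocument(doc);
        // The int is term-encoded in the index, so build the matching term
        // through FieldTypes rather than from the string "" + id:
        w.deleteDocuments(fieldTypes.newIntTerm("id", id));
      }
    }
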
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilterCachingPolicy.java b/lucene/core/src/test/org/apache/lucene/search/TestFilterCachingPolicy.java
index 4bf6cf7..5899daf 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFilterCachingPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFilterCachingPolicy.java
@@ -34,7 +34,7 @@
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     final int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; ++i) {
-      w.addDocument(new Document());
+      w.addDocument(w.newDocument());
     }
     final IndexReader reader = w.getReader();
     for (float minSizeRatio : new float[] {Float.MIN_VALUE, 0.01f, 0.1f, 0.9f}) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
index c91ef33..7d87181 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -23,19 +23,18 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.FilteredQuery.FilterStrategy;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
@@ -64,28 +63,24 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter (random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
-    Document doc = new Document();
-    doc.add (newTextField("field", "one two three four five", Field.Store.YES));
-    doc.add (newTextField("sorter", "b", Field.Store.YES));
-    doc.add (new SortedDocValuesField("sorter", new BytesRef("b")));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "one two three four five");
+    doc.addAtom("sorter", new BytesRef("b"));
     writer.addDocument (doc);
 
-    doc = new Document();
-    doc.add (newTextField("field", "one two three four", Field.Store.YES));
-    doc.add (newTextField("sorter", "d", Field.Store.YES));
-    doc.add (new SortedDocValuesField("sorter", new BytesRef("d")));
+    doc = writer.newDocument();
+    doc.addLargeText("field", "one two three four");
+    doc.addAtom("sorter", new BytesRef("d"));
     writer.addDocument (doc);
 
-    doc = new Document();
-    doc.add (newTextField("field", "one two three y", Field.Store.YES));
-    doc.add (newTextField("sorter", "a", Field.Store.YES));
-    doc.add (new SortedDocValuesField("sorter", new BytesRef("a")));
+    doc = writer.newDocument();
+    doc.addLargeText("field", "one two three y");
+    doc.addAtom("sorter", new BytesRef("a"));
     writer.addDocument (doc);
 
-    doc = new Document();
-    doc.add (newTextField("field", "one two x", Field.Store.YES));
-    doc.add (newTextField("sorter", "c", Field.Store.YES));
-    doc.add (new SortedDocValuesField("sorter", new BytesRef("c")));
+    doc = writer.newDocument();
+    doc.addLargeText("field", "one two x");
+    doc.addAtom("sorter", new BytesRef("c"));
     writer.addDocument (doc);
 
     // tests here require single segment (eg try seed
@@ -98,7 +93,7 @@
 
     searcher = newSearcher(reader);
 
-    query = new TermQuery (new Term ("field", "three"));
+    query = new TermQuery(new Term("field", "three"));
     filter = newStaticFilterB();
   }
 
@@ -410,29 +405,30 @@
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
         newIndexWriterConfig(new MockAnalyzer(random())));
+    final FieldTypes fieldTypes = writer.getFieldTypes();
     int numDocs = atLeast(50);
     int totalDocsWithZero = 0;
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       int num = random().nextInt(5);
       if (num == 0) {
         totalDocsWithZero++;
       }
-      doc.add(newTextField("field", "" + num, Field.Store.YES));
+      doc.addInt("field", num);
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(reader);
-    Query query = new FilteredQuery(new TermQuery(new Term("field", "0")),
+    Query query = new FilteredQuery(fieldTypes.newExactIntQuery("field", 0),
         new Filter() {
           @Override
           public DocIdSet getDocIdSet(LeafReaderContext context,
               Bits acceptDocs) throws IOException {
             final boolean nullBitset = random().nextInt(10) == 5;
             final LeafReader reader = context.reader();
-            DocsEnum termDocsEnum = reader.termDocsEnum(new Term("field", "0"));
+            DocsEnum termDocsEnum = reader.termDocsEnum(fieldTypes.newIntTerm("field", 0));
             if (termDocsEnum == null) {
               return null; // no docs -- return null
             }
@@ -475,7 +471,7 @@
                 assertTrue(
                     "iterator should not be called if bitset is present",
                     nullBitset);
-                return reader.termDocsEnum(new Term("field", "0"));
+                return reader.termDocsEnum(fieldTypes.newIntTerm("field", 0));
               }
               
             };
@@ -497,19 +493,20 @@
     int numDocs = atLeast(50);
     int totalDocsWithZero = 0;
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       int num = random().nextInt(10);
       if (num == 0) {
         totalDocsWithZero++;
       }
-      doc.add (newTextField("field", ""+num, Field.Store.YES));
+      doc.addInt("field", num);
       writer.addDocument (doc);  
     }
     IndexReader reader = writer.getReader();
+    final FieldTypes fieldTypes = reader.getFieldTypes();
     writer.close();
     final boolean queryFirst = random().nextBoolean();
     IndexSearcher searcher = newSearcher(reader);
-    Query query = new FilteredQuery(new TermQuery(new Term("field", "0")), new Filter() {
+    Query query = new FilteredQuery(fieldTypes.newExactIntQuery("field", 0), new Filter() {
       @Override
       public DocIdSet getDocIdSet(final LeafReaderContext context, Bits acceptDocs)
           throws IOException {
@@ -526,7 +523,7 @@
           }
           @Override
           public DocIdSetIterator iterator() throws IOException {
-            final DocsEnum termDocsEnum = context.reader().termDocsEnum(new Term("field", "0"));
+            final DocsEnum termDocsEnum = context.reader().termDocsEnum(fieldTypes.newIntTerm("field", 0));
             if (termDocsEnum == null) {
               return null;
             }
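
The query side of the same schema awareness: where the old test matched the string "0" with a TermQuery, the int field is now queried through fieldTypes.newExactIntQuery(), which builds a term query against the field's numeric encoding, and the FieldTypes schema can be recovered from the reader as well as the writer. A hedged sketch, assuming the factory methods used in the hunk above; the class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;

    class ExactIntQuerySketch {
      static int countZeros(IndexReader reader) throws IOException {
        // The schema travels with the index, so the reader hands back the
        // same FieldTypes the writer used:
        FieldTypes fieldTypes = reader.getFieldTypes();
        IndexSearcher searcher = new IndexSearcher(reader);
        Query q = fieldTypes.newExactIntQuery("field", 0);
        TopDocs hits = searcher.search(q, 10);
        return hits.totalHits;
      }
    }
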
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java
index d200105..f3ef0c4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java
@@ -19,20 +19,19 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
 
 
 
@@ -60,8 +59,8 @@
 
   public void searchFiltered(IndexWriter writer, Directory directory, Filter filter, boolean fullMerge) throws IOException {
     for (int i = 0; i < 60; i++) {//Simple docs
-      Document doc = new Document();
-      doc.add(newStringField(FIELD, Integer.toString(i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addAtom(FIELD, Integer.toString(i));
       writer.addDocument(doc);
     }
     if (fullMerge) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
index 8284410..0d3c484 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
@@ -17,14 +17,13 @@
  * limitations under the License.
  */
 
-import java.util.List;
-import java.util.Arrays;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -99,7 +98,7 @@
     assertEquals("3 documents should match", 3, hits.length);
     List<String> order = Arrays.asList("bbbbb","abbbb","aabbb");
     for (int i = 0; i < hits.length; i++) {
-      final String term = searcher.doc(hits[i].doc).get("field");
+      final String term = searcher.doc(hits[i].doc).getString("field");
       //System.out.println(hits[i].score);
       assertEquals(order.get(i), term);
     }
@@ -111,7 +110,7 @@
     assertEquals("only 2 documents should match", 2, hits.length);
     order = Arrays.asList("bbbbb","abbbb");
     for (int i = 0; i < hits.length; i++) {
-      final String term = searcher.doc(hits[i].doc).get("field");
+      final String term = searcher.doc(hits[i].doc).getString("field");
       //System.out.println(hits[i].score);
       assertEquals(order.get(i), term);
     }
@@ -412,8 +411,8 @@
   }
   
   private void addDoc(String text, RandomIndexWriter writer) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("field", text, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", text);
     writer.addDocument(doc);
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
index 8b6aad4..a0dcb39 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestIndexSearcher.java
@@ -23,8 +23,6 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -46,10 +44,9 @@
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("field", Integer.toString(i), Field.Store.NO));
-      doc.add(newStringField("field2", Boolean.toString(i % 2 == 0), Field.Store.NO));
-      doc.add(new SortedDocValuesField("field2", new BytesRef(Boolean.toString(i % 2 == 0))));
+      Document doc = iw.newDocument();
+      doc.addUniqueAtom("field", Integer.toString(i));
+      doc.addAtom("field2", Boolean.toString(i % 2 == 0));
       iw.addDocument(doc);
     }
     reader = iw.getReader();
@@ -126,7 +123,7 @@
     // LUCENE-5128: ensure we get a meaningful message if searchAfter exceeds maxDoc
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     IndexReader r = w.getReader();
     w.close();
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLRUFilterCache.java b/lucene/core/src/test/org/apache/lucene/search/TestLRUFilterCache.java
index 07cbc40..228c31f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLRUFilterCache.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLRUFilterCache.java
@@ -31,8 +31,6 @@
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
@@ -80,11 +78,9 @@
     Thread[] threads = new Thread[3];
     threads[0] = new Thread() {
       public void run() {
-        Document doc = new Document();
-        StringField f = new StringField("color", "", Store.NO);
-        doc.add(f);
         for (int i = 0; indexing.get() && i < numDocs; ++i) {
-          f.setStringValue(RandomPicks.randomFrom(random(), new String[] {"blue", "red", "yellow"}));
+          Document doc = w.newDocument();
+          doc.addAtom("color", RandomPicks.randomFrom(random(), new String[] {"blue", "red", "yellow"}));
           try {
             w.addDocument(doc);
             if ((i & 63) == 0) {
@@ -154,13 +150,16 @@
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "blue", Store.NO);
-    doc.add(f);
+    Document doc = w.newDocument();
+    doc.addAtom("color", "blue");
     w.addDocument(doc);
-    f.setStringValue("red");
+
+    doc = w.newDocument();
+    doc.addAtom("color", "red");
     w.addDocument(doc);
-    f.setStringValue("green");
+
+    doc = w.newDocument();
+    doc.addAtom("color", "green");
     w.addDocument(doc);
     final DirectoryReader reader = w.getReader();
     final IndexSearcher searcher = newSearcher(reader);
@@ -206,12 +205,10 @@
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; ++i) {
-      f.setStringValue(RandomPicks.randomFrom(random(), Arrays.asList("blue", "red", "green")));
+      Document doc = w.newDocument();
+      doc.addAtom("color", RandomPicks.randomFrom(random(), Arrays.asList("blue", "red", "green")));
       w.addDocument(doc);
     }
     final DirectoryReader reader = w.getReader();
@@ -240,12 +237,10 @@
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; ++i) {
-      f.setStringValue(random().nextBoolean() ? "red" : "blue");
+      Document doc = w.newDocument();
+      doc.addAtom("color", random().nextBoolean() ? "red" : "blue");
       w.addDocument(doc);
     }
     final DirectoryReader reader = w.getReader();
@@ -317,14 +312,12 @@
 
     final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     final int iters = atLeast(5);
     for (int iter = 0; iter < iters; ++iter) {
       final int numDocs = atLeast(10);
       for (int i = 0; i < numDocs; ++i) {
-        f.setStringValue(RandomPicks.randomFrom(random(), colors));
+        Document doc = w.newDocument();
+        doc.addAtom("color", RandomPicks.randomFrom(random(), colors));
         w.addDocument(doc);
       }
       try (final DirectoryReader reader = w.getReader()) {
@@ -378,10 +371,9 @@
 
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
     final int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; ++i) {
-      w.addDocument(doc);
+      w.addDocument(w.newDocument());
     }
     final DirectoryReader reader = w.getReader();
     final IndexSearcher searcher = new IndexSearcher(reader);
@@ -409,12 +401,10 @@
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; ++i) {
-      f.setStringValue(RandomPicks.randomFrom(random(), Arrays.asList("red", "blue", "green", "yellow")));
+      Document doc = w.newDocument();
+      doc.addAtom("color", RandomPicks.randomFrom(random(), Arrays.asList("red", "blue", "green", "yellow")));
       w.addDocument(doc);
       if (random().nextBoolean()) {
         w.getReader().close();
@@ -467,11 +457,9 @@
 
     final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     for (int i = 0; i < 10; ++i) {
-      f.setStringValue(RandomPicks.randomFrom(random(), colors));
+      Document doc = w.newDocument();
+      doc.addAtom("color", RandomPicks.randomFrom(random(), colors));
       w.addDocument(doc);
       if (random().nextBoolean()) {
         w.getReader().close();
@@ -553,12 +541,10 @@
 
     final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");
 
-    Document doc = new Document();
-    StringField f = new StringField("color", "", Store.NO);
-    doc.add(f);
     for (RandomIndexWriter w : Arrays.asList(w1, w2)) {
       for (int i = 0; i < 10; ++i) {
-        f.setStringValue(RandomPicks.randomFrom(random(), colors));
+        Document doc = w.newDocument();
+        doc.addAtom("color", RandomPicks.randomFrom(random(), colors));
         w.addDocument(doc);
         if (random().nextBoolean()) {
           w.getReader().close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
index ee8aefc..7e3603e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
@@ -29,13 +29,9 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -67,7 +63,7 @@
           if (hits.totalHits == 0) {
             return null;
           } else {
-            StoredDocument doc = s.doc(hits.scoreDocs[0].doc);
+            Document doc = s.doc(hits.scoreDocs[0].doc);
             return (Integer) doc.getField("field").numericValue();
           }
         }
@@ -102,14 +98,14 @@
               startingGun.await();
               for(int iter=0; iter<iters;iter++) {
                 // Add/update a document
-                Document doc = new Document();
+                Document doc = w.newDocument();
                 // Threads must not update the same id at the
                 // same time:
                 if (threadRandom.nextDouble() <= addChance) {
                   String id = String.format(Locale.ROOT, "%d_%04x", threadID, threadRandom.nextInt(idCount));
                   Integer field = threadRandom.nextInt(Integer.MAX_VALUE);
-                  doc.add(new StringField("id", id, Field.Store.YES));
-                  doc.add(new IntField("field", field.intValue(), Field.Store.YES));
+                  doc.addUniqueAtom("id", id);
+                  doc.addInt("field", field.intValue());
                   w.updateDocument(new Term("id", id), doc);
                   rt.add(id, field);
                   if (values.put(id, field) == null) {
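
Note the asymmetry with the int case above: a string id is an atom and is indexed verbatim, so updateDocument can keep using a plain Term; only the value side changes, with addUniqueAtom() marking the id as a single-valued key and addInt() replacing the stored IntField. A sketch of the update path under those assumptions; the class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class UpdateByIdSketch {
      static void upsert(IndexWriter w, String id, int value) throws IOException {
        Document doc = w.newDocument();
        doc.addUniqueAtom("id", id); // unique atom: one value per id, key-like
        doc.addInt("field", value);  // indexed as a native int
        // Atoms are indexed verbatim, so the plain string Term still matches:
        w.updateDocument(new Term("id", id), doc);
      }
    }
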
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
index f41497e..24dd531 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
@@ -21,13 +21,11 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.store.Directory;
-
 import org.apache.lucene.util.LuceneTestCase;
 
 /**
@@ -96,10 +94,8 @@
   }
   
   private void addDoc(String text, IndexWriter iw, float boost) throws IOException {
-    Document doc = new Document();
-    Field f = newTextField("key", text, Field.Store.YES);
-    f.setBoost(boost);
-    doc.add(f);
+    Document doc = iw.newDocument();
+    doc.addLargeText("key", text, boost);
     iw.addDocument(doc);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index 2216ed5..e1a0892 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -26,11 +26,9 @@
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
@@ -62,9 +60,12 @@
   public static void beforeClass() throws Exception {
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    fieldTypes.setMultiValued("dv");
     final int numDocs = atLeast(300);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       
       addSome(doc, alwaysTerms);
       
@@ -107,8 +108,8 @@
     Collections.shuffle(list, random());
     int howMany = TestUtil.nextInt(random(), 1, list.size());
     for (int i = 0; i < howMany; i++) {
-      doc.add(new StringField("field", list.get(i), Field.Store.NO));
-      doc.add(new SortedSetDocValuesField("dv", new BytesRef(list.get(i))));
+      doc.addAtom("field", list.get(i));
+      doc.addBinary("dv", new BytesRef(list.get(i)));
     }
   }
   
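
Multi-valued fields are now an up-front schema decision: the hunk above declares "field" and "dv" multi-valued on the writer's FieldTypes before any document adds a second value, where the old API simply tolerated repeated doc.add() calls. A sketch of that declaration, assuming setMultiValued() as used above; the class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;

    class MultiValuedSketch {
      static void indexTags(IndexWriter w, String... tags) throws IOException {
        FieldTypes fieldTypes = w.getFieldTypes();
        fieldTypes.setMultiValued("tag"); // declare before adding >1 value per doc
        Document doc = w.newDocument();
        for (String tag : tags) {
          doc.addAtom("tag", tag); // repeated adds are legal once declared
        }
        w.addDocument(doc);
      }
    }
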
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
index dc5a249..230a3d9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -23,8 +23,6 @@
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -195,8 +193,8 @@
   }
   
   private void add(String s, RandomIndexWriter writer) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("body", s, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("body", s);
     writer.addDocument(doc);
   }
   
@@ -315,9 +313,9 @@
   
   private void add(String s, String type, RandomIndexWriter writer)
       throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("body", s, Field.Store.YES));
-    doc.add(newStringField("type", type, Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("body", s);
+    doc.addAtom("type", type);
     writer.addDocument(doc);
   }
   
@@ -366,11 +364,11 @@
     tokens[2].setPositionIncrement(0);
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new TextField("field", new CannedTokenStream(tokens)));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", new CannedTokenStream(tokens));
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new TextField("field", new CannedTokenStream(tokens)));
+    doc = writer.newDocument();
+    doc.addLargeText("field", new CannedTokenStream(tokens));
     writer.addDocument(doc);
     IndexReader r = writer.getReader();
     writer.close();
@@ -467,8 +465,8 @@
     Directory dir = newDirectory(); // random dir
     IndexWriterConfig cfg = newIndexWriterConfig(null);
     IndexWriter writer = new IndexWriter(dir, cfg);
-    Document doc = new Document();
-    doc.add(new TextField("field", new CannedTokenStream(INCR_0_DOC_TOKENS)));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", new CannedTokenStream(INCR_0_DOC_TOKENS));
     writer.addDocument(doc);
     IndexReader r = DirectoryReader.open(writer,false);
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
index 9800867..1de9057 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
@@ -17,14 +17,15 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
+import junit.framework.Assert;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -33,10 +34,6 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.IOException;
-
-import junit.framework.Assert;
-
 public class TestMultiTermConstantScore extends BaseTestRangeFilter {
 
   /** threshold for comparing floats */
@@ -60,14 +57,12 @@
         newIndexWriterConfig(
             new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy()));
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setTokenized(false);
     for (int i = 0; i < data.length; i++) {
-      Document doc = new Document();
-      doc.add(newField("id", String.valueOf(i), customType));// Field.Keyword("id",String.valueOf(i)));
-      doc.add(newField("all", "all", customType));// Field.Keyword("all","all"));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", String.valueOf(i));
+      doc.addAtom("all", "all");
       if (null != data[i]) {
-        doc.add(newTextField("data", data[i], Field.Store.YES));// Field.Text("data",data[i]));
+        doc.addLargeText("data", data[i]);
       }
       writer.addDocument(doc);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
index 62c8ff0..09a6b8d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
@@ -17,16 +17,17 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
@@ -34,8 +35,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import java.io.IOException;
-
 public class TestMultiTermQueryRewrites extends LuceneTestCase {
 
   static Directory dir, sdir1, sdir2;
@@ -52,10 +51,13 @@
     final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random()));
 
     for (int i = 0; i < 10; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("data", Integer.toString(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("data", Integer.toString(i));
       writer.addDocument(doc);
-      ((i % 2 == 0) ? swriter1 : swriter2).addDocument(doc);
+      RandomIndexWriter otherWriter = (i % 2 == 0) ? swriter1 : swriter2;
+      doc = otherWriter.newDocument();
+      doc.addAtom("data", Integer.toString(i));
+      otherWriter.addDocument(doc);
     }
     writer.forceMerge(1); swriter1.forceMerge(1); swriter2.forceMerge(1);
     writer.close(); swriter1.close(); swriter2.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
index ed052d9..50b41f4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Terms;
@@ -43,13 +44,12 @@
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     //writer.setNoCFSRatio(0.0);
     //writer.infoStream = System.out;
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setTokenized(false);
-    customType.setStoreTermVectors(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setIndexOptions("field", IndexOptions.DOCS);
+    fieldTypes.enableTermVectors("field");
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      Field fld = newField("field", English.intToEnglish(i), customType);
-      doc.add(fld);
+      Document doc = writer.newDocument();
+      doc.addAtom("field", English.intToEnglish(i));
       writer.addDocument(doc);
     }
     writer.close();
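
Per-field knobs that used to live on a FieldType instance (tokenization, term vectors) also move onto the writer's FieldTypes, configured once per field name. A sketch of the term-vector setup from the hunk above, where IndexOptions.DOCS keeps only document ids in the postings; the class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.IndexWriter;

    class TermVectorSetupSketch {
      static void configureAndAdd(IndexWriter w, String text) throws IOException {
        FieldTypes fieldTypes = w.getFieldTypes();
        fieldTypes.setIndexOptions("field", IndexOptions.DOCS); // postings: doc ids only
        fieldTypes.enableTermVectors("field");                  // per-document term vectors
        Document doc = w.newDocument();
        doc.addAtom("field", text);
        w.addDocument(doc);
      }
    }
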
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
index ffd231a..53c4fbd 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
@@ -17,14 +17,13 @@
  * limitations under the License.
  */
 
-import java.util.Locale;
 import java.text.DecimalFormat;
 import java.text.DecimalFormatSymbols;
+import java.util.Locale;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
@@ -43,16 +42,19 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    
+
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("asc");
+    fieldTypes.setMultiValued("trie");
     DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
     
     int num = atLeast(500);
     for (int l = 0; l < num; l++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       for (int m=0, c=random().nextInt(10); m<=c; m++) {
         int value = random().nextInt(Integer.MAX_VALUE);
-        doc.add(newStringField("asc", format.format(value), Field.Store.NO));
-        doc.add(new IntField("trie", value, Field.Store.NO));
+        doc.addAtom("asc", format.format(value));
+        doc.addInt("trie", value);
       }
       writer.addDocument(doc);
     }
@@ -67,11 +69,11 @@
       if (lower>upper) {
         int a=lower; lower=upper; upper=a;
       }
-      TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
-      NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
+      Query cq = new ConstantScoreQuery(fieldTypes.newStringRangeFilter("asc", format.format(lower), true, format.format(upper), true));
+      Query tq = new ConstantScoreQuery(fieldTypes.newIntRangeFilter("trie", lower, true, upper, true));
       TopDocs trTopDocs = searcher.search(cq, 1);
       TopDocs nrTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
+      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits);
     }
     reader.close();
     directory.close();
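
Range queries follow the same factory pattern: the direct TermRangeQuery and NumericRangeQuery constructions give way to FieldTypes range filters wrapped in ConstantScoreQuery. Note the argument order of the new factories, (field, lower, includeLower, upper, includeUpper), versus the old (field, lower, upper, includeLower, includeUpper). A sketch under those assumptions; the class and method names are hypothetical:

    import java.io.IOException;

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;

    class IntRangeSketch {
      static int countInRange(IndexReader reader, int lower, int upper) throws IOException {
        FieldTypes fieldTypes = reader.getFieldTypes();
        IndexSearcher searcher = new IndexSearcher(reader);
        // newIntRangeFilter(field, lower, includeLower, upper, includeUpper)
        Query q = new ConstantScoreQuery(
            fieldTypes.newIntRangeFilter("trie", lower, true, upper, true));
        TopDocs hits = searcher.search(q, 1);
        return hits.totalHits;
      }
    }
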
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNot.java b/lucene/core/src/test/org/apache/lucene/search/TestNot.java
index a7591b3..5bfc037 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestNot.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestNot.java
@@ -17,14 +17,12 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.LuceneTestCase;
-
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.util.LuceneTestCase;
 
 /** Similarity unit test.
  *
@@ -36,8 +34,8 @@
     Directory store = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), store);
 
-    Document d1 = new Document();
-    d1.add(newTextField("field", "a b", Field.Store.YES));
+    Document d1 = writer.newDocument();
+    d1.addLargeText("field", "a b");
 
     writer.addDocument(d1);
     IndexReader reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
deleted file mode 100644
index 0782e42..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ /dev/null
@@ -1,612 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestNumericUtils; // NaN arrays
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery32 extends LuceneTestCase {
-  // distance of entries
-  private static int distance;
-  // shift the starting of the values to the left, to also have negative values:
-  private static final int startOffset = - 1 << 15;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1 << 30) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-    
-    final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
-    storedInt.setStored(true);
-    storedInt.freeze();
-
-    final FieldType storedInt8 = new FieldType(storedInt);
-    storedInt8.setNumericPrecisionStep(8);
-
-    final FieldType storedInt4 = new FieldType(storedInt);
-    storedInt4.setNumericPrecisionStep(4);
-
-    final FieldType storedInt2 = new FieldType(storedInt);
-    storedInt2.setNumericPrecisionStep(2);
-
-    final FieldType storedIntNone = new FieldType(storedInt);
-    storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final FieldType unstoredInt = IntField.TYPE_NOT_STORED;
-
-    final FieldType unstoredInt8 = new FieldType(unstoredInt);
-    unstoredInt8.setNumericPrecisionStep(8);
-
-    final FieldType unstoredInt4 = new FieldType(unstoredInt);
-    unstoredInt4.setNumericPrecisionStep(4);
-
-    final FieldType unstoredInt2 = new FieldType(unstoredInt);
-    unstoredInt2.setNumericPrecisionStep(2);
-
-    IntField
-      field8 = new IntField("field8", 0, storedInt8),
-      field4 = new IntField("field4", 0, storedInt4),
-      field2 = new IntField("field2", 0, storedInt2),
-      fieldNoTrie = new IntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
-      ascfield8 = new IntField("ascfield8", 0, unstoredInt8),
-      ascfield4 = new IntField("ascfield4", 0, unstoredInt4),
-      ascfield2 = new IntField("ascfield2", 0, unstoredInt2);
-    
-    Document doc = new Document();
-    // add fields, that have a distance to test general functionality
-    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
-    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing int values
-    for (int l=0; l<noDocs; l++) {
-      int val=distance*l+startOffset;
-      field8.setIntValue(val);
-      field4.setIntValue(val);
-      field2.setIntValue(val);
-      fieldNoTrie.setIntValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setIntValue(val);
-      ascfield4.setIntValue(val);
-      ascfield2.setIntValue(val);
-      writer.addDocument(doc);
-    }
-  
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
-  }
-  
-  /** test for both constant score and boolean query, the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-    NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<3; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
-          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
-          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-          break;
-        case 2:
-          type = " (filter)";
-          topDocs = searcher.search(new MatchAllDocsQuery(), f, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      StoredDocument doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().intValue());
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testInverseRange() throws Exception {
-    LeafReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getContext();
-    NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
-    assertNull("A inverse range should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
-    f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
-    assertNull("A exclusive range starting with Integer.MAX_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
-    f = NumericRangeFilter.newIntRange("field8", 8, null, Integer.MIN_VALUE, false, false);
-    assertNull("A exclusive range ending with Integer.MIN_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int upper=(count-1)*distance + (distance/3) + startOffset;
-    NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    StoredDocument doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    
-    q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(count-1)*distance + (distance/3) +startOffset;
-    NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    StoredDocument doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-
-    q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new FloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new IntField("int", Integer.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new FloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new IntField("int", Integer.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new FloatField("float", 0.0f, Field.Store.NO));
-    doc.add(new IntField("int", 0, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (float f : TestNumericUtils.FLOAT_NANs) {
-      doc = new Document();
-      doc.add(new FloatField("float", f, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q=NumericRangeQuery.newIntRange("int", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q=NumericRangeQuery.newIntRange("int", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newFloatRange("float", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newFloatRange("float", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestNumericUtils.FLOAT_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC;
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      final BytesRef lowerBytes, upperBytes;
-      BytesRefBuilder b = new BytesRefBuilder();
-      NumericUtils.intToPrefixCodedBytes(lower, 0, b);
-      lowerBytes = b.toBytesRef();
-      NumericUtils.intToPrefixCodedBytes(upper, 0, b);
-      upperBytes = b.toBytesRef();
-
-      // test inclusive range
-      NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      TopDocs cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test left exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test right exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-    }
-    
-    checkTermCounts(precisionStep, totalTermCountT, totalTermCountC);
-    if (VERBOSE && precisionStep != Integer.MAX_VALUE) {
-      System.out.println("Average number of terms during random search on '" + field + "':");
-      System.out.println(" Numeric query: " + (((double)totalTermCountT)/(num * 4)));
-      System.out.println(" Classical query: " + (((double)totalTermCountC)/(num * 4)));
-    }
-  }
-  
-  @Test
-  public void testEmptyEnums() throws Exception {
-    int count=3000;
-    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    // test empty enum
-    assert lower < upper;
-    assertTrue(0 < countTerms(NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
-    assertEquals(0, countTerms(NumericRangeQuery.newIntRange("field4", 4, upper, lower, true, true)));
-    // test empty enum outside of bounds
-    lower = distance*noDocs+startOffset;
-    upper = 2 * lower;
-    assert lower < upper;
-    assertEquals(0, countTerms(NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
-  }
-  
-  private int countTerms(MultiTermQuery q) throws Exception {
-    final Terms terms = MultiFields.getTerms(reader, q.getField());
-    if (terms == null)
-      return 0;
-    final TermsEnum termEnum = q.getTermsEnum(terms);
-    assertNotNull(termEnum);
-    int count = 0;
-    BytesRef cur, last = null;
-    while ((cur = termEnum.next()) != null) {
-      count++;
-      if (last != null) {
-        assertTrue(last.compareTo(cur) < 0);
-      }
-      last = BytesRef.deepCopyOf(cur);
-    } 
-    // LUCENE-3314: the results after next() already returned null are undefined,
-    // assertNull(termEnum.next());
-    return count;
-  }
-  
-  private void checkTermCounts(int precisionStep, int termCountT, int termCountC) {
-    if (precisionStep == Integer.MAX_VALUE) {
-      assertEquals("Number of terms should be equal for unlimited precStep", termCountC, termCountT);
-    } else {
-      assertTrue("Number of terms for NRQ should be <= compared to classical TRQ", termCountT <= termCountC);
-    }
-  }
-
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(8);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(4);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(2);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
-    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // 10 to 20 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs - noDocs/2);
-      int upper=(int)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a float test using the int2float conversion of NumericUtils */
-  private void testFloatRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final int lower=-1000, upper=+2000;
-    
-    Query tq=NumericRangeQuery.newFloatRange(field, precisionStep,
-      NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-    
-    Filter tf=NumericRangeFilter.newFloatRange(field, precisionStep,
-      NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
-    tTopDocs = searcher.search(new MatchAllDocsQuery(), tf, 1);
-    assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testFloatRange_8bit() throws Exception {
-    testFloatRange(8);
-  }
-  
-  @Test
-  public void testFloatRange_4bit() throws Exception {
-    testFloatRange(4);
-  }
-  
-  @Test
-  public void testFloatRange_2bit() throws Exception {
-    testFloatRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true), 
-      NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true), 
-      NumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true), 
-      NumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true), 
-      NumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true), 
-      NumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true), 
-      NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-    // the following produces a hash collision, because Long and Integer with the same value have the same hashCode, so only test equality:
-    Query q1 = NumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
-    Query q2 = NumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
-    assertFalse(q1.equals(q2));
-    assertFalse(q2.equals(q1));
-  }
-  
-}
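
The test class deleted above, like its 64-bit sibling below, exercises NumericRangeQuery's trie encoding: a smaller precisionStep indexes more terms per value and, in exchange, lets a range query visit fewer terms. Here is a self-contained sketch of the pattern the deleted code asserted, written against the same pre-branch API the deleted code uses; the field name "value", the bounds, and the class name are illustrative only:

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.NumericRangeQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class IntRangeSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()));
        // smaller precisionStep: more terms per indexed value, fewer visited per query
        FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
        ft.setNumericPrecisionStep(4);
        ft.freeze();
        for (int v = 0; v < 100; v++) {
          Document doc = new Document();
          doc.add(new IntField("value", v, ft));
          w.addDocument(doc);
        }
        w.close();
        IndexReader r = DirectoryReader.open(dir);
        IndexSearcher s = new IndexSearcher(r);
        // the precisionStep given to the query must match the one used at index time
        Query q = NumericRangeQuery.newIntRange("value", 4, 10, 19, true, true);
        System.out.println(s.search(q, 100).totalHits);  // expected: 10
        r.close();
        dir.close();
      }
    }
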
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
deleted file mode 100644
index 42dd518..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ /dev/null
@@ -1,649 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestNumericUtils; // NaN arrays
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery64 extends LuceneTestCase {
-  // distance of entries
-  private static long distance;
-  // shift the start of the values to the left so that negative values are also present:
-  private static final long startOffset = -1L << 31;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1L << 60) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-
-    final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
-    storedLong.setStored(true);
-    storedLong.freeze();
-
-    final FieldType storedLong8 = new FieldType(storedLong);
-    storedLong8.setNumericPrecisionStep(8);
-
-    final FieldType storedLong4 = new FieldType(storedLong);
-    storedLong4.setNumericPrecisionStep(4);
-
-    final FieldType storedLong6 = new FieldType(storedLong);
-    storedLong6.setNumericPrecisionStep(6);
-
-    final FieldType storedLong2 = new FieldType(storedLong);
-    storedLong2.setNumericPrecisionStep(2);
-
-    final FieldType storedLongNone = new FieldType(storedLong);
-    storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final FieldType unstoredLong = LongField.TYPE_NOT_STORED;
-
-    final FieldType unstoredLong8 = new FieldType(unstoredLong);
-    unstoredLong8.setNumericPrecisionStep(8);
-
-    final FieldType unstoredLong6 = new FieldType(unstoredLong);
-    unstoredLong6.setNumericPrecisionStep(6);
-
-    final FieldType unstoredLong4 = new FieldType(unstoredLong);
-    unstoredLong4.setNumericPrecisionStep(4);
-
-    final FieldType unstoredLong2 = new FieldType(unstoredLong);
-    unstoredLong2.setNumericPrecisionStep(2);
-
-    LongField
-      field8 = new LongField("field8", 0L, storedLong8),
-      field6 = new LongField("field6", 0L, storedLong6),
-      field4 = new LongField("field4", 0L, storedLong4),
-      field2 = new LongField("field2", 0L, storedLong2),
-      fieldNoTrie = new LongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
-      ascfield8 = new LongField("ascfield8", 0L, unstoredLong8),
-      ascfield6 = new LongField("ascfield6", 0L, unstoredLong6),
-      ascfield4 = new LongField("ascfield4", 0L, unstoredLong4),
-      ascfield2 = new LongField("ascfield2", 0L, unstoredLong2);
-
-    Document doc = new Document();
-    // add fields that have a distance, to test general functionality
-    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test correct range splitting and inclusive/exclusive bounds
-    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing long values, by updating the fields
-    for (int l=0; l<noDocs; l++) {
-      long val=distance*l+startOffset;
-      field8.setLongValue(val);
-      field6.setLongValue(val);
-      field4.setLongValue(val);
-      field2.setLongValue(val);
-      fieldNoTrie.setLongValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setLongValue(val);
-      ascfield6.setLongValue(val);
-      ascfield4.setLongValue(val);
-      ascfield2.setLongValue(val);
-      writer.addDocument(doc);
-    }
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
-  }
-  
-  /** test for constant score + boolean query + filter; the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-    NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<3; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
-          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
-          topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-          break;
-        case 2:
-          type = " (filter)";
-          topDocs = searcher.search(new MatchAllDocsQuery(), f, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      StoredDocument doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_6bit() throws Exception {
-    testRange(6);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testInverseRange() throws Exception {
-    LeafReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getContext();
-    NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
-    assertNull("A inverse range should return the null instance", 
-        f.getDocIdSet(context, context.reader().getLiveDocs()));
-    f = NumericRangeFilter.newLongRange("field8", 8, Long.MAX_VALUE, null, false, false);
-    assertNull("A exclusive range starting with Long.MAX_VALUE should return the null instance",
-               f.getDocIdSet(context, context.reader().getLiveDocs()));
-    f = NumericRangeFilter.newLongRange("field8", 8, null, Long.MIN_VALUE, false, false);
-    assertNull("A exclusive range ending with Long.MIN_VALUE should return the null instance",
-               f.getDocIdSet(context, context.reader().getLiveDocs()));
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long upper=(count-1)*distance + (distance/3) + startOffset;
-    NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    StoredDocument doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_6bit() throws Exception {
-    testLeftOpenRange(6);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(count-1)*distance + (distance/3) + startOffset;
-    NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    StoredDocument doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, null, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_6bit() throws Exception {
-    testRightOpenRange(6);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new DoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new LongField("long", Long.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new DoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new LongField("long", Long.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new DoubleField("double", 0.0, Field.Store.NO));
-    doc.add(new LongField("long", 0L, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (double d : TestNumericUtils.DOUBLE_NANs) {
-      doc = new Document();
-      doc.add(new DoubleField("double", d, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q=NumericRangeQuery.newLongRange("long", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q=NumericRangeQuery.newLongRange("long", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newDoubleRange("double", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newDoubleRange("double", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q=NumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestNumericUtils.DOUBLE_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC;
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      final BytesRef lowerBytes, upperBytes;
-      BytesRefBuilder b = new BytesRefBuilder();
-      NumericUtils.longToPrefixCodedBytes(lower, 0, b);
-      lowerBytes = b.toBytesRef();
-      NumericUtils.longToPrefixCodedBytes(upper, 0, b);
-      upperBytes = b.toBytesRef();
-      
-      // test inclusive range
-      NumericRangeQuery<Long> tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      TopDocs cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test left exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test right exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-    }
-    
-    checkTermCounts(precisionStep, totalTermCountT, totalTermCountC);
-    if (VERBOSE && precisionStep != Integer.MAX_VALUE) {
-      System.out.println("Average number of terms during random search on '" + field + "':");
-      System.out.println(" Numeric query: " + (((double)totalTermCountT)/(num * 4)));
-      System.out.println(" Classical query: " + (((double)totalTermCountC)/(num * 4)));
-    }
-  }
-  
-  @Test
-  public void testEmptyEnums() throws Exception {
-    int count=3000;
-    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    // test empty enum
-    assert lower < upper;
-    assertTrue(0 < countTerms(NumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
-    assertEquals(0, countTerms(NumericRangeQuery.newLongRange("field4", 4, upper, lower, true, true)));
-    // test empty enum outside of bounds
-    lower = distance*noDocs+startOffset;
-    upper = 2L * lower;
-    assert lower < upper;
-    assertEquals(0, countTerms(NumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
-  }
-  
-  private int countTerms(MultiTermQuery q) throws Exception {
-    final Terms terms = MultiFields.getTerms(reader, q.getField());
-    if (terms == null)
-      return 0;
-    final TermsEnum termEnum = q.getTermsEnum(terms);
-    assertNotNull(termEnum);
-    int count = 0;
-    BytesRef cur, last = null;
-    while ((cur = termEnum.next()) != null) {
-      count++;
-      if (last != null) {
-        assertTrue(last.compareTo(cur) < 0);
-      }
-      last = BytesRef.deepCopyOf(cur);
-    } 
-    // LUCENE-3314: the results after next() already returned null are undefined,
-    // assertNull(termEnum.next());
-    return count;
-  }
-  
-  private void checkTermCounts(int precisionStep, int termCountT, int termCountC) {
-    if (precisionStep == Integer.MAX_VALUE) {
-      assertEquals("Number of terms should be equal for unlimited precStep", termCountC, termCountT);
-    } else {
-      assertTrue("Number of terms for NRQ should be <= compared to classical TRQ", termCountT <= termCountC);
-    }
-  }
-
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(8);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_6bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(6);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(4);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(2);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
-    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // 10 to 20 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs - noDocs/2);
-      long upper=(long)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_6bit() throws Exception {
-    testRangeSplit(6);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a double test using the long2double conversion of NumericUtils */
-  private void testDoubleRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final long lower=-1000L, upper=+2000L;
-    
-    Query tq=NumericRangeQuery.newDoubleRange(field, precisionStep,
-      NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-    
-    Filter tf=NumericRangeFilter.newDoubleRange(field, precisionStep,
-      NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
-    tTopDocs = searcher.search(new MatchAllDocsQuery(), tf, 1);
-    assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testDoubleRange_8bit() throws Exception {
-    testDoubleRange(8);
-  }
-  
-  @Test
-  public void testDoubleRange_6bit() throws Exception {
-    testDoubleRange(6);
-  }
-  
-  @Test
-  public void testDoubleRange_4bit() throws Exception {
-    testDoubleRange(4);
-  }
-  
-  @Test
-  public void testDoubleRange_2bit() throws Exception {
-    testDoubleRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
-    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
-    );
-    QueryUtils.checkUnequal(
-      NumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true), 
-      NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-     // the difference from the int range is tested in TestNumericRangeQuery32
-  }
-}
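
Both deleted tests hinge on the invariant checked by testRandomTrieAndClassicRangeQuery: at shift 0 the prefix-coded terms sort in value order, so a classical TermRangeQuery over the encoded bounds matches exactly the documents the NumericRangeQuery matches, while generally enumerating more terms. Condensed from the code above (same classes and field name; searcher, lower and upper are assumed in scope of a LuceneTestCase):

    BytesRefBuilder b = new BytesRefBuilder();
    NumericUtils.longToPrefixCodedBytes(lower, 0, b);  // shift 0 = full precision
    BytesRef lowerBytes = b.toBytesRef();
    NumericUtils.longToPrefixCodedBytes(upper, 0, b);
    BytesRef upperBytes = b.toBytesRef();

    Query trie = NumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true);
    Query classical = new TermRangeQuery("field4", lowerBytes, upperBytes, true, true);

    // identical hit counts; the trie query enumerates at most as many terms
    assertEquals(searcher.search(classical, 1).totalHits, searcher.search(trie, 1).totalHits);
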
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
index 26cf76a..2789a76 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -17,20 +17,19 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.store.Directory;
-
 import java.io.IOException;
 import java.util.LinkedList;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
 /**
  * This class tests PhrasePrefixQuery class.
  */
@@ -42,16 +41,16 @@
   public void testPhrasePrefix() throws IOException {
     Directory indexStore = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
-    Document doc1 = new Document();
-    Document doc2 = new Document();
-    Document doc3 = new Document();
-    Document doc4 = new Document();
-    Document doc5 = new Document();
-    doc1.add(newTextField("body", "blueberry pie", Field.Store.YES));
-    doc2.add(newTextField("body", "blueberry strudel", Field.Store.YES));
-    doc3.add(newTextField("body", "blueberry pizza", Field.Store.YES));
-    doc4.add(newTextField("body", "blueberry chewing gum", Field.Store.YES));
-    doc5.add(newTextField("body", "piccadilly circus", Field.Store.YES));
+    Document doc1 = writer.newDocument();
+    Document doc2 = writer.newDocument();
+    Document doc3 = writer.newDocument();
+    Document doc4 = writer.newDocument();
+    Document doc5 = writer.newDocument();
+    doc1.addLargeText("body", "blueberry pie");
+    doc2.addLargeText("body", "blueberry strudel");
+    doc3.addLargeText("body", "blueberry pizza");
+    doc4.addLargeText("body", "blueberry chewing gum");
+    doc5.addLargeText("body", "piccadilly circus");
     writer.addDocument(doc1);
     writer.addDocument(doc2);
     writer.addDocument(doc3);
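
The conversion in this hunk is the template for the rest of the patch: instead of constructing a Document and explicit Field objects, callers obtain a schema-aware document from the writer and add typed values to it. Side by side, both forms taken from the hunk above:

    // before: explicit field construction, storage flagged per field
    Document doc1 = new Document();
    doc1.add(newTextField("body", "blueberry pie", Field.Store.YES));

    // after: the writer hands out documents and tracks field types itself
    Document doc1 = writer.newDocument();
    doc1.addLargeText("body", "blueberry pie");
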
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
index 68e958e..8a3c5b8 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -61,21 +61,22 @@
       }
     };
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("repeated");
     
-    Document doc = new Document();
-    doc.add(newTextField("field", "one two three four five", Field.Store.YES));
-    doc.add(newTextField("repeated", "this is a repeated field - first part", Field.Store.YES));
-    Field repeatedField = newTextField("repeated", "second part of a repeated field", Field.Store.YES);
-    doc.add(repeatedField);
-    doc.add(newTextField("palindrome", "one two three two one", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "one two three four five");
+    doc.addLargeText("repeated", "this is a repeated field - first part");
+    doc.addLargeText("repeated", "second part of a repeated field");
+    doc.addLargeText("palindrome", "one two three two one");
     writer.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newTextField("nonexist", "phrase exist notexist exist found", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("nonexist", "phrase exist notexist exist found");
     writer.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newTextField("nonexist", "phrase exist notexist exist found", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("nonexist", "phrase exist notexist exist found");
     writer.addDocument(doc);
 
     reader = writer.getReader();
@@ -216,8 +217,8 @@
     Analyzer stopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, 
         newIndexWriterConfig(stopAnalyzer));
-    Document doc = new Document();
-    doc.add(newTextField("field", "the stop words are here", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "the stop words are here");
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
@@ -240,13 +241,13 @@
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     
-    Document doc = new Document();
-    doc.add(newTextField("source", "marketing info", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("source", "marketing info");
     writer.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newTextField("contents", "foobar", Field.Store.YES));
-    doc.add(newTextField("source", "marketing info", Field.Store.YES)); 
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "foobar");
+    doc.addLargeText("source", "marketing info");
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
@@ -275,16 +276,16 @@
     
     writer = new RandomIndexWriter(random(), directory, 
         newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
-    doc = new Document();
-    doc.add(newTextField("contents", "map entry woo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "map entry woo");
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("contents", "woo map entry", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "woo map entry");
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("contents", "map foobarword entry woo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "map foobarword entry woo");
     writer.addDocument(doc);
 
     reader = writer.getReader();
@@ -328,16 +329,16 @@
           .setMergePolicy(newLogMergePolicy())
           .setSimilarity(new DefaultSimilarity()));
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "foo firstname lastname foo", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "foo firstname lastname foo");
     writer.addDocument(doc);
     
-    Document doc2 = new Document();
-    doc2.add(newTextField("field", "foo firstname zzz lastname foo", Field.Store.YES));
+    Document doc2 = writer.newDocument();
+    doc2.addLargeText("field", "foo firstname zzz lastname foo");
     writer.addDocument(doc2);
     
-    Document doc3 = new Document();
-    doc3.add(newTextField("field", "foo firstname zzz yyy lastname foo", Field.Store.YES));
+    Document doc3 = writer.newDocument();
+    doc3.addLargeText("field", "foo firstname zzz yyy lastname foo");
     writer.addDocument(doc3);
     
     IndexReader reader = writer.getReader();
@@ -591,9 +592,6 @@
 
     RandomIndexWriter w  = new RandomIndexWriter(random(), dir, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()));
     List<List<String>> docs = new ArrayList<>();
-    Document d = new Document();
-    Field f = newTextField("f", "", Field.Store.NO);
-    d.add(f);
 
     Random r = random();
 
@@ -638,7 +636,9 @@
         }
       }
       docs.add(doc);
-      f.setStringValue(sb.toString());
+
+      Document d = w.newDocument();
+      d.addLargeText("f", sb.toString());
       w.addDocument(d);
     }
 
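
One further API detail surfaces in this file's first hunk: a field that used to be added twice to a single Document must now be declared multi-valued on the writer's FieldTypes before any document uses it. Condensed from the lines above:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("repeated");  // must precede adding two values below
    Document doc = writer.newDocument();
    doc.addLargeText("repeated", "this is a repeated field - first part");
    doc.addLargeText("repeated", "second part of a repeated field");
    writer.addDocument(doc);
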
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
index 6086ff6..dd3bb12 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
@@ -23,28 +23,26 @@
 import java.util.Collection;
 
 import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
 import org.apache.lucene.search.payloads.PayloadSpanUtil;
 import org.apache.lucene.search.spans.MultiSpansWrapper;
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.Spans;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Term position unit test.
@@ -91,8 +89,8 @@
     };
     Directory store = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), store, analyzer);
-    Document d = new Document();
-    d.add(newTextField("field", "bogus", Field.Store.YES));
+    Document d = writer.newDocument();
+    d.addLargeText("field", "bogus");
     writer.addDocument(d);
     IndexReader reader = writer.getReader();
     writer.close();
@@ -204,9 +202,8 @@
   public void testPayloadsPos0() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockPayloadAnalyzer());
-    Document doc = new Document();
-    doc.add(new TextField("content", new StringReader(
-        "a a b c d e a f g h i j a b k k")));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", new StringReader("a a b c d e a f g h i j a b k k"));
     writer.addDocument(doc);
 
     final IndexReader readerFromWriter = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
index f464ff9..a97da89 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.document.Document;
 
 public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
 
@@ -79,7 +78,7 @@
     
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     IndexReader ir = writer.getReader();
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java
index b41b6dd..1ed44ad 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java
@@ -17,13 +17,12 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Tests {@link PrefixFilter} class.
@@ -39,8 +38,8 @@
                                         "/Computers/Windows"};
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     for (int i = 0; i < categories.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("category", categories[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addAtom("category", categories[i]);
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
index b8a0e11..04a781c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
@@ -18,7 +18,6 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -27,7 +26,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-
 /**
  * https://issues.apache.org/jira/browse/LUCENE-1974
  *
@@ -49,23 +47,25 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
 
-    Document doc = new Document();
-    Field field = newStringField(FIELD, "meaninglessnames", Field.Store.NO);
-    doc.add(field);
+    Document doc = writer.newDocument();
+    doc.addAtom(FIELD, "meaninglessnames");
     
     for (int i = 0; i < 5137; ++i) {
       writer.addDocument(doc);
     }
     
-    field.setStringValue("tangfulin");
+    doc = writer.newDocument();
+    doc.addAtom(FIELD, "tangfulin");
     writer.addDocument(doc);
 
-    field.setStringValue("meaninglessnames");
+    doc = writer.newDocument();
+    doc.addAtom(FIELD, "meaninglessnames");
     for (int i = 5138; i < 11377; ++i) {
       writer.addDocument(doc);
     }
     
-    field.setStringValue("tangfulin");
+    doc = writer.newDocument();
+    doc.addAtom(FIELD, "tangfulin");
     writer.addDocument(doc);
     
     reader = writer.getReader();
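
This hunk also shows why converted setup code grows by a few lines: the deleted version reused one mutable Field, flipping its value with setStringValue() between addDocument() calls, whereas the new API builds a fresh writer-created document for each distinct value:

    // removed: mutate and reuse a single Field instance
    //   field.setStringValue("tangfulin");
    //   writer.addDocument(doc);

    // added: one schema-aware document per value
    doc = writer.newDocument();
    doc.addAtom(FIELD, "tangfulin");
    writer.addDocument(doc);
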
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java
index 06f0595..b719407c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java
@@ -17,15 +17,27 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 
 /**
  * Tests {@link PrefixQuery} class.
@@ -40,8 +52,8 @@
                                         "/Computers/Windows"};
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     for (int i = 0; i < categories.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("category", categories[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addAtom("category", categories[i]);
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
@@ -64,4 +76,90 @@
     reader.close();
     directory.close();
   }
+
+  /** Make sure auto-prefix terms are used with PrefixQuery. */
+  public void testAutoPrefixTermsKickIn() throws Exception {
+
+    List<String> prefixes = new ArrayList<>();
+    for(int i=1;i<5;i++) {
+      char[] chars = new char[i];
+      Arrays.fill(chars, 'a');
+      prefixes.add(new String(chars));
+    }
+
+    Set<String> randomTerms = new HashSet<>();
+    int numTerms = atLeast(10000);
+    while (randomTerms.size() < numTerms) {
+      for(String prefix : prefixes) {
+        randomTerms.add(prefix + TestUtil.randomRealisticUnicodeString(random()));
+      }
+    }
+
+    int actualCount = 0;
+    for(String term : randomTerms) {
+      if (term.startsWith("aa")) {
+        actualCount++;
+      }
+    }
+
+    //System.out.println("actual count " + actualCount);
+
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    int minTermsInBlock = TestUtil.nextInt(random(), 2, 100);
+    int maxTermsInBlock = Math.max(2, (minTermsInBlock-1)*2 + random().nextInt(100));
+
+    // As long as this is never > actualCount, the "aa" prefix should always see at least one auto-prefix term:
+    int minTermsAutoPrefix = TestUtil.nextInt(random(), 2, actualCount);
+    int maxTermsAutoPrefix = random().nextBoolean() ? Math.max(2, (minTermsAutoPrefix-1)*2 + random().nextInt(100)) : Integer.MAX_VALUE;
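+    // (A minimal sanity note: both "max" computations above keep max >= 2*(min-1),
+    // which the block tree terms writer requires of its block-size parameters.)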
+
+    iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene50PostingsFormat(minTermsInBlock, maxTermsInBlock,
+                                                                          minTermsAutoPrefix, maxTermsAutoPrefix)));
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+    for (String term : randomTerms) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", term);
+      w.addDocument(doc);
+    }
+
+    w.forceMerge(1);
+    IndexReader r = w.getReader();
+    final Terms terms = MultiFields.getTerms(r, "field");
+    IndexSearcher s = new IndexSearcher(r);
+    final int finalActualCount = actualCount;
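+    // Anonymous subclass to reach MultiTermQuery's protected getTermsEnum, so the
+    // test can count how many terms the query actually visits: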
+    PrefixQuery q = new PrefixQuery(new Term("field", "aa")) {
+      public PrefixQuery checkTerms() throws IOException {
+        TermsEnum termsEnum = getTermsEnum(terms, new AttributeSource());
+        int count = 0;
+        while (termsEnum.next() != null) {
+          //System.out.println("got term: " + termsEnum.term().utf8ToString());
+          count++;
+        }
+
+        // Auto-prefix term(s) should have kicked in, so we should have visited fewer than the total number of aa* terms:
+        assertTrue(count < finalActualCount);
+
+        return this;
+      }
+    }.checkTerms();
+
+    int savedMaxClauseCount = BooleanQuery.getMaxClauseCount();
+    try {
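+      // Raise the clause limit so the boolean rewrites below cannot hit TooManyClauses: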
+      BooleanQuery.setMaxClauseCount(randomTerms.size());
+      if (random().nextBoolean()) {
+        q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+      } else if (random().nextBoolean()) {
+        q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+      }
+
+      assertEquals(actualCount, s.search(q, 1).totalHits);
+    } finally {
+      BooleanQuery.setMaxClauseCount(savedMaxClauseCount);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
index 3675716..ab95015 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.FilteredTermsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -53,13 +52,10 @@
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
     
-    Document doc = new Document();
-    Field field = newStringField("field", "", Field.Store.NO);
-    doc.add(field);
-
     int num = atLeast(1000);
     for (int i = 0; i < num; i++) {
-      field.setStringValue(TestUtil.randomUnicodeString(random(), 10));
+      Document doc = writer.newDocument();
+      doc.addAtom("field", TestUtil.randomUnicodeString(random(), 10));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
index 20e337c..0242db9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
@@ -23,10 +23,8 @@
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -54,14 +52,14 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
-    doc.add(newTextField("field", "wizard the the the the the oz", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "0");
+    doc.addLargeText("field", "wizard the the the the the oz");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
     // 1 extra token, but wizard and oz are close;
-    doc.add(newTextField("field", "wizard oz the the the the the the", Field.Store.NO));
+    doc.addLargeText("field", "wizard oz the the the the the the");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -112,14 +110,14 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
-    doc.add(newTextField("field", "wizard the the the the the oz", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "0");
+    doc.addLargeText("field", "wizard the the the the the oz");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
     // 1 extra token, but wizard and oz are close;
-    doc.add(newTextField("field", "wizard oz the the the the the the", Field.Store.NO));
+    doc.addLargeText("field", "wizard oz the the the the the the");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -151,14 +149,14 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
-    doc.add(newTextField("field", "wizard the the the the the oz", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "0");
+    doc.addLargeText("field", "wizard the the the the the oz");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
     // 1 extra token, but wizard and oz are close;
-    doc.add(newTextField("field", "wizard oz the the the the the the", Field.Store.NO));
+    doc.addLargeText("field", "wizard oz the the the the the the");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -205,14 +203,14 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
-    doc.add(newTextField("field", "wizard the the the the the oz", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "0");
+    doc.addLargeText("field", "wizard the the the the the oz");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
     // 1 extra token, but wizard and oz are close;
-    doc.add(newTextField("field", "wizard oz the the the the the the", Field.Store.NO));
+    doc.addLargeText("field", "wizard oz the the the the the the");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -283,14 +281,14 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newStringField("id", "0", Field.Store.YES));
-    doc.add(newTextField("field", "wizard the the the the the oz", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("id", "0");
+    doc.addLargeText("field", "wizard the the the the the oz");
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addUniqueAtom("id", "1");
     // 1 extra token, but wizard and oz are close;
-    doc.add(newTextField("field", "wizard oz the the the the the the", Field.Store.NO));
+    doc.addLargeText("field", "wizard oz the the the the the the");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -342,16 +340,16 @@
     final int[] idToNum = new int[numDocs];
     int maxValue = TestUtil.nextInt(random(), 10, 1000000);
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
       int numTokens = TestUtil.nextInt(random(), 1, 10);
       StringBuilder b = new StringBuilder();
       for(int j=0;j<numTokens;j++) {
         b.append("a ");
       }
-      doc.add(newTextField("field", b.toString(), Field.Store.NO));
+      doc.addLargeText("field", b.toString());
       idToNum[i] = random().nextInt(maxValue);
-      doc.add(new NumericDocValuesField("num", idToNum[i]));
+      doc.addInt("num", idToNum[i]);
       w.addDocument(doc);
     }
     final IndexReader r = w.getReader();
@@ -383,8 +381,8 @@
                   @Override
                   public int compare(Integer a, Integer b) {
                     try {
-                      int av = idToNum[Integer.parseInt(r.document(a).get("id"))];
-                      int bv = idToNum[Integer.parseInt(r.document(b).get("id"))];
+                      int av = idToNum[r.document(a).getInt("id")];
+                      int bv = idToNum[r.document(b).getInt("id")];
                       if (av < bv) {
                         return -reverseInt;
                       } else if (bv < av) {
@@ -480,7 +478,7 @@
 
             @Override
             public float score() throws IOException {
-              int num = idToNum[Integer.parseInt(context.reader().document(docID).get("id"))];
+              int num = idToNum[context.reader().document(docID).getInt("id")];
               if (reverse) {
                 //System.out.println("score doc=" + docID + " num=" + num);
                 return num;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
index d727aec..d84b9ba 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
@@ -16,11 +16,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -34,8 +35,8 @@
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "value", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "value");
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
@@ -86,28 +87,29 @@
   public void testRandom() throws Exception {
     final Directory d = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), d);
+    FieldTypes fieldTypes = w.getFieldTypes();
+
     w.w.getConfig().setMaxBufferedDocs(17);
     final int numDocs = atLeast(100);
-    final Set<String> aDocs = new HashSet<>();
+    final Set<Integer> aDocs = new HashSet<>();
     for(int i=0;i<numDocs;i++) {
-      final Document doc = new Document();
+      final Document doc = w.newDocument();
       final String v;
       if (random().nextInt(5) == 4) {
         v = "a";
-        aDocs.add(""+i);
+        aDocs.add(i);
       } else {
         v = "b";
       }
-      final Field f = newStringField("field", v, Field.Store.NO);
-      doc.add(f);
-      doc.add(newStringField("id", ""+i, Field.Store.YES));
+      doc.addAtom("field", v);
+      doc.addUniqueInt("id", i);
       w.addDocument(doc);
     }
 
     final int numDelDocs = atLeast(10);
     for(int i=0;i<numDelDocs;i++) {
-      final String delID = ""+random().nextInt(numDocs);
-      w.deleteDocuments(new Term("id", delID));
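+      // With a typed unique-int id field, deletes must go through FieldTypes' int term encoding: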
+      int delID = random().nextInt(numDocs);
+      w.deleteDocuments(fieldTypes.newIntTerm("id", delID));
       aDocs.remove(delID);
     }
 
@@ -128,8 +130,8 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("field", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addUniqueAtom("field", English.intToEnglish(i));
       writer.addDocument(doc);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
index 23fab2c..0c4a809 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
@@ -21,7 +21,6 @@
 import java.util.Arrays;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -49,8 +48,8 @@
     super.setUp();
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    doc.add(newTextField(FN, "the quick brown fox jumps over the lazy ??? dog 493432 49344", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText(FN, "the quick brown fox jumps over the lazy ??? dog 493432 49344");
     writer.addDocument(doc);
     reader = writer.getReader();
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java
index 7e78dd6..30eeb09 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom.java
@@ -24,9 +24,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -52,15 +50,13 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
     
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setOmitNorms(true);
-    Field field = newField("field", "", customType);
-    doc.add(field);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableNorms("field");
     
     NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ROOT));
     for (int i = 0; i < 1000; i++) {
-      field.setStringValue(df.format(i));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", df.format(i));
       writer.addDocument(doc);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
index 485b867..1e32c15 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
@@ -25,8 +25,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.FilteredTermsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -36,14 +34,13 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
-import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.RegExp;
 
 /**
@@ -65,18 +62,14 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, 
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    Document doc = new Document();
-    Field field = newStringField(fieldName, "", Field.Store.NO);
-    doc.add(field);
-    Field dvField = new SortedDocValuesField(fieldName, new BytesRef());
-    doc.add(dvField);
     List<String> terms = new ArrayList<>();
     int num = atLeast(200);
     for (int i = 0; i < num; i++) {
       String s = TestUtil.randomUnicodeString(random());
-      field.setStringValue(s);
-      dvField.setBytesValue(new BytesRef(s));
       terms.add(s);
+
+      Document doc = writer.newDocument();
+      doc.addAtom(fieldName, s);
       writer.addDocument(doc);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
index 01c845e..72f3be7 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
@@ -46,13 +46,13 @@
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, analyzer);
-    LineFileDocs docs = new LineFileDocs(random());
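+    // Pass the underlying IndexWriter so LineFileDocs creates documents using this writer's field types: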
+    LineFileDocs docs = new LineFileDocs(w.w, random());
     int charsToIndex = atLeast(100000);
     int charsIndexed = 0;
     //System.out.println("bytesToIndex=" + charsToIndex);
     while(charsIndexed < charsToIndex) {
       Document doc = docs.nextDoc();
-      charsIndexed += doc.get("body").length();
+      charsIndexed += doc.getString("body").length();
       w.addDocument(doc);
       //System.out.println("  bytes=" + charsIndexed + " add: " + doc);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
index 75d3d86..fd4a868 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java
@@ -5,7 +5,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -13,8 +12,8 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -50,7 +49,7 @@
     // This could possibly fail if Lucene starts checking for docid ranges...
     d = newDirectory();
     IndexWriter iw = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.close();
     r = DirectoryReader.open(d);
     s = newSearcher(r);
@@ -67,10 +66,10 @@
 
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
     for (int i=0; i<nDocs; i++) {
-      Document d = new Document();
+      Document d = iw.newDocument();
       for (int j=0; j<nTerms; j++) {
         if (random().nextInt(freq[j]) == 0) {
-          d.add(newStringField("f", terms[j].text(), Field.Store.NO));
+          d.addAtom("f", terms[j].text());
           //System.out.println(d);
         }
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
index b2029a9..c92ea77 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
@@ -23,14 +23,9 @@
 import java.util.List;
 import java.util.Random;
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -123,37 +118,66 @@
 
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = iw.getFieldTypes();
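+    // straightbytesdocvalues gets straight BINARY (not SORTED) doc values, and the
+    // two *docvalues fields carry doc values only, never an inverted index: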
+    fieldTypes.disableSorting("straightbytesdocvalues");
+    fieldTypes.setIndexOptions("intdocvalues", IndexOptions.NONE);
+    fieldTypes.setIndexOptions("floatdocvalues", IndexOptions.NONE);
+
     int numDocs = atLeast(200);
     Random r = random();
     for (int i = 0; i < numDocs; i++) {
-      List<Field> fields = new ArrayList<>();
-      fields.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
-      fields.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
-      fields.add(new NumericDocValuesField("byte", (byte) r.nextInt()));
-      fields.add(new NumericDocValuesField("short", (short) r.nextInt()));
-      fields.add(new NumericDocValuesField("int", r.nextInt()));
-      fields.add(new NumericDocValuesField("long", r.nextLong()));
-      fields.add(new FloatDocValuesField("float", r.nextFloat()));
-      fields.add(new DoubleDocValuesField("double", r.nextDouble()));
-      fields.add(new SortedDocValuesField("bytes", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
-      fields.add(new BinaryDocValuesField("bytesval", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+      Document doc = iw.newDocument();
+      if (random().nextInt(5) != 4) {
+        doc.addLargeText("english", English.intToEnglish(i));
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addInt("byte", (byte) random().nextInt());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addInt("short", (short) random().nextInt());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addInt("int", random().nextInt());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addLong("long", random().nextLong());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addFloat("float", random().nextFloat());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addDouble("double", random().nextDouble());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addAtom("bytes", TestUtil.randomRealisticUnicodeString(random()));
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addAtom("bytesval", TestUtil.randomRealisticUnicodeString(random()));
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addInt("intdocvalues", random().nextInt());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addFloat("floatdocvalues", random().nextFloat());
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addShortText("sortedbytesdocvalues", TestUtil.randomRealisticUnicodeString(random()));
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addShortText("sortedbytesdocvaluesval", TestUtil.randomRealisticUnicodeString(random()));
+      }
+      if (random().nextInt(5) != 4) {
+        doc.addBinary("straightbytesdocvalues", new BytesRef(TestUtil.randomRealisticUnicodeString(random())));
+      }
 
-      Document document = new Document();
-      document.add(new StoredField("id", ""+i));
+      doc.addUniqueInt("id", i);
       if (VERBOSE) {
         System.out.println("  add doc id=" + i);
       }
-      for(Field field : fields) {
-        // So we are sometimes missing that field:
-        if (random().nextInt(5) != 4) {
-          document.add(field);
-          if (VERBOSE) {
-            System.out.println("    " + field);
-          }
-        }
-      }
-
-      iw.addDocument(document);
+      iw.addDocument(doc);
 
       if (random().nextInt(50) == 17) {
         iw.commit();
@@ -232,7 +256,7 @@
       System.out.println("  all.totalHits=" + all.totalHits);
       int upto = 0;
       for(ScoreDoc scoreDoc : all.scoreDocs) {
-        System.out.println("    hit " + (upto++) + ": id=" + searcher.doc(scoreDoc.doc).get("id") + " " + scoreDoc);
+        System.out.println("    hit " + (upto++) + ": id=" + searcher.doc(scoreDoc.doc).getInt("id") + " " + scoreDoc);
       }
     }
     int pageStart = 0;
@@ -275,8 +299,8 @@
       ScoreDoc sd2 = paged.scoreDocs[i];
       if (VERBOSE) {
         System.out.println("    hit " + (pageStart + i));
-        System.out.println("      expected id=" + searcher.doc(sd1.doc).get("id") + " " + sd1);
-        System.out.println("        actual id=" + searcher.doc(sd2.doc).get("id") + " " + sd2);
+        System.out.println("      expected id=" + searcher.doc(sd1.doc).getInt("id") + " " + sd1);
+        System.out.println("        actual id=" + searcher.doc(sd2.doc).getInt("id") + " " + sd2);
       }
       assertEquals(sd1.doc, sd2.doc);
       assertEquals(sd1.score, sd2.score, 0f);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
index cd5ac03..1b3200b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
@@ -21,13 +21,12 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 
 @SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestSearchWithThreads extends LuceneTestCase {
@@ -50,9 +49,6 @@
 
     // TODO: replace w/ the @nightly test data; make this
     // into an optional @nightly stress test
-    final Document doc = new Document();
-    final Field body = newTextField("body", "", Field.Store.NO);
-    doc.add(body);
     final StringBuilder sb = new StringBuilder();
     for(int docCount=0;docCount<NUM_DOCS;docCount++) {
       final int numTerms = random().nextInt(10);
@@ -60,7 +56,8 @@
         sb.append(random().nextBoolean() ? "aaa" : "bbb");
         sb.append(' ');
       }
-      body.setStringValue(sb.toString());
+      Document doc = w.newDocument();
+      doc.addLargeText("body", sb.toString());
       w.addDocument(doc);
       sb.delete(0, sb.length());
     }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
index 81ec266..9aa7a3f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
@@ -28,7 +28,6 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterDirectoryReader;
@@ -209,7 +208,7 @@
     // Test can deadlock if we use SMS:
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         new MockAnalyzer(random())).setMergeScheduler(new ConcurrentMergeScheduler()));
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     final CountDownLatch awaitEnterWarm = new CountDownLatch(1);
     final CountDownLatch awaitClose = new CountDownLatch(1);
@@ -241,7 +240,7 @@
     } finally {
       searcherManager.release(searcher);
     }
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     final AtomicBoolean success = new AtomicBoolean(false);
     final Throwable[] exc = new Throwable[1];
@@ -312,7 +311,7 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         new MockAnalyzer(random())).setMergeScheduler(new ConcurrentMergeScheduler()));
     SearcherManager sm = new SearcherManager(writer, false, new SearcherFactory());
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     sm.maybeRefreshBlocking();
 
@@ -380,7 +379,7 @@
         }
       }
     });
-    iw.addDocument(new Document());
+    iw.addDocument(iw.newDocument());
     iw.commit();
     assertFalse(afterRefreshCalled.get());
     sm.maybeRefreshBlocking();
@@ -487,7 +486,7 @@
 
     SearcherManager mgr = new SearcherManager(reader, null);
     for(int i=0;i<10;i++) {
-      w.addDocument(new Document());
+      w.addDocument(w.newDocument());
       mgr.maybeRefresh();
       IndexSearcher s = mgr.acquire();
       try {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
index a2c5afc..0daa070 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java
@@ -17,20 +17,18 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
-
 import java.io.IOException;
 
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.util.LuceneTestCase;
 
 /** Similarity unit test.
  *
@@ -58,11 +56,11 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setSimilarity(new SimpleSimilarity()));
     
-    Document d1 = new Document();
-    d1.add(newTextField("field", "a c", Field.Store.YES));
+    Document d1 = writer.newDocument();
+    d1.addLargeText("field", "a c");
 
-    Document d2 = new Document();
-    d2.add(newTextField("field", "a b c", Field.Store.YES));
+    Document d2 = writer.newDocument();
+    d2.addLargeText("field", "a b c");
     
     writer.addDocument(d1);
     writer.addDocument(d2);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
index 81da556..51d6b39 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
@@ -19,11 +19,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -47,17 +46,14 @@
     PerFieldSimilarityWrapper sim = new ExampleSimilarityProvider();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())).setSimilarity(sim);
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc);
-    Document doc = new Document();
-    Field field = newTextField("foo", "", Field.Store.NO);
-    doc.add(field);
-    Field field2 = newTextField("bar", "", Field.Store.NO);
-    doc.add(field2);
     
-    field.setStringValue("quick brown fox");
-    field2.setStringValue("quick brown fox");
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "quick brown fox");
+    doc.addLargeText("bar", "quick brown fox");
     iw.addDocument(doc);
-    field.setStringValue("jumps over lazy brown dog");
-    field2.setStringValue("jumps over lazy brown dog");
+    doc = iw.newDocument();
+    doc.addLargeText("foo", "jumps over lazy brown dog");
+    doc.addLargeText("bar", "jumps over lazy brown dog");
     iw.addDocument(doc);
     reader = iw.getReader();
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
index afd3030..4ab76c5 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
@@ -18,18 +18,17 @@
  */
 
 import java.io.IOException;
+import java.nio.file.Path;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
 
@@ -38,15 +37,15 @@
   private static final String S_1 = "A A A";
   private static final String S_2 = "A 1 2 3 A 4 5 6 A";
 
-  private static final Document DOC_1 = makeDocument("X " + S_1 + " Y");
-  private static final Document DOC_2 = makeDocument("X " + S_2 + " Y");
-  private static final Document DOC_3 = makeDocument("X " + S_1 + " A Y");
-  private static final Document DOC_1_B = makeDocument("X " + S_1 + " Y N N N N " + S_1 + " Z");
-  private static final Document DOC_2_B = makeDocument("X " + S_2 + " Y N N N N " + S_2 + " Z");
-  private static final Document DOC_3_B = makeDocument("X " + S_1 + " A Y N N N N " + S_1 + " A Y");
-  private static final Document DOC_4 = makeDocument("A A X A X B A X B B A A X B A A");
-  private static final Document DOC_5_3 = makeDocument("H H H X X X H H H X X X H H H");
-  private static final Document DOC_5_4 = makeDocument("H H H H");
+  private static final String DOC_1 = "X " + S_1 + " Y";
+  private static final String DOC_2 = "X " + S_2 + " Y";
+  private static final String DOC_3 = "X " + S_1 + " A Y";
+  private static final String DOC_1_B = "X " + S_1 + " Y N N N N " + S_1 + " Z";
+  private static final String DOC_2_B = "X " + S_2 + " Y N N N N " + S_2 + " Z";
+  private static final String DOC_3_B = "X " + S_1 + " A Y N N N N " + S_1 + " A Y";
+  private static final String DOC_4 = "A A X A X B A X B B A A X B A A";
+  private static final String DOC_5_3 = "H H H X X X H H H X X X H H H";
+  private static final String DOC_5_4 = "H H H H";
 
   private static final PhraseQuery QUERY_1 = makePhraseQuery( S_1 );
   private static final PhraseQuery QUERY_2 = makePhraseQuery( S_2 );
@@ -134,12 +133,12 @@
     }
   }
   
-  private float  checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults) throws Exception {
+  private float checkPhraseQuery(String doc, PhraseQuery query, int slop, int expectedNumResults) throws Exception {
     query.setSlop(slop);
 
     MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random(), new RAMDirectory());
     RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
-    writer.addDocument(doc);
+    addDocument(writer, doc);
 
     IndexReader reader = writer.getReader();
 
@@ -158,13 +157,13 @@
     return c.max; 
   }
 
-  private static Document makeDocument(String docText) {
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f = new Field("f", docText, customType);
-    doc.add(f);
-    return doc;
+  private static void addDocument(RandomIndexWriter w, String text) throws IOException {
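+    // Disabling norms via FieldTypes replaces the old FieldType.setOmitNorms(true) on each Field: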
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableNorms("f");
+
+    Document doc = w.newDocument();
+    doc.addLargeText("f", text);
+    w.addDocument(doc);
   }
 
   private static PhraseQuery makePhraseQuery(String terms) {
@@ -215,24 +214,29 @@
   // LUCENE-3215
   public void testSlopWithHoles() throws Exception {  
     Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
-    Field f = new Field("lyrics", "", customType);
-    Document doc = new Document();
-    doc.add(f);
-    f.setStringValue("drug drug");
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableNorms("lyrics");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("lyrics", "drug drug");
     iw.addDocument(doc);
-    f.setStringValue("drug druggy drug");
+
+    doc = iw.newDocument();
+    doc.addLargeText("lyrics", "drug druggy drug");
     iw.addDocument(doc);
-    f.setStringValue("drug druggy druggy drug");
+
+    doc = iw.newDocument();
+    doc.addLargeText("lyrics", "drug druggy druggy drug");
     iw.addDocument(doc);
-    f.setStringValue("drug druggy drug druggy drug");
+
+    doc = iw.newDocument();
+    doc.addLargeText("lyrics", "drug druggy drug druggy drug");
     iw.addDocument(doc);
+
     IndexReader ir = iw.getReader();
     iw.close();
     IndexSearcher is = newSearcher(ir);
-    
     PhraseQuery pq = new PhraseQuery();
     // "drug the drug"~1
     pq.add(new Term("lyrics", "drug"), 1);
@@ -253,8 +257,8 @@
     
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_NOT_STORED)));
+    Document doc = iw.newDocument();
+    doc.addLargeText("lyrics", document);
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -306,8 +310,8 @@
      Directory dir = newDirectory();
 
      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-     Document doc = new Document();
-     doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_NOT_STORED)));
+     Document doc = iw.newDocument();
+     doc.addLargeText("lyrics", document);
      iw.addDocument(doc);
      IndexReader ir = iw.getReader();
      iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSort.java b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
index 3fb598c..d6def0b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
@@ -18,15 +18,14 @@
  */
 
 import java.io.IOException;
+import java.util.Locale;
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -53,19 +52,18 @@
   public void testString() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -81,19 +79,18 @@
   public void testStringReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -109,19 +106,21 @@
   public void testStringVal() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
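+    // Force straight BINARY doc values (as the old BinaryDocValuesField did) while
+    // still allowing a sort on the field: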
+    fieldTypes.setDocValuesType("value", DocValuesType.BINARY);
+    fieldTypes.enableSorting("value");
+    
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -137,19 +136,22 @@
   public void testStringValReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setDocValuesType("value", DocValuesType.BINARY);
+    fieldTypes.enableSorting("value");
+
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -165,18 +167,19 @@
   public void testStringValSorted() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
+
+    // NOTE: don't ask FieldTypes here, because we are forcing STRING_VAL even though we indexed SORTED DV:
     Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@@ -193,18 +196,19 @@
   public void testStringValReverseSorted() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
+    // NOTE: don't ask FieldTypes here, because we are forcing STRING_VAL even though we indexed SORTED DV:
     Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@@ -221,30 +225,26 @@
   public void testInt() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300000));
-    doc.add(newStringField("value", "300000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    
+    for(int value : new int[] {300000, -1, 4}) {
+      Document doc = writer.newDocument();
+      doc.addInt("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(-1, searcher.doc(td.scoreDocs[0].doc).getInt("value").intValue());
+    assertEquals(4, searcher.doc(td.scoreDocs[1].doc).getInt("value").intValue());
+    assertEquals(300000, searcher.doc(td.scoreDocs[2].doc).getInt("value").intValue());
 
     ir.close();
     dir.close();
@@ -254,61 +254,54 @@
   public void testIntReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300000));
-    doc.add(newStringField("value", "300000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(int value : new int[] {300000, -1, 4}) {
+      Document doc = writer.newDocument();
+      doc.addInt("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // reverse numeric order
-    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(300000, searcher.doc(td.scoreDocs[0].doc).getInt("value").intValue());
+    assertEquals(4, searcher.doc(td.scoreDocs[1].doc).getInt("value").intValue());
+    assertEquals(-1, searcher.doc(td.scoreDocs[2].doc).getInt("value").intValue());
 
     ir.close();
     dir.close();
   }
   
   /** Tests sorting on type int with a missing value */
-  public void testIntMissing() throws IOException {
+  public void testIntMissingDefault() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    writer.addDocument(writer.newDocument());
+    for(int value : new int[] {-1, 4}) {
+      Document doc = writer.newDocument();
+      doc.addInt("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as a 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    // sort missing last by default:
+    assertEquals(-1, searcher.doc(td.scoreDocs[0].doc).getInt("value").intValue());
+    assertEquals(4, searcher.doc(td.scoreDocs[1].doc).getInt("value").intValue());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -318,30 +311,26 @@
   public void testIntMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setSortMissingLast("value");
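+    // setSortMissingLast on the field type replaces the old per-SortField
+    // setMissingValue(Integer.MAX_VALUE) idiom for ascending sorts.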
+    writer.addDocument(writer.newDocument());
+    for(int value : new int[] {-1, 4}) {
+      Document doc = writer.newDocument();
+      doc.addInt("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.INT);
-    sortField.setMissingValue(Integer.MAX_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as a Integer.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(-1, searcher.doc(td.scoreDocs[0].doc).getInt("value").intValue());
+    assertEquals(4, searcher.doc(td.scoreDocs[1].doc).getInt("value").intValue());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -351,30 +340,24 @@
   public void testLong() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 3000000000L));
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(long value : new long[] {3000000000L, -1L, 4L}) {
+      Document doc = writer.newDocument();
+      doc.addLong("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(-1L, searcher.doc(td.scoreDocs[0].doc).getLong("value").longValue());
+    assertEquals(4L, searcher.doc(td.scoreDocs[1].doc).getLong("value").longValue());
+    assertEquals(3000000000L, searcher.doc(td.scoreDocs[2].doc).getLong("value").longValue());
 
     ir.close();
     dir.close();
@@ -384,61 +367,53 @@
   public void testLongReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 3000000000L));
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(long value : new long[] {3000000000L, -1L, 4L}) {
+      Document doc = writer.newDocument();
+      doc.addLong("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // reverse numeric order
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(3000000000L, searcher.doc(td.scoreDocs[0].doc).getLong("value").longValue());
+    assertEquals(4L, searcher.doc(td.scoreDocs[1].doc).getLong("value").longValue());
+    assertEquals(-1L, searcher.doc(td.scoreDocs[2].doc).getLong("value").longValue());
 
     ir.close();
     dir.close();
   }
   
   /** Tests sorting on type long with a missing value */
-  public void testLongMissing() throws IOException {
+  public void testLongMissingDefault() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    writer.addDocument(writer.newDocument());
+    for(long value : new long[] {-1L, 4L}) {
+      Document doc = writer.newDocument();
+      doc.addLong("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    // sort missing last by default:
+    assertEquals(-1L, searcher.doc(td.scoreDocs[0].doc).getLong("value").longValue());
+    assertEquals(4L, searcher.doc(td.scoreDocs[1].doc).getLong("value").longValue());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -448,30 +423,26 @@
   public void testLongMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setSortMissingLast("value");
+    writer.addDocument(writer.newDocument());
+    for(long value : new long[] {-1L, 4L}) {
+      Document doc = writer.newDocument();
+      doc.addLong("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.LONG);
-    sortField.setMissingValue(Long.MAX_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as Long.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(-1L, searcher.doc(td.scoreDocs[0].doc).getLong("value").longValue());
+    assertEquals(4L, searcher.doc(td.scoreDocs[1].doc).getLong("value").longValue());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -480,31 +451,26 @@
   /** Tests sorting on type float */
   public void testFloat() throws IOException {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatDocValuesField("value", 30.1F));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(float value : new float[] {30.1F, -1.3F, 4.2F}) {
+      Document doc = writer.newDocument();
+      doc.addFloat("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals(-1.3f, searcher.doc(td.scoreDocs[0].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(4.2f, searcher.doc(td.scoreDocs[1].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(30.1f, searcher.doc(td.scoreDocs[2].doc).getFloat("value").floatValue(), 0.0f);
 
     ir.close();
     dir.close();
@@ -513,62 +479,56 @@
   /** Tests sorting on type float in reverse */
   public void testFloatReverse() throws IOException {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatDocValuesField("value", 30.1F));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(float value : new float[] {30.1F, -1.3F, 4.2F}) {
+      Document doc = writer.newDocument();
+      doc.addFloat("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    // reverse numeric order
+    assertEquals(30.1f, searcher.doc(td.scoreDocs[0].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(4.2f, searcher.doc(td.scoreDocs[1].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(-1.3f, searcher.doc(td.scoreDocs[2].doc).getFloat("value").floatValue(), 0.0f);
 
     ir.close();
     dir.close();
   }
   
   /** Tests sorting on type float with a missing value */
-  public void testFloatMissing() throws IOException {
+  public void testFloatMissingDefault() throws IOException {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    writer.addDocument(writer.newDocument());
+    for(float value : new float[] {-1.3F, 4.2F}) {
+      Document doc = writer.newDocument();
+      doc.addFloat("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    // sort missing last by default:
+    assertEquals(-1.3f, searcher.doc(td.scoreDocs[0].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(4.2f, searcher.doc(td.scoreDocs[1].doc).getFloat("value").floatValue(), 0.0f);
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -577,31 +537,28 @@
-  /** Tests sorting on type float, specifying the missing value should be treated as Float.MAX_VALUE */
+  /** Tests sorting on type float with missing values sorted last. */
   public void testFloatMissingLast() throws IOException {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", -1.3F));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatDocValuesField("value", 4.2F));
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
-    writer.addDocument(doc);
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setSortMissingLast("value");
+    writer.addDocument(writer.newDocument());
+    for(float value : new float[] {-1.3F, 4.2F}) {
+      Document doc = writer.newDocument();
+      doc.addFloat("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.FLOAT);
-    sortField.setMissingValue(Float.MAX_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as Float.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    assertEquals(-1.3f, searcher.doc(td.scoreDocs[0].doc).getFloat("value").floatValue(), 0.0f);
+    assertEquals(4.2f, searcher.doc(td.scoreDocs[1].doc).getFloat("value").floatValue(), 0.0f);
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getFloat("value"));
 
     ir.close();
     dir.close();
@@ -611,35 +568,26 @@
   public void testDouble() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 30.1));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    for(double value : new double[] {30.1, -1.3, 4.2333333333333, 4.2333333333332}) {
+      Document doc = writer.newDocument();
+      doc.addDouble("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertEquals(-1.3, searcher.doc(td.scoreDocs[0].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(4.2333333333332, searcher.doc(td.scoreDocs[1].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(4.2333333333333, searcher.doc(td.scoreDocs[2].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(30.1, searcher.doc(td.scoreDocs[3].doc).getDouble("value").doubleValue(), 0.0);
 
     ir.close();
     dir.close();
@@ -649,26 +597,24 @@
   public void testDoubleSignedZero() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", +0D));
-    doc.add(newStringField("value", "+0", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -0D));
-    doc.add(newStringField("value", "-0", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
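+    // -0d should sort before +0d: double sort keys are derived from the value's
+    // long bits, which keep the two signed zeros distinct.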
+    for(double value : new double[] {+0D, -0D}) {
+      Document doc = writer.newDocument();
+      doc.addDouble("value", value);
+      writer.addDocument(doc);
+    }
+
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // numeric order
-    assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals(Double.doubleToLongBits(-0D), Double.doubleToLongBits(searcher.doc(td.scoreDocs[0].doc).getDouble("value").doubleValue()));
+    assertEquals(Double.doubleToLongBits(+0D), Double.doubleToLongBits(searcher.doc(td.scoreDocs[1].doc).getDouble("value").doubleValue()));
 
     ir.close();
     dir.close();
@@ -678,71 +624,56 @@
   public void testDoubleReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 30.1));
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    for(double value : new double[] {30.1, -1.3, 4.2333333333333, 4.2333333333332}) {
+      Document doc = writer.newDocument();
+      doc.addDouble("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
+    Sort sort = fieldTypes.newSort("value", true);
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
-    // numeric order
+    // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertEquals(30.1, searcher.doc(td.scoreDocs[0].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(4.2333333333333, searcher.doc(td.scoreDocs[1].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(4.2333333333332, searcher.doc(td.scoreDocs[2].doc).getDouble("value").doubleValue(), 0.0);
+    assertEquals(-1.3, searcher.doc(td.scoreDocs[3].doc).getDouble("value").doubleValue(), 0.0);
 
     ir.close();
     dir.close();
   }
   
   /** Tests sorting on type double with a missing value */
-  public void testDoubleMissing() throws IOException {
+  public void testDoubleMissingDefault() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    writer.addDocument(writer.newDocument());
+    for(double value : new double[] {-1.3, 4.2333333333333, 4.2333333333332}) {
+      Document doc = writer.newDocument();
+      doc.addDouble("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
-    // null treated as a 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
+
+    // sort missing last by default:
+    assertEquals(-1.3, searcher.doc(td.scoreDocs[0].doc).getDouble("value").doubleValue(), 0.0d);
+    assertEquals(4.2333333333332, searcher.doc(td.scoreDocs[1].doc).getDouble("value").doubleValue(), 0.0d);
+    assertEquals(4.2333333333333, searcher.doc(td.scoreDocs[2].doc).getDouble("value").doubleValue(), 0.0d);
+    assertNull(searcher.doc(td.scoreDocs[3].doc).getString("value"));
 
     ir.close();
     dir.close();
@@ -752,34 +683,25 @@
   public void testDoubleMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", -1.3));
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333333));
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleDocValuesField("value", 4.2333333333332));
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
-    writer.addDocument(doc);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setSortMissingLast("value");
+    writer.addDocument(writer.newDocument());
+    for(double value : new double[] {-1.3, 4.2333333333333, 4.2333333333332}) {
+      Document doc = writer.newDocument();
+      doc.addDouble("value", value);
+      writer.addDocument(doc);
+    }
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.DOUBLE);
-    sortField.setMissingValue(Double.MAX_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
-    // null treated as Double.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
+
+    assertEquals(-1.3, searcher.doc(td.scoreDocs[0].doc).getDouble("value").doubleValue(), 0.0d);
+    assertEquals(4.2333333333332, searcher.doc(td.scoreDocs[1].doc).getDouble("value").doubleValue(), 0.0d);
+    assertEquals(4.2333333333333, searcher.doc(td.scoreDocs[2].doc).getDouble("value").doubleValue(), 0.0d);
-    assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[3].doc).getString("value"));
 
     ir.close();
@@ -790,30 +712,26 @@
   public void testMultiSort() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("value1", new BytesRef("foo")));
-    doc.add(new NumericDocValuesField("value2", 0));
-    doc.add(newStringField("value1", "foo", Field.Store.YES));
-    doc.add(newStringField("value2", "0", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom("value1", new BytesRef("foo"));
+    doc.addInt("value2", 0);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value1", new BytesRef("bar")));
-    doc.add(new NumericDocValuesField("value2", 1));
-    doc.add(newStringField("value1", "bar", Field.Store.YES));
-    doc.add(newStringField("value2", "1", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value1", new BytesRef("bar"));
+    doc.addInt("value2", 1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value1", new BytesRef("bar")));
-    doc.add(new NumericDocValuesField("value2", 0));
-    doc.add(newStringField("value1", "bar", Field.Store.YES));
-    doc.add(newStringField("value2", "0", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value1", new BytesRef("bar"));
+    doc.addInt("value2", 0);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("value1", new BytesRef("foo")));
-    doc.add(new NumericDocValuesField("value2", 1));
-    doc.add(newStringField("value1", "foo", Field.Store.YES));
-    doc.add(newStringField("value2", "1", Field.Store.YES));
+
+    doc = writer.newDocument();
+    doc.addAtom("value1", new BytesRef("foo"));
+    doc.addInt("value2", 1);
     writer.addDocument(doc);
+
     IndexReader ir = writer.getReader();
     writer.close();
     
@@ -825,21 +743,21 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value1"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value1"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value1"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[3].doc).get("value1"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).getBinary("value1").utf8ToString());
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).getBinary("value1").utf8ToString());
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).getBinary("value1").utf8ToString());
+    assertEquals("foo", searcher.doc(td.scoreDocs[3].doc).getBinary("value1").utf8ToString());
     // 0 comes before 1
-    assertEquals("0", searcher.doc(td.scoreDocs[0].doc).get("value2"));
-    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("value2"));
-    assertEquals("0", searcher.doc(td.scoreDocs[2].doc).get("value2"));
-    assertEquals("1", searcher.doc(td.scoreDocs[3].doc).get("value2"));
+    assertEquals(0, searcher.doc(td.scoreDocs[0].doc).getInt("value2").intValue());
+    assertEquals(1, searcher.doc(td.scoreDocs[1].doc).getInt("value2").intValue());
+    assertEquals(0, searcher.doc(td.scoreDocs[2].doc).getInt("value2").intValue());
+    assertEquals(1, searcher.doc(td.scoreDocs[3].doc).getInt("value2").intValue());
 
     // Now with overflow
     td = searcher.search(new MatchAllDocsQuery(), 1, sort);
     assertEquals(4, td.totalHits);
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value1"));
-    assertEquals("0", searcher.doc(td.scoreDocs[0].doc).get("value2"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).getBinary("value1").utf8ToString());
+    assertEquals(0, searcher.doc(td.scoreDocs[0].doc).getInt("value2").intValue());
 
     ir.close();
     dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java b/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java
new file mode 100644
index 0000000..9f078cb
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java
@@ -0,0 +1,123 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.Collator;
+import java.util.Locale;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestSortLocale extends LuceneTestCase {
+
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setSortLocale("collated", Locale.ENGLISH, Collator.IDENTICAL);
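+    // With a sort locale set, "collated" sorts by collation key: under the English
+    // collator lowercase "abc" ranks before "ABC", unlike a raw byte-wise sort.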
+
+    Document doc = w.newDocument();
+    doc.addAtom("field", "ABC");
+    doc.addAtom("collated", "ABC");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("field", "abc");
+    doc.addAtom("collated", "abc");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs td = s.search(new MatchAllDocsQuery(), 5, fieldTypes.newSort("collated"));
+    assertEquals("abc", r.document(td.scoreDocs[0].doc).get("field"));
+    assertEquals("ABC", r.document(td.scoreDocs[1].doc).get("field"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRanges() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+
+    Collator collator = Collator.getInstance(Locale.getDefault());
+    if (random().nextBoolean()) {
+      collator.setStrength(Collator.PRIMARY);
+    }
+    fieldTypes.setSortLocale("collated", Locale.getDefault(), collator.getStrength()); // uses -Dtests.locale
+    
+    int numDocs = atLeast(500);
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = iw.newDocument();
+      String value = TestUtil.randomSimpleString(random());
+      doc.addAtom("field", value);
+      doc.addAtom("collated", value);
+      iw.addDocument(doc);
+    }
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    IndexSearcher is = newSearcher(ir);
+    
+    int numChecks = atLeast(100);
+
+    try {
+      for (int i = 0; i < numChecks; i++) {
+        String start = TestUtil.randomSimpleString(random());
+        String end = TestUtil.randomSimpleString(random());
+        Query query = new ConstantScoreQuery(fieldTypes.newStringDocValuesRangeFilter("collated", start, true, end, true));
+        doTestRanges(is, start, end, query, collator);
+      }
+    } finally {
+      ir.close();
+      dir.close();
+    }
+  }
+  
+  private void doTestRanges(IndexSearcher is, String startPoint, String endPoint, Query query, Collator collator) throws Exception { 
+    QueryUtils.check(query);
+    
+    // positive test
+    TopDocs docs = is.search(query, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).getString("field");
+      assertTrue(collate(collator, value, startPoint) >= 0);
+      assertTrue(collate(collator, value, endPoint) <= 0);
+    }
+    
+    // negative test
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD);
+    bq.add(query, BooleanClause.Occur.MUST_NOT);
+    docs = is.search(bq, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).getString("field");
+      assertTrue(collate(collator, value, startPoint) < 0 || collate(collator, value, endPoint) > 0);
+    }
+  }
+
+  // Assumed missing helper: the range checks above call collate(...), which is not
+  // defined elsewhere in this patch; a sign-normalized collator comparison fits the
+  // >= 0 / <= 0 assertions it is used in.
+  private static int collate(Collator collator, String s1, String s2) {
+    return Integer.signum(collator.compare(s1, s2));
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
index c4ea459..bdf89f6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java
@@ -27,19 +27,16 @@
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -72,10 +69,9 @@
     final List<BytesRef> docValues = new ArrayList<>();
     // TODO: deletions
     while (numDocs < NUM_DOCS) {
-      final Document doc = new Document();
+      final Document doc = writer.newDocument();
 
       // 10% of the time, the document is missing the value:
-      final BytesRef br;
       if (random().nextInt(10) != 7) {
         final String s;
         if (random.nextBoolean()) {
@@ -95,20 +91,17 @@
           System.out.println("  " + numDocs + ": s=" + s);
         }
 
-        br = new BytesRef(s);
-        doc.add(new SortedDocValuesField("stringdv", br));
-        docValues.add(br);
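+        // addShortText indexes s with sorted doc values, replacing the
+        // SortedDocValuesField the old code added by hand.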
+        doc.addShortText("stringdv", s);
+        docValues.add(new BytesRef(s));
 
       } else {
-        br = null;
         if (VERBOSE) {
           System.out.println("  " + numDocs + ": <missing>");
         }
         docValues.add(null);
       }
 
-      doc.add(new NumericDocValuesField("id", numDocs));
-      doc.add(new StoredField("id", numDocs));
+      doc.addInt("id", numDocs);
       writer.addDocument(doc);
       numDocs++;
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java
index 033b37f..98b13c2 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRescorer.java
@@ -22,8 +22,6 @@
 import java.util.Comparator;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -43,22 +41,22 @@
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 5));
+    Document doc = iw.newDocument();
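+    // One addX call per field now: addUniqueAtom marks "id" as a unique key,
+    // addLargeText replaces the old tokenized-but-unstored text field, and
+    // addInt gives "popularity" the doc values the rescoring sort relies on.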
+    doc.addUniqueAtom("id", "1");
+    doc.addLargeText("body", "some contents and more contents");
+    doc.addInt("popularity", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 20));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "2");
+    doc.addLargeText("body", "another document with different contents");
+    doc.addInt("popularity", 20);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    doc.add(newTextField("body", "crappy contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 2));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "3");
+    doc.addLargeText("body", "crappy contents");
+    doc.addInt("popularity", 2);
     iw.addDocument(doc);
     
     reader = iw.getReader();
@@ -116,16 +114,16 @@
     final int[] idToNum = new int[numDocs];
     int maxValue = TestUtil.nextInt(random(), 10, 1000000);
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addUniqueInt("id", i);
       int numTokens = TestUtil.nextInt(random(), 1, 10);
       StringBuilder b = new StringBuilder();
       for(int j=0;j<numTokens;j++) {
         b.append("a ");
       }
-      doc.add(newTextField("field", b.toString(), Field.Store.NO));
+      doc.addLargeText("field", b.toString());
       idToNum[i] = random().nextInt(maxValue);
-      doc.add(new NumericDocValuesField("num", idToNum[i]));
+      doc.addInt("num", idToNum[i]);
       w.addDocument(doc);
     }
     final IndexReader r = w.getReader();
@@ -152,8 +150,8 @@
                   @Override
                   public int compare(Integer a, Integer b) {
                     try {
-                      int av = idToNum[Integer.parseInt(r.document(a).get("id"))];
-                      int bv = idToNum[Integer.parseInt(r.document(b).get("id"))];
+                      int av = idToNum[r.document(a).getInt("id")];
+                      int bv = idToNum[r.document(b).getInt("id")];
                       if (av < bv) {
                         return -reverseInt;
                       } else if (bv < av) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
index 9d555e8..a62c431 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
@@ -18,15 +18,13 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
 
 /** Simple tests for SortedNumericSortField */
 public class TestSortedNumericSortField extends LuceneTestCase {
@@ -67,15 +65,17 @@
   
   public void testForward() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 5));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
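+    // setMultiValued backs "value" with SortedNumericDocValues; the ascending sort
+    // below then compares each doc's minimum value, matching the old
+    // SortedNumericSortField default selector.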
+    Document doc = writer.newDocument();
+    doc.addInt("value", 5);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 3));
-    doc.add(new SortedNumericDocValuesField("value", 7));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 3);
+    doc.addInt("value", 7);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -96,14 +96,16 @@
   public void testReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 3));
-    doc.add(new SortedNumericDocValuesField("value", 7));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addInt("value", 3);
+    doc.addInt("value", 7);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 5));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 5);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
 
     IndexReader ir = writer.getReader();
@@ -125,25 +127,28 @@
   public void testMissingFirst() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 5));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    fieldTypes.enableSorting("value");
+    fieldTypes.setSortMissingFirst("value");
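+    // setSortMissingFirst puts the doc without a "value" (id=3) at the front of
+    // the sort, replacing the old setMissingValue(Integer.MIN_VALUE) idiom.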
+
+    Document doc = writer.newDocument();
+    doc.addInt("value", 5);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 3));
-    doc.add(new SortedNumericDocValuesField("value", 7));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 3);
+    doc.addInt("value", 7);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedNumericSortField("value", SortField.Type.INT);
-    sortField.setMissingValue(Integer.MIN_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
@@ -160,25 +165,25 @@
   public void testMissingLast() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 5));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addInt("value", 5);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 3));
-    doc.add(new SortedNumericDocValuesField("value", 7));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 3);
+    doc.addInt("value", 7);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedNumericSortField("value", SortField.Type.INT);
-    sortField.setMissingValue(Integer.MAX_VALUE);
-    Sort sort = new Sort(sortField);
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
@@ -195,19 +200,20 @@
   public void testSingleton() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 5));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document doc = writer.newDocument();
+    doc.addInt("value", 5);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", 3));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 3);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedNumericSortField("value", SortField.Type.INT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -222,20 +228,24 @@
   public void testFloat() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(-3f)));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+
+    Document doc = writer.newDocument();
+    doc.addFloat("value", -3f);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(-5f)));
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(7f)));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", -5f);
+    doc.addFloat("value", 7f);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedNumericSortField("value", SortField.Type.FLOAT));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
@@ -250,20 +260,23 @@
   public void testDouble() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(-3d)));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+
+    Document doc = writer.newDocument();
+    doc.addDouble("value", -3d);
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(-5d)));
-    doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(7d)));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -5d);
+    doc.addDouble("value", 7d);
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedNumericSortField("value", SortField.Type.DOUBLE));
+    Sort sort = fieldTypes.newSort("value");
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
index 9b2ac3d..30ba157 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSelector.java
@@ -18,14 +18,13 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
 
 /** Tests for SortedSetSortField selectors other than MIN,
  *  these require optional codec support (random access to ordinals) */
@@ -35,14 +34,16 @@
   public void testMax() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
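+    // Multi-valued atoms are backed by SortedSetDocValues here, matching the
+    // explicit SortedSetDocValuesField usage being replaced.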
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -65,14 +66,16 @@
   public void testMaxReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -95,17 +98,20 @@
   public void testMaxMissingFirst() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    fieldTypes.setSortMissingFirst("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -132,17 +138,19 @@
   public void testMaxMissingLast() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -169,13 +177,15 @@
   public void testMaxSingleton() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -197,16 +207,18 @@
   public void testMiddleMin() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -228,16 +240,18 @@
   public void testMiddleMinReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -259,19 +273,21 @@
   public void testMiddleMinMissingFirst() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -297,19 +313,21 @@
   public void testMiddleMinMissingLast() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -335,13 +353,15 @@
   public void testMiddleMinSingleton() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -363,16 +383,18 @@
   public void testMiddleMax() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -394,16 +416,18 @@
   public void testMiddleMaxReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -425,19 +449,21 @@
   public void testMiddleMaxMissingFirst() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -463,19 +489,21 @@
   public void testMiddleMaxMissingLast() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("a")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("d")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("a"));
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addAtom("value", new BytesRef("c"));
+    doc.addAtom("value", new BytesRef("d"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("b"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -501,13 +529,15 @@
   public void testMiddleMaxSingleton() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
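
Every hunk in this file repeats the same migration, so it is worth spelling the pattern out once. A minimal sketch, assuming the branch's FieldTypes API exactly as the hunks above use it (the helper name is hypothetical):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.util.BytesRef;

// Old: new Document() plus one SortedSetDocValuesField per value and a stored
// StringField for the id. New: the field schema lives on the writer's FieldTypes.
static void addMultiValuedDoc(RandomIndexWriter writer) throws Exception {
  FieldTypes fieldTypes = writer.getFieldTypes();
  fieldTypes.setMultiValued("value");         // permit several atoms per document
  Document doc = writer.newDocument();        // document bound to the writer's schema
  doc.addAtom("value", new BytesRef("foo"));  // un-analyzed term with doc values
  doc.addAtom("value", new BytesRef("bar"));
  doc.addUniqueAtom("id", "1");               // unique atom doubles as the id field
  writer.addDocument(doc);
}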
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
index 26b6221..55e8070 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
@@ -18,8 +18,7 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -67,22 +66,23 @@
   public void testForward() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    Document doc = writer.newDocument();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedSetSortField("value", false));
 
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("value"));
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
@@ -95,23 +95,24 @@
   public void testReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
 
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortedSetSortField("value", true));
 
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("value", true));
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
@@ -124,27 +125,27 @@
   public void testMissingFirst() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    fieldTypes.setSortMissingFirst("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedSetSortField("value", false);
-    sortField.setMissingValue(SortField.STRING_FIRST);
-    Sort sort = new Sort(sortField);
 
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("value"));
     assertEquals(3, td.totalHits);
     // 'bar' comes before 'baz'
     // null comes first
@@ -159,27 +160,26 @@
   public void testMissingLast() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("foo"));
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addUniqueAtom("id", "3");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
     
     IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortedSetSortField("value", false);
-    sortField.setMissingValue(SortField.STRING_LAST);
-    Sort sort = new Sort(sortField);
 
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
+    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("value"));
     assertEquals(3, td.totalHits);
     // 'bar' comes before 'baz'
     assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
@@ -194,13 +194,15 @@
   public void testSingleton() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("baz"));
+    doc.addUniqueAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", new BytesRef("bar"));
+    doc.addUniqueAtom("id", "1");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
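
The search side changes in step: rather than constructing new Sort(new SortedSetSortField("value", reverse)) and calling setMissingValue on the SortField, these tests derive the sort from the schema. A sketch under the same branch-API assumption (the method is hypothetical):

import java.io.IOException;

import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;

static TopDocs sortByValue(IndexSearcher searcher, FieldTypes fieldTypes, boolean reverse) throws IOException {
  // Missing-value placement is schema state now, configured once at index time:
  //   fieldTypes.setSortMissingFirst("value");
  // fieldTypes.newSort replaces new Sort(new SortedSetSortField("value", reverse)).
  return searcher.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("value", reverse));
}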
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
index 3a448d1..d334e49 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
@@ -44,12 +44,12 @@
     // make sure we have more than one segment occasionally
     int num = atLeast(31);
     for (int i = 0; i < num; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("f", "a b c d b c d c d d", Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addLargeText("f", "a b c d b c d c d d");
       w.addDocument(doc);
 
-      doc = new Document();
-      doc.add(newTextField("f", "a b c d", Field.Store.NO));
+      doc = w.newDocument();
+      doc.addLargeText("f", "a b c d");
       w.addDocument(doc);
     }
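
Two document methods recur through the rest of the patch; under the branch API as used above, addAtom indexes its value as a single un-analyzed term, while addLargeText runs the value through the analyzer, replacing newTextField(name, value, Field.Store.NO). A small sketch (helper name hypothetical):

import org.apache.lucene.document.Document;
import org.apache.lucene.index.RandomIndexWriter;

static void addOne(RandomIndexWriter w) throws Exception {
  Document doc = w.newDocument();
  doc.addAtom("id", "doc-1");                    // one un-analyzed term (was StringField)
  doc.addLargeText("f", "a b c d b c d c d d");  // analyzed text (was TextField)
  w.addDocument(doc);
}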
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java
index b954e33..25fe79e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java
@@ -18,21 +18,32 @@
  */
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-
+import org.apache.lucene.util.TestUtil;
 
 public class TestTermRangeQuery extends LuceneTestCase {
 
@@ -104,19 +115,20 @@
     initializeIndex(new String[]{"A", "B", "C", "D"});
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
+
     TermRangeQuery query = new TermRangeQuery("content", null, null, true, true);
     Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "content");
+    // Should return the unfiltered TermsEnum:
     assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
     assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
-    query = new TermRangeQuery("content", null, null, false, false);
+
+    query = TermRangeQuery.newStringRange("content", "", null, true, true);
+    // Should return the unfiltered TermsEnum:
     assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
     assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
-    query = TermRangeQuery.newStringRange("content", "", null, true, false);
-    assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
-    assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length);
-    // and now anothe one
-    query = TermRangeQuery.newStringRange("content", "B", null, true, false);
-    assertTrue(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
+
+    // and now another one
+    query = TermRangeQuery.newStringRange("content", "B", null, true, true);
     assertEquals(3, searcher.search(query, null, 1000).scoreDocs.length);
     reader.close();
   }
@@ -258,10 +270,10 @@
   }
 
   private void insertDoc(IndexWriter writer, String content) throws IOException {
-    Document doc = new Document();
+    Document doc = writer.newDocument();
 
-    doc.add(newStringField("id", "id" + docCount, Field.Store.YES));
-    doc.add(newTextField("content", content, Field.Store.NO));
+    doc.addAtom("id", "id" + docCount);
+    doc.addLargeText("content", content);
 
     writer.addDocument(doc);
     docCount++;
@@ -336,4 +348,153 @@
     //assertEquals("C added => A,B,<empty string>,C in range", 3, hits.length());
      reader.close();
   }
+
+  /** Make sure auto-prefix terms are used with TermRangeQuery */
+  public void testAutoPrefixTermsKickIn() throws Exception {
+
+    List<String> prefixes = new ArrayList<>();
+    for(int i=1;i<5;i++) {
+      char[] chars = new char[i];
+      Arrays.fill(chars, 'a');
+      prefixes.add(new String(chars));
+    }
+
+    Set<String> randomTerms = new HashSet<>();
+    int numTerms = atLeast(10000);
+    while (randomTerms.size() < numTerms) {
+      for(String prefix : prefixes) {
+        randomTerms.add(prefix + TestUtil.randomSimpleString(random()));
+      }
+    }
+
+    // We build a term range aa<start> - aa<end>
+    char start;
+    char end;
+
+    int actualCount;
+    boolean startInclusive = random().nextBoolean();
+    boolean endInclusive = random().nextBoolean();
+    String startTerm;
+    String endTerm;
+
+    while (true) {
+      start = (char) TestUtil.nextInt(random(), 'a', 'm');
+      end = (char) TestUtil.nextInt(random(), start+1, 'z');
+
+      actualCount = 0;
+
+      startTerm = "aa" + start;
+      endTerm = "aa" + end;
+
+      for(String term : randomTerms) {
+        int cmpStart = startTerm.compareTo(term);
+        int cmpEnd = endTerm.compareTo(term);
+        if ((cmpStart < 0 || (startInclusive && cmpStart == 0)) &&
+            (cmpEnd > 0 || (endInclusive && cmpEnd == 0))) {
+          actualCount++;
+        }
+      }
+
+      if (actualCount > 2000) {
+        break;
+      }
+    }
+
+    //System.out.println("start " + startTerm + " inclusive? " + startInclusive);
+    //System.out.println("end " + endTerm + " inclusive? " + endInclusive);
+    //System.out.println("actual count " + actualCount);
+
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    int minTermsInBlock = TestUtil.nextInt(random(), 2, 100);
+    int maxTermsInBlock = Math.max(2, (minTermsInBlock-1)*2 + random().nextInt(100));
+
+    int minTermsAutoPrefix = TestUtil.nextInt(random(), 2, 100);
+    int maxTermsAutoPrefix = random().nextBoolean() ? Math.max(2, (minTermsAutoPrefix-1)*2 + random().nextInt(100)) : Integer.MAX_VALUE;
+
+    //System.out.println("minTermsAutoPrefix " + minTermsAutoPrefix);
+    //System.out.println("maxTermsAutoPrefix " + maxTermsAutoPrefix);
+
+    iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene50PostingsFormat(minTermsInBlock, maxTermsInBlock,
+                                                                          minTermsAutoPrefix, maxTermsAutoPrefix)));
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+    for (String term : randomTerms) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", term);
+      w.addDocument(doc);
+    }
+
+    w.forceMerge(1);
+    IndexReader r = w.getReader();
+    final Terms terms = MultiFields.getTerms(r, "field");
+    IndexSearcher s = new IndexSearcher(r);
+    final int finalActualCount = actualCount;
+    TermRangeQuery q = new TermRangeQuery("field", new BytesRef(startTerm), new BytesRef(endTerm), startInclusive, endInclusive) {
+      public TermRangeQuery checkTerms() throws IOException {
+        TermsEnum termsEnum = getTermsEnum(terms, new AttributeSource());
+        int count = 0;
+        while (termsEnum.next() != null) {
+          //System.out.println("got term: " + termsEnum.term().utf8ToString());
+          count++;
+        }
+        //System.out.println("count " + count);
+
+        // Auto-prefix term(s) should have kicked in, so we should have visited fewer than the total number of aa* terms:
+        assertTrue(count < finalActualCount);
+
+        return this;
+      }
+    }.checkTerms();
+
+    if (random().nextBoolean()) {
+      q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    } else if (random().nextBoolean()) {
+      q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+    }
+
+    assertEquals(actualCount, s.search(q, 1).totalHits);
+
+    // Test when min == max:
+    List<String> randomTermsList = new ArrayList<>(randomTerms);
+    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
+      String term = randomTermsList.get(random().nextInt(randomTermsList.size()));
+      q = new TermRangeQuery("field", new BytesRef(term), new BytesRef(term), true, true);
+      assertEquals(1, s.search(q, 1).totalHits);
+    }
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSkipSomeSegments() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+    iwc.setMaxBufferedDocs(2);
+    IndexWriter w = new IndexWriter(dir, iwc);
+    
+    for(int i=0;i<26;i++) {
+      Document doc = w.newDocument();
+      byte[] bytes = new byte[1];
+      bytes[0] = (byte) i;
+      doc.addAtom("field", new BytesRef(bytes));
+      w.addDocument(doc);
+    }
+
+    IndexReader r = DirectoryReader.open(w, true);
+    FieldTypes fieldTypes = r.getFieldTypes();
+
+    IndexSearcher s = newSearcher(r);
+    byte[] min = new byte[1];
+    min[0] = 17;
+    byte[] max = new byte[1];
+    max[0] = 18;
+    assertEquals(2, s.search(new ConstantScoreQuery(fieldTypes.newBinaryRangeFilter("field", min, true, max, true)), 1).totalHits);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
 }
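
For testAutoPrefixTermsKickIn, the four integers handed to Lucene50PostingsFormat are the knobs the test randomizes. A sketch with fixed, illustrative constants (the values are assumptions, not the test's; this assumes it runs inside a LuceneTestCase subclass, which supplies newIndexWriterConfig and random()):

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.TestUtil;

static IndexWriterConfig autoPrefixConfig() {
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  // minTermsInBlock/maxTermsInBlock bound the block-tree term blocks;
  // minTermsAutoPrefix/maxTermsAutoPrefix bound when auto-prefix terms are written.
  // Integer.MAX_VALUE for the last knob effectively removes the upper bound.
  iwc.setCodec(TestUtil.alwaysPostingsFormat(
      new Lucene50PostingsFormat(25, 48,                   // terms per block
                                 25, Integer.MAX_VALUE))); // auto-prefix bounds
  return iwc;
}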
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 30a3581..ffb0933 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -23,9 +23,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
@@ -52,9 +51,8 @@
         .setMergePolicy(newLogMergePolicy())
         .setSimilarity(new DefaultSimilarity()));
     for (int i = 0; i < values.length; i++) {
-      Document doc = new Document();
-      doc
-          .add(newTextField(FIELD, values[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText(FIELD, values[i]);
       writer.addDocument(doc);
     }
     indexReader = SlowCompositeReaderWrapper.wrap(writer.getReader());
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index cb603ad..326b705 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -22,9 +22,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
@@ -118,8 +117,8 @@
   }
 
   private void add(String value, RandomIndexWriter iw) throws IOException {
-    Document d = new Document();
-    d.add(newTextField(FIELD_NAME, value, Field.Store.NO));
+    Document d = iw.newDocument();
+    d.addLargeText(FIELD_NAME, value);
     iw.addDocument(d);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
index 57a6306..e721e04 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
@@ -19,9 +19,8 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -105,7 +104,7 @@
     dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 30; i++) {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
     }
     reader = writer.getReader();
     writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
index 0967712..48ab87c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
@@ -17,30 +17,23 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.CompositeReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.CompositeReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
 public class TestTopDocsMerge extends LuceneTestCase {
 
   private static class ShardSearcher extends IndexSearcher {
@@ -105,10 +98,10 @@
       }
 
       for(int docIDX=0;docIDX<numDocs;docIDX++) {
-        final Document doc = new Document();
-        doc.add(new SortedDocValuesField("string", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
-        doc.add(newTextField("text", content[random().nextInt(content.length)], Field.Store.NO));
-        doc.add(new FloatDocValuesField("float", random().nextFloat()));
+        final Document doc = w.newDocument();
+        doc.addAtom("string", TestUtil.randomRealisticUnicodeString(random()));
+        doc.addLargeText("text", content[random().nextInt(content.length)]);
+        doc.addFloat("float", random().nextFloat());
         final int intValue;
         if (random().nextInt(100) == 17) {
           intValue = Integer.MIN_VALUE;
@@ -117,7 +110,7 @@
         } else {
           intValue = random().nextInt();
         }
-        doc.add(new NumericDocValuesField("int", intValue));
+        doc.addInt("int", intValue);
         if (VERBOSE) {
           System.out.println("  doc=" + doc);
         }
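
Here the typed add methods collapse the old one-class-per-type field zoo. A sketch of the mapping, assuming as elsewhere that the branch infers indexing and doc-values defaults from the value type:

import org.apache.lucene.document.Document;
import org.apache.lucene.index.RandomIndexWriter;

static void addTypedDoc(RandomIndexWriter w) throws Exception {
  Document doc = w.newDocument();
  doc.addAtom("string", "abc");           // was SortedDocValuesField + BytesRef
  doc.addLargeText("text", "some words"); // was newTextField(..., Field.Store.NO)
  doc.addFloat("float", 0.5f);            // was FloatDocValuesField
  doc.addInt("int", 42);                  // was NumericDocValuesField
  w.addDocument(doc);
}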
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
index 473f6c6..5dfd1b9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -37,8 +36,7 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      iw.addDocument(doc);
+      iw.addDocument(iw.newDocument());
     }
     ir = iw.getReader();
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTotalHitCountCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTotalHitCountCollector.java
index 409ef71..08cf428 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTotalHitCountCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTotalHitCountCollector.java
@@ -18,8 +18,6 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
@@ -31,9 +29,8 @@
     Directory indexStore = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
     for(int i=0; i<5; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("string", "a"+i, Field.Store.NO));
-      doc.add(new StringField("string", "b"+i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("string", "a"+i);
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
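
Note that the rewritten test now indexes only one atom per document: without fieldTypes.setMultiValued("string"), a second addAtom on the same field would presumably be rejected by the schema, which is why the old second StringField is dropped. A hedged sketch of keeping both values under the new API:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldTypes;
import org.apache.lucene.index.RandomIndexWriter;

static void addBothValues(RandomIndexWriter writer) throws Exception {
  FieldTypes fieldTypes = writer.getFieldTypes();
  fieldTypes.setMultiValued("string");  // opt in to several values per document
  for (int i = 0; i < 5; i++) {
    Document doc = writer.newDocument();
    doc.addAtom("string", "a" + i);
    doc.addAtom("string", "b" + i);     // legal once the field is multi-valued
    writer.addDocument(doc);
  }
}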
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestUsageTrackingFilterCachingPolicy.java b/lucene/core/src/test/org/apache/lucene/search/TestUsageTrackingFilterCachingPolicy.java
index 8095183..59e06e9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestUsageTrackingFilterCachingPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestUsageTrackingFilterCachingPolicy.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.RoaringDocIdSet;
 
 public class TestUsageTrackingFilterCachingPolicy extends LuceneTestCase {
@@ -37,7 +38,7 @@
 
   public void testCostlyFilter() {
     assertTrue(UsageTrackingFilterCachingPolicy.isCostly(new PrefixFilter(new Term("field", "prefix"))));
-    assertTrue(UsageTrackingFilterCachingPolicy.isCostly(NumericRangeFilter.newIntRange("intField", 8, 1, 1000, true, true)));
+    assertTrue(UsageTrackingFilterCachingPolicy.isCostly(new TermRangeFilter("intField", NumericUtils.intToBytes(1), NumericUtils.intToBytes(1000), true, true)));
     assertFalse(UsageTrackingFilterCachingPolicy.isCostly(new QueryWrapperFilter(new TermQuery(new Term("field", "value")))));
   }
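
With NumericRangeFilter gone on this branch, the costly-filter check now encodes the int bounds to terms itself. A sketch of the replacement, assuming NumericUtils.intToBytes is the branch helper used in the hunk above:

import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.util.NumericUtils;

static TermRangeFilter costlyIntRange() {
  // Numeric bounds become encoded terms, ranged over like any other BytesRef:
  return new TermRangeFilter("intField",
                             NumericUtils.intToBytes(1),    // lower bound
                             NumericUtils.intToBytes(1000), // upper bound
                             true, true);                   // both inclusive
}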
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
index dbbaad6..1dd4f65 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
@@ -17,9 +17,8 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
@@ -28,8 +27,8 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-
-import java.io.IOException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * TestWildcard tests the '*' and '?' wildcard characters.
@@ -126,13 +125,9 @@
 
     MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
     assertMatches(searcher, wq, 2);
-    Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "field");
-    assertTrue(wq.getTermsEnum(terms) instanceof PrefixTermsEnum);
     
     wq = new WildcardQuery(new Term("field", "*"));
     assertMatches(searcher, wq, 2);
-    assertFalse(wq.getTermsEnum(terms) instanceof PrefixTermsEnum);
-    assertFalse(wq.getTermsEnum(terms).getClass().getSimpleName().contains("AutomatonTermsEnum"));
     reader.close();
     indexStore.close();
   }
@@ -243,8 +238,8 @@
     Directory indexStore = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
     for (int i = 0; i < contents.length; ++i) {
-      Document doc = new Document();
-      doc.add(newTextField(field, contents[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText(field, contents[i]);
       writer.addDocument(doc);
     }
     writer.close();
@@ -342,8 +337,8 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < docs.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField(field, docs[i], Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addLargeText(field, docs[i]);
       iw.addDocument(doc);
     }
     iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWildcardRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestWildcardRandom.java
index c722506..3688c30 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWildcardRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWildcardRandom.java
@@ -24,7 +24,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -50,13 +49,10 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
     
-    Document doc = new Document();
-    Field field = newStringField("field", "", Field.Store.NO);
-    doc.add(field);
-    
     NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ROOT));
     for (int i = 0; i < 1000; i++) {
-      field.setStringValue(df.format(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("field", df.format(i));
       writer.addDocument(doc);
     }
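
The deleted lines here were the classic micro-optimization of reusing one Field instance via setStringValue; the branch instead asks the writer for a fresh schema-bound document each iteration, presumably trading that allocation trick for schema enforcement. The whole loop in one piece, for reference:

import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.NumberFormat;
import java.util.Locale;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;

static void indexPaddedTerms(IndexWriter writer) throws Exception {
  NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ROOT));
  for (int i = 0; i < 1000; i++) {
    Document doc = writer.newDocument();
    doc.addAtom("field", df.format(i));  // zero-padded term such as "007"
    writer.addDocument(doc);
  }
}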
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/payloads/PayloadHelper.java b/lucene/core/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
index 144b727..dd96ac1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
+++ b/lucene/core/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
@@ -17,26 +17,24 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.English;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
-
-import java.io.IOException;
-import java.util.Random;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
 
 /**
  *
@@ -121,10 +119,10 @@
         analyzer).setSimilarity(similarity));
     // writer.infoStream = System.out;
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new TextField(FIELD, English.intToEnglish(i), Field.Store.YES));
-      doc.add(new TextField(MULTI_FIELD, English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES));
-      doc.add(new TextField(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText(FIELD, English.intToEnglish(i));
+      doc.addLargeText(MULTI_FIELD, English.intToEnglish(i) + "  " + English.intToEnglish(i));
+      doc.addLargeText(NO_PAYLOAD_FIELD, English.intToEnglish(i));
       writer.addDocument(doc);
     }
     reader = DirectoryReader.open(writer, true);
diff --git a/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
index 0ddb46a..03cd175 100644
--- a/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -15,26 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 import java.io.IOException;
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
-import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -43,7 +43,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-
 public class TestPayloadNearQuery extends LuceneTestCase {
   private static IndexSearcher searcher;
   private static IndexReader reader;
@@ -110,10 +109,10 @@
         .setSimilarity(similarity));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", English.intToEnglish(i));
       String txt = English.intToEnglish(i) +' '+English.intToEnglish(i+1);
-      doc.add(newTextField("field2", txt, Field.Store.YES));
+      doc.addLargeText("field2", txt);
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
index 7833f57..e1d6c2f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
@@ -16,36 +16,35 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.English;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.CheckHits;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.similarities.DefaultSimilarity;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.spans.MultiSpansWrapper;
-import org.apache.lucene.search.spans.SpanTermQuery;
-import org.apache.lucene.search.spans.Spans;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.spans.MultiSpansWrapper;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.Spans;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import java.io.IOException;
-
 
 /**
  *
@@ -120,12 +119,10 @@
            .setSimilarity(similarity).setMergePolicy(newLogMergePolicy()));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      Field noPayloadField = newTextField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES);
-      //noPayloadField.setBoost(0);
-      doc.add(noPayloadField);
-      doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
-      doc.add(newTextField("multiField", English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i));
+      doc.addLargeText("field", English.intToEnglish(i));
+      doc.addLargeText("multiField", English.intToEnglish(i) + "  " + English.intToEnglish(i));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java
index eed1aa0..42ab890 100644
--- a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java
@@ -21,9 +21,7 @@
 import java.util.List;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -93,8 +91,8 @@
   public void testEmptyField() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("foo", "bar", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -115,8 +113,8 @@
   public void testEmptyTerm() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("foo", "bar", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -137,11 +135,10 @@
   public void testNoNorms() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setOmitNorms(true);
-    ft.freeze();
-    doc.add(newField("foo", "bar", ft));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableNorms("foo");
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -161,12 +158,12 @@
   public void testOmitTF() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    ft.freeze();
-    Field f = newField("foo", "bar", ft);
-    doc.add(f);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setIndexOptions("foo", IndexOptions.DOCS);
+    fieldTypes.disableHighlighting("foo");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -186,13 +183,13 @@
   public void testOmitTFAndNorms() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    ft.setOmitNorms(true);
-    ft.freeze();
-    Field f = newField("foo", "bar", ft);
-    doc.add(f);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setIndexOptions("foo", IndexOptions.DOCS);
+    fieldTypes.disableNorms("foo");
+    fieldTypes.disableHighlighting("foo");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
@@ -216,9 +213,8 @@
     // inner queries are not real queries, their boosts are ignored, etc.
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    doc.add(newField("foo", "bar", ft));
+    Document doc = iw.newDocument();
+    doc.addLargeText("foo", "bar");
     iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
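
In these TestSimilarity2 hunks, per-field index options move off per-document FieldType instances and onto the writer's FieldTypes registry. A compact sketch of that pattern, using only the branch methods shown above (getFieldTypes, setIndexOptions, disableNorms, disableHighlighting); the writer setup is elided:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.IndexWriter;

    class DocsOnlyFieldSketch {
      // Configure the field once on the writer's FieldTypes, then add documents.
      static void addDocsOnlyField(IndexWriter writer) throws Exception {
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.setIndexOptions("foo", IndexOptions.DOCS); // index docs only: no freqs or positions
        fieldTypes.disableNorms("foo");                       // replaces the old FieldType.setOmitNorms(true)
        fieldTypes.disableHighlighting("foo");                // the hunks pair this with DOCS, presumably because highlighting needs positions/offsets
        Document doc = writer.newDocument();
        doc.addLargeText("foo", "bar");
        writer.addDocument(doc);
      }
    }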
diff --git a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java
index f1ae67b..3cebf33 100644
--- a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java
+++ b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java
@@ -22,11 +22,7 @@
 import java.util.List;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -110,11 +106,9 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
     for (int i = 0; i < docs.length; i++) {
-      Document d = new Document();
-      FieldType ft = new FieldType(TextField.TYPE_STORED);
-      ft.setIndexOptions(IndexOptions.NONE);
-      d.add(newField(FIELD_ID, Integer.toString(i), ft));
-      d.add(newTextField(FIELD_BODY, docs[i], Field.Store.YES));
+      Document d = writer.newDocument();
+      d.addStoredString(FIELD_ID, Integer.toString(i));
+      d.addLargeText(FIELD_BODY, docs[i]);
       writer.addDocument(d);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java
index 068964a..ea280e1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java
@@ -28,7 +28,6 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -114,8 +113,8 @@
             .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000)).setMergePolicy(newLogMergePolicy()));
     //writer.infoStream = System.out;
     for (int i = 0; i < 2000; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", English.intToEnglish(i));
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
index e05832b..c7ca333 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
@@ -22,7 +22,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -38,18 +38,17 @@
 
 public class TestFieldMaskingSpanQuery extends LuceneTestCase {
 
-  protected static Document doc(Field[] fields) {
-    Document doc = new Document();
-    for (int i = 0; i < fields.length; i++) {
-      doc.add(fields[i]);
+  protected static Document doc(RandomIndexWriter w, String... nameAndValues) {
+    Document doc = w.newDocument();
+    int upto = 0;
+    while (upto < nameAndValues.length) {
+      doc.addLargeText(nameAndValues[upto],
+                       nameAndValues[upto+1]);
+      upto += 2;
     }
     return doc;
   }
   
-  protected static Field field(String name, String value) {
-    return newTextField(name, value, Field.Store.NO);
-  }
-
   protected static IndexSearcher searcher;
   protected static Directory directory;
   protected static IndexReader reader;
@@ -57,61 +56,60 @@
   @BeforeClass
   public static void beforeClass() throws Exception {
     directory = newDirectory();
-    RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    fieldTypes.setMultiValued("gender");
+    fieldTypes.setMultiValued("first");
+    fieldTypes.setMultiValued("last");
     
-    writer.addDocument(doc(new Field[] { field("id", "0")
-                                         ,
-                                         field("gender", "male"),
-                                         field("first",  "james"),
-                                         field("last",   "jones")     }));
+    writer.addDocument(doc(writer,
+                           "id", "0",
+                           "gender", "male",
+                           "first",  "james",
+                           "last",   "jones"));
                                                
-    writer.addDocument(doc(new Field[] { field("id", "1")
-                                         ,
-                                         field("gender", "male"),
-                                         field("first",  "james"),
-                                         field("last",   "smith")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "sally"),
-                                         field("last",   "jones")     }));
+    writer.addDocument(doc(writer,
+                           "id", "1",
+                           "gender", "male",
+                           "first",  "james",
+                           "last",   "smith",
+                           "gender", "female",
+                           "first",  "sally",
+                           "last",   "jones"));
     
-    writer.addDocument(doc(new Field[] { field("id", "2")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "greta"),
-                                         field("last",   "jones")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "sally"),
-                                         field("last",   "smith")
-                                         ,
-                                         field("gender", "male"),
-                                         field("first",  "james"),
-                                         field("last",   "jones")     }));
+    writer.addDocument(doc(writer,
+                           "id", "2",
+                           "gender", "female",
+                           "first",  "greta",
+                           "last",   "jones",
+                           "gender", "female",
+                           "first",  "sally",
+                           "last",   "smith",
+                           "gender", "male",
+                           "first",  "james",
+                           "last",   "jones"));
      
-    writer.addDocument(doc(new Field[] { field("id", "3")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "lisa"),
-                                         field("last",   "jones")
-                                         ,
-                                         field("gender", "male"),
-                                         field("first",  "bob"),
-                                         field("last",   "costas")     }));
+    writer.addDocument(doc(writer,
+                           "id", "3",
+                           "gender", "female",
+                           "first",  "lisa",
+                           "last",   "jones",
+                           "gender", "male",
+                           "first",  "bob",
+                           "last",   "costas"));
     
-    writer.addDocument(doc(new Field[] { field("id", "4")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "sally"),
-                                         field("last",   "smith")
-                                         ,
-                                         field("gender", "female"),
-                                         field("first",  "linda"),
-                                         field("last",   "dixit")
-                                         ,
-                                         field("gender", "male"),
-                                         field("first",  "bubba"),
-                                         field("last",   "jones")     }));
+    writer.addDocument(doc(writer,
+                           "id", "4",
+                           "gender", "female",
+                           "first",  "sally",
+                           "last",   "smith",
+                           "gender", "female",
+                           "first",  "linda",
+                           "last",   "dixit",
+                           "gender", "male",
+                           "first",  "bubba",
+                           "last",   "jones"));
     reader = writer.getReader();
     writer.close();
     searcher = newSearcher(reader);
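
TestFieldMaskingSpanQuery previously expressed multi-valued fields by adding several Field instances with the same name; the branch API instead declares the field multi-valued up front. A sketch, using only calls from the hunk above (setMultiValued, newDocument, addLargeText):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;

    class MultiValuedSketch {
      static void addPerson(IndexWriter writer) throws Exception {
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.setMultiValued("first"); // declared up front; the registry presumably treats fields as single-valued by default
        Document doc = writer.newDocument();
        doc.addLargeText("first", "james");
        doc.addLargeText("first", "sally"); // a second value for the same field in one document
        writer.addDocument(doc);
      }
    }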
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 649f301..8f2fd2f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -19,17 +19,16 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CheckHits;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -53,8 +52,8 @@
     directory = newDirectory();
     RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < docFields.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField(FIELD, docFields[i], Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText(FIELD, docFields[i]);
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
index 3d02ea5..85ec482 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
@@ -28,14 +28,12 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
@@ -112,8 +110,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));
 
-    Document doc = new Document();
-    doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText(PayloadHelper.FIELD, "one two three one four three");
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
@@ -255,8 +253,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new TestPayloadAnalyzer()));
 
-    Document doc = new Document();
-    doc.add(new TextField("content", new StringReader("a b c d e f g h i j a k")));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", new StringReader("a b c d e f g h i j a k"));
     writer.addDocument(doc);
 
     IndexReader reader = writer.getReader();
@@ -292,8 +290,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new TestPayloadAnalyzer()));
 
-    Document doc = new Document();
-    doc.add(new TextField("content", new StringReader("a b a d k f a h i k a k")));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", new StringReader("a b a d k f a h i k a k"));
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     IndexSearcher is = newSearcher(reader);
@@ -327,8 +325,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new TestPayloadAnalyzer()));
 
-    Document doc = new Document();
-    doc.add(new TextField("content", new StringReader("j k a l f k k p a t a k l k t a")));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", new StringReader("j k a l f k k p a t a k l k t a"));
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     IndexSearcher is = newSearcher(reader);
@@ -368,8 +366,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));
 
-    Document doc = new Document();
-    doc.add(newTextField(PayloadHelper.FIELD, "xx rr yy mm  pp", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText(PayloadHelper.FIELD, "xx rr yy mm  pp");
     writer.addDocument(doc);
   
     IndexReader reader = writer.getReader();
@@ -428,11 +426,10 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
                                                      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));
 
-    Document doc = null;
     for(int i = 0; i < docs.length; i++) {
-      doc = new Document();
+      Document doc = writer.newDocument();
       String docText = docs[i];
-      doc.add(newTextField(PayloadHelper.FIELD, docText, Field.Store.YES));
+      doc.addLargeText(PayloadHelper.FIELD, docText);
       writer.addDocument(doc);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java
index 3567285..6c1233f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java
@@ -21,7 +21,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -40,11 +39,11 @@
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet);
     
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer);
-    Document doc = new Document();
-    doc.add(newTextField("field", "the quick brown fox", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "the quick brown fox");
     writer.addDocument(doc);
-    Document doc2 = new Document();
-    doc2.add(newTextField("field", "quick brown fox", Field.Store.NO));
+    Document doc2 = writer.newDocument();
+    doc2.addLargeText("field", "quick brown fox");
     writer.addDocument(doc2);
     
     IndexReader reader = writer.getReader();
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
index c5023c7..489092a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
@@ -18,7 +18,6 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -43,16 +42,18 @@
     super.setUp();
     directory = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    Field field = newTextField("field", "", Field.Store.NO);
-    doc.add(field);
-    
-    field.setStringValue("quick brown fox");
+    Document doc = iw.newDocument();
+    doc.addLargeText("field", "quick brown fox");
     iw.addDocument(doc);
-    field.setStringValue("jumps over lazy broun dog");
+
+    doc = iw.newDocument();
+    doc.addLargeText("field", "jumps over lazy broun dog");
     iw.addDocument(doc);
-    field.setStringValue("jumps over extremely very lazy broxn dog");
+
+    doc = iw.newDocument();
+    doc.addLargeText("field", "jumps over extremely very lazy broxn dog");
     iw.addDocument(doc);
+
     reader = iw.getReader();
     iw.close();
     searcher = newSearcher(reader);
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
index 5a960f3..362c80b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -22,13 +22,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Term;
@@ -54,10 +53,10 @@
   public void setUp() throws Exception {
     super.setUp();
     directory = newDirectory();
-    RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < docFields.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField(field, docFields[i], Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText(field, docFields[i]);
       writer.addDocument(doc);
     }
     reader = writer.getReader();
@@ -446,9 +445,9 @@
 
   // LUCENE-1404
   private void addDoc(IndexWriter writer, String id, String text) throws IOException {
-    final Document doc = new Document();
-    doc.add( newStringField("id", id, Field.Store.YES) );
-    doc.add( newTextField("text", text, Field.Store.YES) );
+    Document doc = writer.newDocument();
+    doc.addUniqueAtom("id", id);
+    doc.addLargeText("text", text);
     writer.addDocument(doc);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
index 1ae6168..9392e45 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -19,20 +19,17 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
 
 /*******************************************************************************
  * Tests the span query bug in Lucene. It demonstrates that SpanTermQuerys don't
@@ -89,9 +86,9 @@
   protected void addDocument(final RandomIndexWriter writer, final String id,
       final String text) throws IOException {
     
-    final Document document = new Document();
-    document.add(newStringField(FIELD_ID, id, Field.Store.YES));
-    document.add(newTextField(FIELD_TEXT, text, Field.Store.YES));
+    Document document = writer.newDocument();
+    document.addUniqueAtom(FIELD_ID, id);
+    document.addLargeText(FIELD_TEXT, text);
     writer.addDocument(document);
   }
   
@@ -153,8 +150,8 @@
       
       int id = topdocs.scoreDocs[i].doc;
       float score = topdocs.scoreDocs[i].score;
-      StoredDocument doc = s.doc(id);
-      assertEquals(expectedIds[i], doc.get(FIELD_ID));
+      Document doc = s.doc(id);
+      assertEquals(expectedIds[i], doc.getString(FIELD_ID));
       boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
       if (!scoreEq) {
         System.out.println(i + " warning, expected score: " + expectedScores[i]
@@ -166,4 +163,4 @@
     }
   }
   
-}
\ No newline at end of file
+}
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
index 003d4c4..600e790 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
@@ -25,12 +25,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -218,10 +218,12 @@
                 setOpenMode(OpenMode.CREATE).
                 setMergePolicy(newLogMergePolicy(false))
         );
+        FieldTypes fieldTypes = writer.getFieldTypes();
+
         for(int i=0;i<37;i++) {
-          Document doc = new Document();
-          doc.add(newTextField("content", "aaa bbb ccc ddd" + i, Field.Store.YES));
-          doc.add(newTextField("id", "" + i, Field.Store.YES));
+          Document doc = writer.newDocument();
+          doc.addLargeText("content", "aaa bbb ccc ddd" + i);
+          doc.addUniqueInt("id", i);
           writer.addDocument(doc);
         }
 
@@ -234,7 +236,7 @@
         reader.close();
         
         dir.tweakBufferSizes();
-        writer.deleteDocuments(new Term("id", "0"));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", 0));
         reader = DirectoryReader.open(writer, true);
         IndexSearcher searcher = newSearcher(reader);
         ScoreDoc[] hits = searcher.search(new TermQuery(bbb), null, 1000).scoreDocs;
@@ -244,7 +246,7 @@
         reader.close();
         
         dir.tweakBufferSizes();
-        writer.deleteDocuments(new Term("id", "4"));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", 4));
         reader = DirectoryReader.open(writer, true);
         searcher = newSearcher(reader);
 
@@ -252,7 +254,7 @@
         dir.tweakBufferSizes();
         assertEquals(35, hits.length);
         dir.tweakBufferSizes();
-        hits = searcher.search(new TermQuery(new Term("id", "33")), null, 1000).scoreDocs;
+        hits = searcher.search(fieldTypes.newExactIntQuery("id", 33), null, 1000).scoreDocs;
         dir.tweakBufferSizes();
         assertEquals(1, hits.length);
         hits = searcher.search(new TermQuery(aaa), null, 1000).scoreDocs;
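
TestBufferedIndexInput now stores the id as a typed int (addUniqueInt) and goes through FieldTypes to build the matching term and query, rather than formatting numbers into strings. A sketch of the round trip, assuming the branch methods used above (addUniqueInt, newIntTerm, newExactIntQuery):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TopDocs;

    class TypedIdSketch {
      static TopDocs indexDeleteSearch(IndexWriter writer, IndexSearcher searcher) throws Exception {
        FieldTypes fieldTypes = writer.getFieldTypes();
        Document doc = writer.newDocument();
        doc.addUniqueInt("id", 33);                             // typed identifier field
        writer.addDocument(doc);
        writer.deleteDocuments(fieldTypes.newIntTerm("id", 0)); // delete by typed term, no string formatting
        // The searcher must wrap a reader opened after the delete to observe it.
        return searcher.search(fieldTypes.newExactIntQuery("id", 33), 10);
      }
    }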
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
index 8c5c695..790e479 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -26,13 +26,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -333,8 +332,8 @@
     }
 
     private void addDoc(IndexWriter writer) throws IOException {
-        Document doc = new Document();
-        doc.add(newTextField("content", "aaa", Field.Store.NO));
+        Document doc = writer.newDocument();
+        doc.addLargeText("content", "aaa");
         writer.addDocument(doc);
     }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
index bfff86f..bcdf4fc 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
@@ -23,7 +23,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.util.BytesRef;
@@ -344,16 +343,12 @@
       mmapDir.setUseUnmap(true);
     MockDirectoryWrapper dir = new MockDirectoryWrapper(random, mmapDir);
     RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    Field docid = newStringField("docid", "0", Field.Store.YES);
-    Field junk = newStringField("junk", "", Field.Store.YES);
-    doc.add(docid);
-    doc.add(junk);
     
     int numDocs = 100;
     for (int i = 0; i < numDocs; i++) {
-      docid.setStringValue("" + i);
-      junk.setStringValue(TestUtil.randomUnicodeString(random));
+      Document doc = writer.newDocument();
+      doc.addUniqueInt("docid", i);
+      doc.addAtom("junk", TestUtil.randomUnicodeString(random));
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
@@ -362,7 +357,7 @@
     int numAsserts = atLeast(100);
     for (int i = 0; i < numAsserts; i++) {
       int docID = random.nextInt(numDocs);
-      assertEquals("" + docID, reader.document(docID).get("docid"));
+      assertEquals(docID, reader.document(docID).getInt("docid").intValue());
     }
     reader.close();
     dir.close();
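
On the retrieval side, stored values come back typed: the TestMultiMMap hunk reads the int id with getInt, and other hunks in this patch use getString. A sketch, assuming the branch's Document accessors shown above:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexReader;

    class TypedRetrievalSketch {
      static void show(IndexReader reader) throws Exception {
        // Assumes documents were written with addUniqueInt("docid", ...)
        // and addAtom("junk", ...) as in the hunk above.
        Document doc = reader.document(0);
        int docid = doc.getInt("docid").intValue(); // typed accessor; boxed, so presumably null when the field is absent
        String junk = doc.getString("junk");
        System.out.println(docid + " -> " + junk);
      }
    }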
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
index a94cb15..174473a 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
@@ -56,7 +56,7 @@
     analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     RandomIndexWriter w = new RandomIndexWriter(random(), cachedDir, conf);
-    final LineFileDocs docs = new LineFileDocs(random(), true);
+    final LineFileDocs docs = new LineFileDocs(w.w, random());
     final int numDocs = TestUtil.nextInt(random(), 100, 400);
 
     if (VERBOSE) {
@@ -67,7 +67,7 @@
     DirectoryReader r = null;
     for(int docCount=0;docCount<numDocs;docCount++) {
       final Document doc = docs.nextDoc();
-      ids.add(new BytesRef(doc.get("docid")));
+      ids.add(new BytesRef(doc.getString("docid")));
       w.addDocument(doc);
       if (random().nextInt(20) == 17) {
         if (r == null) {
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
index 694ec1a..5a3dfdb 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
@@ -23,13 +23,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.IOUtils;
@@ -55,10 +53,9 @@
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
         new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
     // add some documents
-    Document doc = null;
     for (int i = 0; i < docsToAdd; i++) {
-      doc = new Document();
-      doc.add(newStringField("content", English.intToEnglish(i).trim(), Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addAtom("content", English.intToEnglish(i).trim());
       writer.addDocument(doc);
     }
     assertEquals(docsToAdd, writer.maxDoc());
@@ -103,8 +100,8 @@
     
     // search for all documents
     for (int i = 0; i < docsToAdd; i++) {
-      StoredDocument doc = searcher.doc(i);
-      assertTrue(doc.getField("content") != null);
+      Document doc = searcher.doc(i);
+      assertTrue(doc.get("content") != null);
     }
 
     // cleanup
@@ -135,8 +132,8 @@
         @Override
         public void run() {
           for (int j=1; j<docsPerThread; j++) {
-            Document doc = new Document();
-            doc.add(newStringField("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES));
+            Document doc = writer.newDocument();
+            doc.addAtom("sizeContent", English.intToEnglish(num*docsPerThread+j).trim());
             try {
               writer.addDocument(doc);
             } catch (IOException e) {
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java b/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java
index 8cea53b..79077ba 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java
@@ -19,9 +19,6 @@
 
 import java.nio.file.Path;
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
@@ -30,6 +27,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class TestWindowsMMap extends LuceneTestCase {
   
@@ -80,8 +78,8 @@
     int num = atLeast(1000);
     for(int dx = 0; dx < num; dx ++) {
       String f = randomField();
-      Document doc = new Document();
-      doc.add(newTextField("data", f, Field.Store.YES));  
+      Document doc = writer.newDocument();
+      doc.addLargeText("data", f);  
       writer.addDocument(doc);
     }
     
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java b/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java
index 1d49b16..fbae8d1 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestNumericUtils.java
@@ -22,124 +22,10 @@
 import java.util.Iterator;
 import java.util.Random;
 
+import org.apache.lucene.document.Document;
+
 public class TestNumericUtils extends LuceneTestCase {
 
-  public void testLongConversionAndOrdering() throws Exception {
-    // generate a series of encoded longs, each numerical one bigger than the one before
-    BytesRefBuilder last = new BytesRefBuilder();
-    BytesRefBuilder act = new BytesRefBuilder();
-    for (long l=-100000L; l<100000L; l++) {
-      NumericUtils.longToPrefixCodedBytes(l, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
-      // test is back and forward conversion works
-      assertEquals("forward and back conversion should generate same long", l, NumericUtils.prefixCodedToLong(act.get()));
-      // next step
-      last.copyBytes(act);
-    }
-  }
-
-  public void testIntConversionAndOrdering() throws Exception {
-    // generate a series of encoded ints, each numerical one bigger than the one before
-    BytesRefBuilder act = new BytesRefBuilder();
-    BytesRefBuilder last = new BytesRefBuilder();
-    for (int i=-100000; i<100000; i++) {
-      NumericUtils.intToPrefixCodedBytes(i, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
-      // test is back and forward conversion works
-      assertEquals("forward and back conversion should generate same int", i, NumericUtils.prefixCodedToInt(act.get()));
-      // next step
-      last.copyBytes(act.get());
-    }
-  }
-
-  public void testLongSpecialValues() throws Exception {
-    long[] vals=new long[]{
-      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
-      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals = new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      NumericUtils.longToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same long", vals[i], NumericUtils.prefixCodedToLong(prefixVals[i].get()) );
-
-      // test if decoding values as int fails correctly
-      try {
-        NumericUtils.prefixCodedToInt(prefixVals[i].get());
-        fail("decoding a prefix coded long value as int should fail");
-      } catch (NumberFormatException e) {
-        // worked
-      }
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-        
-    // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<64; j++) {
-        NumericUtils.longToPrefixCodedBytes(vals[i], j, ref);
-        long prefixVal=NumericUtils.prefixCodedToLong(ref.get());
-        long mask=(1L << j) - 1L;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
-  public void testIntSpecialValues() throws Exception {
-    int[] vals=new int[]{
-      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
-      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals=new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      NumericUtils.intToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same int", vals[i], NumericUtils.prefixCodedToInt(prefixVals[i].get()) );
-      
-      // test if decoding values as long fails correctly
-      try {
-        NumericUtils.prefixCodedToLong(prefixVals[i].get());
-        fail("decoding a prefix coded int value as long should fail");
-      } catch (NumberFormatException e) {
-        // worked
-      }
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-    
-    // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<32; j++) {
-        NumericUtils.intToPrefixCodedBytes(vals[i], j, ref);
-        int prefixVal=NumericUtils.prefixCodedToInt(ref.get());
-        int mask=(1 << j) - 1;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
   public void testDoubles() throws Exception {
     double[] vals=new double[]{
       Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
@@ -149,8 +35,8 @@
     
     // check forward and back conversion
     for (int i=0; i<vals.length; i++) {
-      longVals[i]=NumericUtils.doubleToSortableLong(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
+      longVals[i]=NumericUtils.doubleToLong(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.longToDouble(longVals[i]))==0 );
     }
     
     // check sort order (prefixVals should be ascending)
@@ -168,10 +54,10 @@
   };
 
   public void testSortableDoubleNaN() {
-    final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
+    final long plusInf = NumericUtils.doubleToLong(Double.POSITIVE_INFINITY);
     for (double nan : DOUBLE_NANs) {
       assertTrue(Double.isNaN(nan));
-      final long sortable = NumericUtils.doubleToSortableLong(nan);
+      final long sortable = NumericUtils.doubleToLong(nan);
       assertTrue("Double not sorted correctly: " + nan + ", long repr: " 
           + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
     }
@@ -186,8 +72,8 @@
     
     // check forward and back conversion
     for (int i=0; i<vals.length; i++) {
-      intVals[i]=NumericUtils.floatToSortableInt(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
+      intVals[i]=NumericUtils.floatToInt(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.intToFloat(intVals[i]))==0 );
     }
     
     // check sort order (prefixVals should be ascending)
@@ -205,364 +91,22 @@
   };
 
   public void testSortableFloatNaN() {
-    final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
+    final int plusInf = NumericUtils.floatToInt(Float.POSITIVE_INFINITY);
     for (float nan : FLOAT_NANs) {
       assertTrue(Float.isNaN(nan));
-      final int sortable = NumericUtils.floatToSortableInt(nan);
+      final int sortable = NumericUtils.floatToInt(nan);
       assertTrue("Float not sorted correctly: " + nan + ", int repr: " 
           + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
     }
   }
 
-  // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because implicitely tested by range filter tests
-  
-  /** Note: The neededBounds Iterable must be unsigned (easier understanding what's happening) */
-  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    // Cannot use FixedBitSet since the range could be long:
-    final LongBitSet bits=useBitSet ? new LongBitSet(upper-lower+1) : null;
-    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+  public void testHalfFloat() throws Exception {
+    for(int x=Short.MIN_VALUE;x<=Short.MAX_VALUE;x++) {
+      BytesRef bytes = NumericUtils.shortToBytes((short) x);
+      assertEquals(x, NumericUtils.bytesToShort(bytes));
 
-    NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
-      @Override
-      public void addRange(long min, long max, int shift) {
-        assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
-        if (useBitSet) for (long l=min; l<=max; l++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(l-lower) );
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (l == max) break;
-        }
-        if (neededBounds == null || neededShifts == null)
-          return;
-        // make unsigned longs for easier display and understanding
-        min ^= 0x8000000000000000L;
-        max ^= 0x8000000000000000L;
-        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
-        assertEquals( "shift", neededShifts.next().intValue(), shift);
-        assertEquals( "inner min bound", neededBounds.next().longValue(), min>>>shift);
-        assertEquals( "inner max bound", neededBounds.next().longValue(), max>>>shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0,upper-lower+1);
-      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+      short y = NumericUtils.sortableHalfFloatBits((short) x);
+      assertEquals(x, NumericUtils.sortableHalfFloatBits(y));
     }
   }
-  
-  /** LUCENE-2541: NumericRangeQuery errors with endpoints near long min and max values */
-  public void testLongExtremeValues() throws Exception {
-    // upper end extremes
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffefL,0xffffffffffffffefL,
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      0, 4
-    ));
-
-    // lower end extremes
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
-      0x0000000000000010L,0x0000000000000010L,
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      0, 4
-    ));
-  }
-  
-  public void testRandomSplit() throws Exception {
-    long num = (long) atLeast(10);
-    for (long i=0; i < num; i++) {
-      executeOneRandomSplit(random());
-    }
-  }
-  
-  private void executeOneRandomSplit(final Random random) throws Exception {
-    long lower = randomLong(random);
-    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
-    while (lower + len < lower) { // overflow
-      lower >>= 1;
-    }
-    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
-  }
-  
-  private long randomLong(final Random random) {
-    long val;
-    switch(random.nextInt(4)) {
-      case 0:
-        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
-        break;
-      case 1:
-        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
-        break;
-      default:
-        val = random.nextLong();
-    }
-
-    val += random.nextInt(5)-2;
-
-    if (random.nextBoolean()) {
-      if (random.nextBoolean()) val += random.nextInt(100)-50;
-      if (random.nextBoolean()) val = ~val;
-      if (random.nextBoolean()) val = val<<1;
-      if (random.nextBoolean()) val = val>>>1;
-    }
-
-    return val;
-  }
-  
-  public void testSplitLongRange() throws Exception {
-    // a hard-coded "standard" range
-    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
-      0x7fffffffffffec78L,0x7fffffffffffec7fL,
-      0x8000000000002510L,0x800000000000251cL,
-      0x7fffffffffffec8L, 0x7fffffffffffecfL,
-      0x800000000000250L, 0x800000000000250L,
-      0x7fffffffffffedL,  0x7fffffffffffefL,
-      0x80000000000020L,  0x80000000000024L,
-      0x7ffffffffffffL,   0x8000000000001L
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
-      0x7fffffffffffec78L,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting, if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
-      0x800000000000040L, 0x800000000000043L,
-      0x80000000000000L,  0x80000000000003L
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
-      0x00L,0xffL
-    ), Arrays.asList(
-      56
-    ));
-
-    // the same with precisionStep=4
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
-      0x0L,0xfL
-    ), Arrays.asList(
-      60
-    ));
-
-    // the same with precisionStep=2
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
-      0x0L,0x3L
-    ), Arrays.asList(
-      62
-    ));
-
-    // the same with precisionStep=1
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
-      0x0L,0x1L
-    ), Arrays.asList(
-      63
-    ));
-
-    // a inverse range should produce no sub-ranges
-    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
-      0x800000000000251cL,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-  }
-
-  /** Note: The neededBounds Iterable must be unsigned (easier understanding what's happening) */
-  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
-    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
-    
-    NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
-      @Override
-      public void addRange(int min, int max, int shift) {
-        assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
-        if (useBitSet) for (int i=min; i<=max; i++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(i-lower) );
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (i == max) break;
-        }
-        if (neededBounds == null)
-          return;
-        // make unsigned ints for easier display and understanding
-        min ^= 0x80000000;
-        max ^= 0x80000000;
-        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
-        assertEquals( "shift", neededShifts.next().intValue(), shift);
-        assertEquals( "inner min bound", neededBounds.next().intValue(), min>>>shift);
-        assertEquals( "inner max bound", neededBounds.next().intValue(), max>>>shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0, upper-lower+1);
-      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
-    }
-  }
-  
-  public void testSplitIntRange() throws Exception {
-    // a hard-coded "standard" range
-    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
-      0x7fffec78,0x7fffec7f,
-      0x80002510,0x8000251c,
-      0x7fffec8, 0x7fffecf,
-      0x8000250, 0x8000250,
-      0x7fffed,  0x7fffef,
-      0x800020,  0x800024,
-      0x7ffff,   0x80001
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
-      0x7fffec78,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting: if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
-      0x8000040, 0x8000043,
-      0x800000,  0x800003
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
-      0x00,0xff
-    ), Arrays.asList(
-      24
-    ));
-
-    // the same with precisionStep=4
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
-      0x0,0xf
-    ), Arrays.asList(
-      28
-    ));
-
-    // the same with precisionStep=2
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
-      0x0,0x3
-    ), Arrays.asList(
-      30
-    ));
-
-    // the same with precisionStep=1
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
-      0x0,0x1
-    ), Arrays.asList(
-      31
-    ));
-
-    // an inverse range should produce no sub-ranges
-    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
-      0x8000251c,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-  }
-
 }
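
The deleted assertions above pin down how the legacy trie encoding splits a numeric
range: NumericUtils.splitIntRange decomposes [lower, upper] into non-overlapping
sub-ranges, one group per precision level, with the shift growing by precisionStep
at each level. A minimal sketch of that callback API, reusing the
NumericUtils.IntRangeBuilder contract shown in the deleted test (pre-LUCENE-6005
trunk API; the output formatting is illustrative only):

    // Print each sub-range visited for [-5000, 9500] with precisionStep=4;
    // `shift` is how many low-order bits the bounds have been truncated by.
    NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
      @Override
      public void addRange(int min, int max, int shift) {
        // XOR flips the sign bit so values print in unsigned order,
        // exactly as the deleted assertions display them
        System.out.println("0x" + Integer.toHexString((min ^ 0x80000000) >>> shift)
            + "..0x" + Integer.toHexString((max ^ 0x80000000) >>> shift)
            + " shift=" + shift);
      }
    }, 4, -5000, 9500);
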
diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestAutomaton.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestAutomaton.java
index ba269fc..becb83b 100644
--- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestAutomaton.java
+++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestAutomaton.java
@@ -232,7 +232,7 @@
   }
 
   public void testInterval() throws Exception {
-    Automaton a = Operations.determinize(Automata.makeInterval(17, 100, 3),
+    Automaton a = Operations.determinize(Automata.makeDecimalInterval(17, 100, 3),
       DEFAULT_MAX_DETERMINIZED_STATES);
     assertFalse(Operations.run(a, ""));
     assertTrue(Operations.run(a, "017"));
@@ -431,7 +431,7 @@
   }
 
   public void testOneInterval() throws Exception {
-    Automaton a = Automata.makeInterval(999, 1032, 0);
+    Automaton a = Automata.makeDecimalInterval(999, 1032, 0);
     a = Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES);
     assertTrue(Operations.run(a, "0999"));
     assertTrue(Operations.run(a, "00999"));
@@ -439,7 +439,7 @@
   }
 
   public void testAnotherInterval() throws Exception {
-    Automaton a = Automata.makeInterval(1, 2, 0);
+    Automaton a = Automata.makeDecimalInterval(1, 2, 0);
     a = Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES);
     assertTrue(Operations.run(a, "01"));
   }
@@ -462,7 +462,7 @@
       }
       String prefix = b.toString();
 
-      Automaton a = Operations.determinize(Automata.makeInterval(min, max, digits),
+      Automaton a = Operations.determinize(Automata.makeDecimalInterval(min, max, digits),
         DEFAULT_MAX_DETERMINIZED_STATES);
       if (random().nextBoolean()) {
         a = MinimizationOperations.minimize(a, DEFAULT_MAX_DETERMINIZED_STATES);
@@ -942,7 +942,7 @@
           if (VERBOSE) {
             System.out.println("  op=union interval min=" + min + " max=" + max + " digits=" + digits);
           }
-          a = Operations.union(a, Automata.makeInterval(min, max, digits));
+          a = Operations.union(a, Automata.makeDecimalInterval(min, max, digits));
           StringBuilder b = new StringBuilder();
           for(int i=0;i<digits;i++) {
             b.append('0');
@@ -1104,4 +1104,131 @@
       throw ae;
     }
   }
+
+  public void testMakeBinaryIntervalRandom() throws Exception {
+    int iters = atLeast(100);
+    for(int iter=0;iter<iters;iter++) {
+      BytesRef minTerm = TestUtil.randomBinaryTerm(random());
+      boolean minInclusive = random().nextBoolean();
+      BytesRef maxTerm = TestUtil.randomBinaryTerm(random());
+      boolean maxInclusive = random().nextBoolean();
+
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter + " minTerm=" + minTerm + " minInclusive=" + minInclusive + " maxTerm=" + maxTerm + " maxInclusive=" + maxInclusive);
+      }
+
+      Automaton a = Automata.makeBinaryInterval(minTerm, minInclusive, maxTerm, maxInclusive);
+
+      Automaton minA = MinimizationOperations.minimize(a, Integer.MAX_VALUE);
+      if (minA.getNumStates() != a.getNumStates()) {
+        assertTrue(minA.getNumStates() < a.getNumStates());
+        System.out.println("Original was not minimal:");
+        System.out.println("Original:\n" + a.toDot());
+        System.out.println("Minimized:\n" + minA.toDot());
+        fail("auotmaton was not minimal");
+      }
+
+      if (VERBOSE) {
+        System.out.println(a.toDot());
+      }
+
+      for(int iter2=0;iter2<500;iter2++) {
+        BytesRef term = TestUtil.randomBinaryTerm(random());
+        int minCmp = minTerm.compareTo(term);
+        int maxCmp = maxTerm.compareTo(term);
+
+        boolean expected;
+        if (minCmp > 0 || maxCmp < 0) {
+          expected = false;
+        } else if (minCmp == 0 && maxCmp == 0) {
+          expected = minInclusive && maxInclusive;
+        } else if (minCmp == 0) {
+          expected = minInclusive;
+        } else if (maxCmp == 0) {
+          expected = maxInclusive;
+        } else {
+          expected = true;
+        }
+
+        if (VERBOSE) {
+          System.out.println("  check term=" + term + " expected=" + expected);
+        }
+        IntsRefBuilder intsBuilder = new IntsRefBuilder();
+        Util.toIntsRef(term, intsBuilder);
+        assertEquals(expected, Operations.run(a, intsBuilder.toIntsRef()));
+      }
+    }
+  }
+
+  private static IntsRef intsRef(String s) {
+    IntsRefBuilder intsBuilder = new IntsRefBuilder();
+    Util.toIntsRef(new BytesRef(s), intsBuilder);
+    return intsBuilder.toIntsRef();
+  }
+
+  public void testMakeBinaryIntervalBasic() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(new BytesRef("bar"), true, new BytesRef("foo"), true);
+    assertTrue(Operations.run(a, intsRef("bar")));
+    assertTrue(Operations.run(a, intsRef("foo")));
+    assertTrue(Operations.run(a, intsRef("beep")));
+    assertFalse(Operations.run(a, intsRef("baq")));
+    assertTrue(Operations.run(a, intsRef("bara")));
+  }
+
+  public void testMakeBinaryIntervalEqual() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(new BytesRef("bar"), true, new BytesRef("bar"), true);
+    assertTrue(Operations.run(a, intsRef("bar")));
+    assertTrue(Operations.isFinite(a));
+    assertEquals(1, Operations.getFiniteStrings(a, 10).size());
+  }
+
+  public void testMakeBinaryIntervalCommonPrefix() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(new BytesRef("bar"), true, new BytesRef("barfoo"), true);
+    assertFalse(Operations.run(a, intsRef("bam")));
+    assertTrue(Operations.run(a, intsRef("bar")));
+    assertTrue(Operations.run(a, intsRef("bara")));
+    assertTrue(Operations.run(a, intsRef("barf")));
+    assertTrue(Operations.run(a, intsRef("barfo")));
+    assertTrue(Operations.run(a, intsRef("barfoo")));
+    assertTrue(Operations.run(a, intsRef("barfonz")));
+    assertFalse(Operations.run(a, intsRef("barfop")));
+    assertFalse(Operations.run(a, intsRef("barfoop")));
+  }
+
+  public void testMakeBinaryIntervalOpenMax() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(new BytesRef("bar"), true, null, true);
+    assertFalse(Operations.run(a, intsRef("bam")));
+    assertTrue(Operations.run(a, intsRef("bar")));
+    assertTrue(Operations.run(a, intsRef("bara")));
+    assertTrue(Operations.run(a, intsRef("barf")));
+    assertTrue(Operations.run(a, intsRef("barfo")));
+    assertTrue(Operations.run(a, intsRef("barfoo")));
+    assertTrue(Operations.run(a, intsRef("barfonz")));
+    assertTrue(Operations.run(a, intsRef("barfop")));
+    assertTrue(Operations.run(a, intsRef("barfoop")));
+    assertTrue(Operations.run(a, intsRef("zzz")));
+  }
+
+  public void testMakeBinaryIntervalOpenMin() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(null, true, new BytesRef("foo"), true);
+    assertFalse(Operations.run(a, intsRef("foz")));
+    assertFalse(Operations.run(a, intsRef("zzz")));
+    assertTrue(Operations.run(a, intsRef("foo")));
+    assertTrue(Operations.run(a, intsRef("")));
+    assertTrue(Operations.run(a, intsRef("a")));
+    assertTrue(Operations.run(a, intsRef("aaa")));
+    assertTrue(Operations.run(a, intsRef("bz")));
+  }
+
+  public void testMakeBinaryIntervalOpenBoth() throws Exception {
+    Automaton a = Automata.makeBinaryInterval(null, true, null, true);
+    if (VERBOSE) {
+      System.out.println(a.toDot());
+    }
+    assertTrue(Operations.run(a, intsRef("foz")));
+    assertTrue(Operations.run(a, intsRef("zzz")));
+    assertTrue(Operations.run(a, intsRef("foo")));
+    assertTrue(Operations.run(a, intsRef("")));
+    assertTrue(Operations.run(a, intsRef("a")));
+    assertTrue(Operations.run(a, intsRef("aaa")));
+    assertTrue(Operations.run(a, intsRef("bz")));
+  }
 }
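
The new tests above fix the contract of Automata.makeBinaryInterval(min,
minInclusive, max, maxInclusive): the automaton accepts exactly the byte strings
between the two terms, a null bound means open on that side, and the result is
already minimal. A short usage sketch built from the same helpers the test uses:

    // Accepts terms t with "bar" <= t < "foo" (max bound exclusive here).
    Automaton a = Automata.makeBinaryInterval(
        new BytesRef("bar"), true, new BytesRef("foo"), false);
    IntsRefBuilder scratch = new IntsRefBuilder();
    Util.toIntsRef(new BytesRef("baz"), scratch);
    assert Operations.run(a, scratch.toIntsRef());          // inside the interval
    scratch.clear();
    Util.toIntsRef(new BytesRef("foo"), scratch);
    assert Operations.run(a, scratch.toIntsRef()) == false; // excluded upper bound
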
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index 37825e8..ab8fb97 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -41,7 +41,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -64,12 +63,12 @@
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.IntsRefBuilder;
 import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
 import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
 import org.apache.lucene.util.fst.FST.Arc;
@@ -305,7 +304,6 @@
   // file, up until a time limit
   public void testRealTerms() throws Exception {
 
-    final LineFileDocs docs = new LineFileDocs(random(), true);
     final int RUN_TIME_MSEC = atLeast(500);
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
@@ -314,6 +312,7 @@
     final Path tempDir = createTempDir("fstlines");
     final Directory dir = newFSDirectory(tempDir);
     final IndexWriter writer = new IndexWriter(dir, conf);
+    final LineFileDocs docs = new LineFileDocs(writer, random());
     final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
     Document doc;
     int docCount = 0;
@@ -853,10 +852,6 @@
       }
       RandomIndexWriter w = new RandomIndexWriter(random(), dir,
                                                   newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
-      Document doc = new Document();
-      Field idField = newStringField("id", "", Field.Store.NO);
-      doc.add(idField);
-
       final int NUM_IDS = atLeast(200);
       //final int NUM_IDS = (int) (377 * (1.0+random.nextDouble()));
       if (VERBOSE) {
@@ -878,7 +873,8 @@
           }
         }
         allIDs.add(idString);
-        idField.setStringValue(idString);
+        Document doc = w.newDocument();
+        doc.addAtom("id", idString);
         w.addDocument(doc);
       }
 
@@ -983,10 +979,6 @@
 
     RandomIndexWriter w = new RandomIndexWriter(random(), dir,
                                                 newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
-    Document doc = new Document();
-    Field f = newStringField("field", "", Field.Store.NO);
-    doc.add(f);
-
     final int NUM_TERMS = (int) (1000*RANDOM_MULTIPLIER * (1+random().nextDouble()));
     if (VERBOSE) {
       System.out.println("TEST: NUM_TERMS=" + NUM_TERMS);
@@ -998,7 +990,8 @@
     }
 
     for(String term : allTerms) {
-      f.setStringValue(term);
+      Document doc = w.newDocument();
+      doc.addAtom("field", term);
       w.addDocument(doc);
     }
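
This hunk shows the indexing pattern this merge applies throughout: instead of
constructing one Document and mutating a reused Field across addDocument calls,
each document is obtained from the writer, so it is bound to the writer's schema,
and fields are added through typed methods. A condensed sketch of the branch API,
with the same names used above:

    for (String term : allTerms) {
      Document doc = w.newDocument();   // schema-aware document from the writer
      doc.addAtom("field", term);       // indexed as a single un-tokenized token
      w.addDocument(doc);
    }

Elsewhere in this merge, doc.addUniqueAtom("id", idString) is used instead when
the field acts as a unique key.
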
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
index 3680e56..f3d0e04 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailIfUnreferencedFiles.java
@@ -19,7 +19,8 @@
 
 import java.util.Collections;
 
-import org.apache.lucene.document.Document;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.IOContext;
@@ -30,7 +31,6 @@
 import org.junit.runner.JUnitCore;
 import org.junit.runner.Result;
 import org.junit.runner.notification.Failure;
-import com.carrotsearch.randomizedtesting.RandomizedTest;
 
 // LUCENE-4456: Test that we fail if there are unreferenced files
 public class TestFailIfUnreferencedFiles extends WithNestedTests {
@@ -43,7 +43,7 @@
       MockDirectoryWrapper dir = newMockDirectory();
       dir.setAssertNoUnrefencedFilesOnClose(true);
       IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-      iw.addDocument(new Document());
+      iw.addDocument(iw.newDocument());
       iw.close();
       IndexOutput output = dir.createOutput("_hello.world", IOContext.DEFAULT);
       output.writeString("i am unreferenced!");
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java
index 4823462..7090d90 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java
@@ -39,6 +39,7 @@
   }
   
   public static class Nested1 extends WithNestedTests.AbstractNestedTest {
+
     static Path file;
     public void testDummy() {
       file = createTempDir("leftover");
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java
index 807ec33..61ce4d3 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java
@@ -65,7 +65,7 @@
     Assert.assertEquals(1, result.getFailureCount());
     Failure failure = result.getFailures().get(0);
     Assert.assertTrue(failure.getMessage()
-        .contains("One of the overrides of setUp does not propagate the call."));
+                      .contains("One of the overrides of setUp does not propagate the call."));
   }
   
   /**
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/IndexFiles.java b/lucene/demo/src/java/org/apache/lucene/demo/IndexFiles.java
index 8c324b7..1d56193 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/IndexFiles.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/IndexFiles.java
@@ -17,20 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
@@ -44,6 +30,16 @@
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Date;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+
 /** Index all text files under a directory.
  * <p>
  * This is a command-line application demonstrating simple Lucene indexing.
@@ -168,14 +164,13 @@
   static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
     try (InputStream stream = Files.newInputStream(file)) {
       // make a new, empty document
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       
       // Add the path of the file as a field named "path".  Use a
       // field that is indexed (i.e. searchable), but don't tokenize 
       // the field into separate words and don't index term frequency
       // or positional information:
-      Field pathField = new StringField("path", file.toString(), Field.Store.YES);
-      doc.add(pathField);
+      doc.addAtom("path", file.toString());
       
       // Add the last modified date of the file in a field named "modified".
       // Use a LongField that is indexed (i.e. efficiently filterable with
@@ -184,13 +179,13 @@
       // year/month/day/hour/minutes/seconds, down to the resolution you require.
       // For example the long value 2011021714 would mean
       // February 17, 2011, 2-3 PM.
-      doc.add(new LongField("modified", lastModified, Field.Store.NO));
+      doc.addLong("modified", lastModified);
       
       // Add the contents of the file to a field named "contents".  Specify a Reader,
       // so that the text of the file is tokenized and indexed, but not stored.
       // Note that FileReader expects the file to be in UTF-8 encoding.
       // If that's not the case, searching for special characters will fail.
-      doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));
+      doc.addLargeText("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)));
       
       if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
         // New index, so we just add the document (no old document can be there):
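
The "modified" comment above describes trading timestamp resolution for indexing
efficiency. A hedged illustration of that encoding (plain java.text, not a Lucene
API; Lucene's DateTools utility provides an equivalent conversion):

    // Encode a timestamp as yyyyMMddHH, so 2011021714 == February 17, 2011,
    // 2-3 PM, matching the example in the comment; coarser patterns give
    // coarser buckets.
    SimpleDateFormat hourRes = new SimpleDateFormat("yyyyMMddHH", Locale.ROOT);
    long encoded = Long.parseLong(hourRes.format(new Date(lastModified)));
    doc.addLong("modified", encoded);
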
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java b/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java
index daade5c..7044908 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/SearchFiles.java
@@ -27,9 +27,9 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -178,13 +178,13 @@
           continue;
         }
 
-        StoredDocument doc = searcher.doc(hits[i].doc);
-        String path = doc.get("path");
+        Document doc = searcher.doc(hits[i].doc);
+        String path = doc.getString("path");
         if (path != null) {
           System.out.println((i+1) + ". " + path);
-          String title = doc.get("title");
+          String title = doc.getString("title");
           if (title != null) {
-            System.out.println("   Title: " + doc.get("title"));
+            System.out.println("   Title: " + doc.getString("title"));
           }
         } else {
           System.out.println((i+1) + ". " + "No path for this document");
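
On the search side, StoredDocument is folded back into Document and the untyped
doc.get(...) becomes a typed getter. A minimal retrieval sketch using the field
names from this demo:

    Document doc = searcher.doc(hits[i].doc);   // stored fields for one hit
    String path = doc.getString("path");        // typed accessor replaces get()
    if (path != null) {
      System.out.println((i + 1) + ". " + path);
    }
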
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java
index 66ede10..b53d94e 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java
@@ -37,8 +37,8 @@
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
@@ -68,14 +68,14 @@
     // Writes facet ords to a separate directory from the main index
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
 
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     // 3 occurrences for tag 'lucene'
     doc.add(new IntAssociationFacetField(3, "tags", "lucene"));
     // 87% confidence level of genre 'computing'
     doc.add(new FloatAssociationFacetField(0.87f, "genre", "computing"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     // 1 occurrence for tag 'lucene'
     doc.add(new IntAssociationFacetField(1, "tags", "lucene"));
     // 2 occurrences for tag 'solr'
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
index 34fc31b..01a92cad 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
@@ -23,9 +23,7 @@
 
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.SimpleBindings;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
@@ -40,15 +38,14 @@
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.queries.BooleanFilter;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeFilter;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
@@ -93,25 +90,19 @@
 
     // Add documents with latitude/longitude location:
-    // we index these both as DoubleFields (for bounding box/ranges) and as NumericDocValuesFields (for scoring)
+    // we index these as double fields, used both for bounding box/range filtering and for scoring
-    Document doc = new Document();
-    doc.add(new DoubleField("latitude", 40.759011, Field.Store.NO));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
-    doc.add(new DoubleField("longitude", -73.9844722, Field.Store.NO));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
+    Document doc = writer.newDocument();
+    doc.addDouble("latitude", 40.759011);
+    doc.addDouble("longitude", -73.9844722);
     writer.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new DoubleField("latitude", 40.718266, Field.Store.NO));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
-    doc.add(new DoubleField("longitude", -74.007819, Field.Store.NO));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
+    doc = writer.newDocument();
+    doc.addDouble("latitude", 40.718266);
+    doc.addDouble("longitude", -74.007819);
     writer.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new DoubleField("latitude", 40.7051157, Field.Store.NO));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
-    doc.add(new DoubleField("longitude", -74.0088305, Field.Store.NO));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
+    doc = writer.newDocument();
+    doc.addDouble("latitude", 40.7051157);
+    doc.addDouble("longitude", -74.0088305);
     writer.addDocument(doc);
 
     // Open near-real-time searcher
@@ -139,7 +130,7 @@
    *  maximum great circle (surface of the earth) distance,
   *  returns a simple bounding-box Filter to "fast match"
    *  candidates. */
-  public static Filter getBoundingBoxFilter(double originLat, double originLng, double maxDistanceKM) {
+  public static Filter getBoundingBoxFilter(FieldTypes fieldTypes, double originLat, double originLng, double maxDistanceKM) {
 
     // Basic bounding box geo math from
     // http://JanMatuschek.de/LatitudeLongitudeBoundingCoordinates,
@@ -182,7 +173,7 @@
     BooleanFilter f = new BooleanFilter();
 
     // Add latitude range filter:
-    f.add(NumericRangeFilter.newDoubleRange("latitude", Math.toDegrees(minLat), Math.toDegrees(maxLat), true, true),
+    f.add(fieldTypes.newDoubleRangeFilter("latitude", Math.toDegrees(minLat), true, Math.toDegrees(maxLat), true),
           BooleanClause.Occur.MUST);
 
     // Add longitude range filter:
@@ -190,13 +181,13 @@
       // The bounding box crosses the international date
       // line:
       BooleanFilter lonF = new BooleanFilter();
-      lonF.add(NumericRangeFilter.newDoubleRange("longitude", Math.toDegrees(minLng), null, true, true),
+      lonF.add(fieldTypes.newDoubleRangeFilter("longitude", Math.toDegrees(minLng), true, null, true),
                BooleanClause.Occur.SHOULD);
-      lonF.add(NumericRangeFilter.newDoubleRange("longitude", null, Math.toDegrees(maxLng), true, true),
+      lonF.add(fieldTypes.newDoubleRangeFilter("longitude", null, true, Math.toDegrees(maxLng), true),
                BooleanClause.Occur.SHOULD);
       f.add(lonF, BooleanClause.Occur.MUST);
     } else {
-      f.add(NumericRangeFilter.newDoubleRange("longitude", Math.toDegrees(minLng), Math.toDegrees(maxLng), true, true),
+      f.add(fieldTypes.newDoubleRangeFilter("longitude", Math.toDegrees(minLng), true, Math.toDegrees(maxLng), true),
             BooleanClause.Occur.MUST);
     }
 
@@ -211,7 +202,7 @@
     searcher.search(new MatchAllDocsQuery(), fc);
 
     Facets facets = new DoubleRangeFacetCounts("field", getDistanceValueSource(), fc,
-                                               getBoundingBoxFilter(ORIGIN_LATITUDE, ORIGIN_LONGITUDE, 10.0),
+                                               getBoundingBoxFilter(searcher.getFieldTypes(), ORIGIN_LATITUDE, ORIGIN_LONGITUDE, 10.0),
                                                ONE_KM,
                                                TWO_KM,
                                                FIVE_KM,
@@ -227,7 +218,7 @@
     // documents ("browse only"):
     DrillDownQuery q = new DrillDownQuery(null);
     final ValueSource vs = getDistanceValueSource();
-    q.add("field", range.getFilter(getBoundingBoxFilter(ORIGIN_LATITUDE, ORIGIN_LONGITUDE, range.max), vs));
+    q.add("field", range.getFilter(getBoundingBoxFilter(searcher.getFieldTypes(), ORIGIN_LATITUDE, ORIGIN_LONGITUDE, range.max), vs));
     DrillSideways ds = new DrillSideways(searcher, config, (TaxonomyReader) null) {
         @Override
         protected Facets buildFacetsResult(FacetsCollector drillDowns, FacetsCollector[] drillSideways, String[] drillSidewaysDims) throws IOException {        
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
index b5f1657..b3860ba 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
@@ -5,9 +5,6 @@
 
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.SimpleBindings;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
@@ -22,8 +19,8 @@
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.SortField;
@@ -65,15 +62,15 @@
     // Writes facet ords to a separate directory from the main index
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
 
-    Document doc = new Document();
-    doc.add(new TextField("c", "foo bar", Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 5L));
+    Document doc = indexWriter.newDocument();
+    doc.addLargeText("c", "foo bar");
+    doc.addLong("popularity", 5L);
     doc.add(new FacetField("A", "B"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
-    doc.add(new TextField("c", "foo foo bar", Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 3L));
+    doc = indexWriter.newDocument();
+    doc.addLargeText("c", "foo foo bar");
+    doc.addLong("popularity", 3L);
     doc.add(new FacetField("A", "C"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
     
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java
index 7f8c0c6..4f40007 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java
@@ -34,8 +34,8 @@
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
@@ -63,27 +63,27 @@
     // Writes facet ords to a separate directory from the main index
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
 
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Bob"));
     doc.add(new FacetField("Publish Date", "2010", "10", "15"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2010", "10", "20"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2012", "1", "1"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Susan"));
     doc.add(new FacetField("Publish Date", "2012", "1", "7"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Frank"));
     doc.add(new FacetField("Publish Date", "1999", "5", "5"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/RangeFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/RangeFacetsExample.java
index 2f9180c..5e4f4da 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/RangeFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/RangeFacetsExample.java
@@ -22,9 +22,7 @@
 
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.Facets;
@@ -34,11 +32,11 @@
 import org.apache.lucene.facet.range.LongRangeFacetCounts;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -48,6 +46,7 @@
 
   private final Directory indexDir = new RAMDirectory();
   private IndexSearcher searcher;
+  private FieldTypes fieldTypes;
   private final long nowSec = System.currentTimeMillis();
 
   final LongRange PAST_HOUR = new LongRange("Past hour", nowSec-3600, true, nowSec, true);
@@ -65,17 +64,16 @@
     // Add documents with a fake timestamp, 1000 sec before
     // "now", 2000 sec before "now", ...:
     for(int i=0;i<100;i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       long then = nowSec - i * 1000;
-      // Add as doc values field, so we can compute range facets:
-      doc.add(new NumericDocValuesField("timestamp", then));
-      // Add as numeric field so we can drill-down:
-      doc.add(new LongField("timestamp", then, Field.Store.NO));
+      // Add as numeric field, so we can compute range facets and drill down:
+      doc.addLong("timestamp", then);
       indexWriter.addDocument(doc);
     }
 
     // Open near-real-time searcher
     searcher = new IndexSearcher(DirectoryReader.open(indexWriter, true));
+    fieldTypes = searcher.getFieldTypes();
     indexWriter.close();
   }
 
@@ -108,7 +106,7 @@
     // documents ("browse only"):
     DrillDownQuery q = new DrillDownQuery(getConfig());
 
-    q.add("timestamp", NumericRangeQuery.newLongRange("timestamp", range.min, range.max, range.minInclusive, range.maxInclusive));
+    q.add("timestamp", new ConstantScoreQuery(fieldTypes.newLongRangeFilter("timestamp", range.min, range.minInclusive, range.max, range.maxInclusive)));
 
     return searcher.search(q, 10);
   }
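
Numeric drill-down now builds its range clause from the index-wide FieldTypes
schema instead of NumericRangeQuery; wrapping the filter in ConstantScoreQuery
keeps it a Query, as DrillDownQuery.add expects. The replacement call in one
piece (names as in the diff above):

    FieldTypes fieldTypes = searcher.getFieldTypes();
    DrillDownQuery q = new DrillDownQuery(getConfig());
    q.add("timestamp", new ConstantScoreQuery(
        fieldTypes.newLongRangeFilter("timestamp",
            range.min, range.minInclusive,
            range.max, range.maxInclusive)));
    TopDocs hits = searcher.search(q, 10);
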
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java
index 7849033..a16410c 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java
@@ -37,8 +37,8 @@
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
@@ -64,27 +64,27 @@
     // Writes facet ords to a separate directory from the main index
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
 
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Bob"));
     doc.add(new FacetField("Publish Date", "2010", "10", "15"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2010", "10", "20"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2012", "1", "1"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Susan"));
     doc.add(new FacetField("Publish Date", "2012", "1", "7"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new FacetField("Author", "Frank"));
     doc.add(new FacetField("Publish Date", "1999", "5", "5"));
     indexWriter.addDocument(config.build(taxoWriter, doc));
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
index 0c301e5..0ac9640 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java
@@ -34,8 +34,8 @@
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
@@ -58,27 +58,27 @@
   private void index() throws IOException {
     IndexWriter indexWriter = new IndexWriter(indexDir, new IndexWriterConfig(
         new WhitespaceAnalyzer()).setOpenMode(OpenMode.CREATE));
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     doc.add(new SortedSetDocValuesFacetField("Author", "Bob"));
     doc.add(new SortedSetDocValuesFacetField("Publish Year", "2010"));
     indexWriter.addDocument(config.build(doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new SortedSetDocValuesFacetField("Author", "Lisa"));
     doc.add(new SortedSetDocValuesFacetField("Publish Year", "2010"));
     indexWriter.addDocument(config.build(doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new SortedSetDocValuesFacetField("Author", "Lisa"));
     doc.add(new SortedSetDocValuesFacetField("Publish Year", "2012"));
     indexWriter.addDocument(config.build(doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new SortedSetDocValuesFacetField("Author", "Susan"));
     doc.add(new SortedSetDocValuesFacetField("Publish Year", "2012"));
     indexWriter.addDocument(config.build(doc));
 
-    doc = new Document();
+    doc = indexWriter.newDocument();
     doc.add(new SortedSetDocValuesFacetField("Author", "Frank"));
     doc.add(new SortedSetDocValuesFacetField("Publish Year", "1999"));
     indexWriter.addDocument(config.build(doc));
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java b/lucene/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java
index a1f3c6d..ad1c667 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java
@@ -25,7 +25,6 @@
 import java.util.Enumeration;
 import java.util.Properties;
 import java.util.StringTokenizer;
-
 import javax.servlet.RequestDispatcher;
 import javax.servlet.ServletConfig;
 import javax.servlet.ServletException;
@@ -36,14 +35,11 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.queryparser.xml.CorePlusExtensionsParser;
 import org.apache.lucene.queryparser.xml.QueryTemplateManager;
 import org.apache.lucene.search.IndexSearcher;
@@ -84,7 +80,7 @@
           getServletContext().getResourceAsStream("/WEB-INF/" + xslFile));
 
       //initialize an XML Query Parser for use by all threads
-      xmlParser = new CorePlusExtensionsParser(defaultStandardQueryParserField, analyzer);
+      xmlParser = new CorePlusExtensionsParser(searcher.getFieldTypes(), defaultStandardQueryParserField, analyzer);
     } catch (Exception e) {
       throw new ServletException("Error loading query template", e);
     }
@@ -108,7 +104,7 @@
       org.w3c.dom.Document xmlQuery = queryTemplateManager.getQueryAsDOM(completedFormFields);
 
       //Parse the XML to produce a Lucene query
-      Query query = xmlParser.getQuery(xmlQuery.getDocumentElement());
+      Query query = xmlParser.getQuery(searcher.getFieldTypes(), xmlQuery.getDocumentElement());
 
       //Run the query
       TopDocs topDocs = searcher.search(query, 10);
@@ -116,7 +112,7 @@
       //and package the results and forward to JSP
       if (topDocs != null) {
         ScoreDoc[] sd = topDocs.scoreDocs;
-        StoredDocument[] results = new StoredDocument[sd.length];
+        Document[] results = new Document[sd.length];
         for (int i = 0; i < results.length; i++) {
           results[i] = searcher.doc(sd[i].doc);
           request.setAttribute("results", results);
@@ -138,18 +134,20 @@
     InputStream dataIn = getServletContext().getResourceAsStream("/WEB-INF/data.tsv");
     BufferedReader br = new BufferedReader(new InputStreamReader(dataIn, StandardCharsets.UTF_8));
     String line = br.readLine();
-    final FieldType textNoNorms = new FieldType(TextField.TYPE_STORED);
-    textNoNorms.setOmitNorms(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for (String fieldName : new String[] {"location", "salary", "type", "description"}) {
+      fieldTypes.disableNorms(fieldName);
+    }
     while (line != null) {
       line = line.trim();
       if (line.length() > 0) {
         //parse row and create a document
         StringTokenizer st = new StringTokenizer(line, "\t");
-        Document doc = new Document();
-        doc.add(new Field("location", st.nextToken(), textNoNorms));
-        doc.add(new Field("salary", st.nextToken(), textNoNorms));
-        doc.add(new Field("type", st.nextToken(), textNoNorms));
-        doc.add(new Field("description", st.nextToken(), textNoNorms));
+        Document doc = writer.newDocument();
+        doc.addLargeText("location", st.nextToken());
+        doc.addLargeText("salary", st.nextToken());
+        doc.addLargeText("type", st.nextToken());
+        doc.addLargeText("description", st.nextToken());
         writer.addDocument(doc);
       }
       line = br.readLine();
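
Per-field options move off ad-hoc FieldType instances and onto the writer-wide
FieldTypes schema, so a setting is made once and applies to every document that
follows. A sketch with the field names used above (disableNorms as shown in this
hunk):

    FieldTypes fieldTypes = writer.getFieldTypes();
    for (String fieldName : new String[] {"location", "salary", "type", "description"}) {
      fieldTypes.disableNorms(fieldName);   // schema-level: no norms for these fields
    }
    Document doc = writer.newDocument();
    doc.addLargeText("location", "remote"); // inherits the disabled-norms setting
    writer.addDocument(doc);
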
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
index 7502f3a..56ddae4 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
@@ -1,8 +1,6 @@
 package org.apache.lucene.expressions;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
 import org.apache.lucene.expressions.js.VariableContext;
 import org.apache.lucene.index.DirectoryReader;
@@ -23,9 +21,9 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
+import static org.apache.lucene.expressions.js.VariableContext.Type.INT_INDEX;
 import static org.apache.lucene.expressions.js.VariableContext.Type.MEMBER;
 import static org.apache.lucene.expressions.js.VariableContext.Type.STR_INDEX;
-import static org.apache.lucene.expressions.js.VariableContext.Type.INT_INDEX;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -56,28 +54,28 @@
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 5));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
+    Document doc = iw.newDocument();
+    doc.addUniqueAtom("id", "1");
+    doc.addLargeText("body", "some contents and more contents");
+    doc.addInt("popularity", 5);
+    doc.addDouble("latitude", 40.759011);
+    doc.addDouble("longitude", -73.9844722);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 20));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "2");
+    doc.addLargeText("body", "another document with different contents");
+    doc.addInt("popularity", 20);
+    doc.addDouble("latitude", 40.718266);
+    doc.addDouble("longitude", -74.007819);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    doc.add(newTextField("body", "crappy contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 2));
-    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
-    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "3");
+    doc.addLargeText("body", "crappy contents");
+    doc.addInt("popularity", 2);
+    doc.addDouble("latitude", 40.7051157);
+    doc.addDouble("longitude", -74.0088305);
     iw.addDocument(doc);
     
     reader = iw.getReader();
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java
index 828f60f..0204daa 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionRescorer.java
@@ -18,8 +18,6 @@
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -45,22 +43,22 @@
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 5));
+    Document doc = iw.newDocument();
+    doc.addUniqueAtom("id", "1");
+    doc.addLargeText("body", "some contents and more contents");
+    doc.addInt("popularity", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 20));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "2");
+    doc.addLargeText("body", "another document with different contents");
+    doc.addInt("popularity", 20);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    doc.add(newTextField("body", "crappy contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 2));
+    doc = iw.newDocument();
+    doc.addUniqueAtom("id", "3");
+    doc.addLargeText("body", "crappy contents");
+    doc.addInt("popularity", 2);
     iw.addDocument(doc);
     
     reader = iw.getReader();
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
index fc95924..67e5360 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
@@ -21,10 +21,6 @@
 import java.util.Collections;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -48,7 +44,7 @@
 
 /**
  * Tests some basic expressions against different queries,
- * and fieldcache/docvalues fields against an equivalent sort.
+ * and docvalues fields against an equivalent sort.
  */
 public class TestExpressionSorts extends LuceneTestCase {
   private Directory dir;
@@ -62,13 +58,13 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     int numDocs = TestUtil.nextInt(random(), 2049, 4000);
     for (int i = 0; i < numDocs; i++) {
-      Document document = new Document();
-      document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
-      document.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
-      document.add(new NumericDocValuesField("int", random().nextInt()));
-      document.add(new NumericDocValuesField("long", random().nextLong()));
-      document.add(new FloatDocValuesField("float", random().nextFloat()));
-      document.add(new DoubleDocValuesField("double", random().nextDouble()));
+      Document document = iw.newDocument();
+      document.addLargeText("english", English.intToEnglish(i));
+      document.addLargeText("oddeven", (i % 2 == 0) ? "even" : "odd");
+      document.addInt("int", random().nextInt());
+      document.addLong("long", random().nextLong());
+      document.addFloat("float", random().nextFloat());
+      document.addDouble("double", random().nextDouble());
       iw.addDocument(document);
     }
     reader = iw.getReader();
@@ -106,10 +102,6 @@
     for (int i = 0; i < 10; i++) {
       boolean reversed = random().nextBoolean();
       SortField fields[] = new SortField[] {
-          new SortField("int", SortField.Type.INT, reversed),
-          new SortField("long", SortField.Type.LONG, reversed),
-          new SortField("float", SortField.Type.FLOAT, reversed),
-          new SortField("double", SortField.Type.DOUBLE, reversed),
           new SortField("score", SortField.Type.SCORE)
       };
       Collections.shuffle(Arrays.asList(fields), random());
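
With the raw int/long/float/double SortFields dropped from the comparison array,
the equivalent orderings are exercised through compiled expressions alone. A
sketch of sorting by an expression over a docvalues field (standard expressions
module API; the expression text here is only an example):

    Expression expr = JavascriptCompiler.compile("_score + ln(popularity)");
    SimpleBindings bindings = new SimpleBindings();
    bindings.add(new SortField("_score", SortField.Type.SCORE));
    bindings.add(new SortField("popularity", SortField.Type.INT));
    Sort sort = new Sort(expr.getSortField(bindings, /*reverse=*/ false));
    TopDocs td = searcher.search(query, 10, sort);
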
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
index debcb81..b62de03 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
@@ -21,12 +21,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -48,22 +46,22 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    Document doc = new Document();
-    doc.add(newStringField("id", "1", Field.Store.YES));
-    doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 5));
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "1");
+    doc.addLargeText("body", "some contents and more contents");
+    doc.addInt("popularity", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "2", Field.Store.YES));
-    doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 20));
+    doc = iw.newDocument();
+    doc.addAtom("id", "2");
+    doc.addLargeText("body", "another document with different contents");
+    doc.addInt("popularity", 20);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("id", "3", Field.Store.YES));
-    doc.add(newTextField("body", "crappy contents", Field.Store.NO));
-    doc.add(new NumericDocValuesField("popularity", 2));
+    doc = iw.newDocument();
+    doc.addAtom("id", "3");
+    doc.addLargeText("body", "crappy contents");
+    doc.addInt("popularity", 2);
     iw.addDocument(doc);
     iw.forceMerge(1);
     
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java
index 29b4068..3c2da15 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java
@@ -130,7 +130,7 @@
     String indexedField = config.getDimConfig(dim).indexFieldName;
 
     BooleanQuery bq = (BooleanQuery) q.getQuery();
-    bq.add(new TermQuery(term(indexedField, dim, path)), Occur.SHOULD);
+    bq.add(new TermQuery(term(FacetsConfig.drillDownFieldName(indexedField), dim, path)), Occur.SHOULD);
   }
 
   /** Adds one dimension of drill downs; if you pass the same
@@ -146,7 +146,7 @@
     String indexedField = config.getDimConfig(dim).indexFieldName;
 
     BooleanQuery bq = new BooleanQuery(true); // disable coord
-    bq.add(new TermQuery(term(indexedField, dim, path)), Occur.SHOULD);
+    bq.add(new TermQuery(term(FacetsConfig.drillDownFieldName(indexedField), dim, path)), Occur.SHOULD);
 
     add(dim, bq);
   }
@@ -286,7 +286,6 @@
       for(Filter filter : filters) {
         wrapped = new FilteredQuery(wrapped, filter, FilteredQuery.QUERY_FIRST_FILTER_STRATEGY);
       }
-
       return wrapped;
     }
   }
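
Both hunks reroute drill-down terms to the derived drill-down field (see FacetsConfig.drillDownFieldName later in this commit); from the caller's side nothing changes. A sketch of that unchanged usage, mirroring the facet tests below (dimension and label are illustrative):

    import org.apache.lucene.facet.DrillDownQuery;
    import org.apache.lucene.facet.FacetsConfig;
    import org.apache.lucene.search.Query;

    /** Sketch: drill-down from the caller's perspective; the rerouting to the
     *  ".drilldown" field happens inside DrillDownQuery. */
    class DrillDownSketch {
      static Query authorIsBob(FacetsConfig config, Query baseQuery) {
        DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
        ddq.add("Author", "Bob"); // dimension + path, as in TestDrillSideways below
        return ddq;
      }
    }
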
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetField.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetField.java
index 2b2a39e..6920b16 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/FacetField.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetField.java
@@ -19,10 +19,13 @@
 
 import java.util.Arrays;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.util.BytesRef;
 
 /**
  * Add an instance of this to your {@link Document} for every facet label.
@@ -31,11 +34,19 @@
  * <b>NOTE:</b> you must call {@link FacetsConfig#build(Document)} before
  * you add the document to IndexWriter.
  */
-public class FacetField extends Field {
-  static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE.freeze();
+public class FacetField implements IndexableField {
+
+  public static final IndexableFieldType TYPE = new IndexableFieldType() {
+  };
+
+  @Override
+  public String name() {
+    return "dummy";
+  }
+
+  @Override
+  public IndexableFieldType fieldType() {
+    return TYPE;
   }
 
   /** Dimension for this field. */
@@ -47,7 +58,6 @@
   /** Creates this from {@code dim} and
    *  {@code path}. */
   public FacetField(String dim, String... path) {
-    super("dummy", TYPE);
     verifyLabel(dim);
     for(String label : path) {
       verifyLabel(label);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
index be7d4b2..e17e4f8 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
@@ -27,19 +27,18 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
 import org.apache.lucene.facet.taxonomy.AssociationFacetField;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 import org.apache.lucene.facet.taxonomy.FloatAssociationFacetField;
 import org.apache.lucene.facet.taxonomy.IntAssociationFacetField;
 import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IntsRef;
@@ -209,7 +208,7 @@
 
     Set<String> seenDims = new HashSet<>();
 
-    for (IndexableField field : doc.indexableFields()) {
+    for (IndexableField field : doc) {
       if (field.fieldType() == FacetField.TYPE) {
         FacetField facetField = (FacetField) field;
         FacetsConfig.DimConfig dimConfig = getDimConfig(facetField.dim);
@@ -281,7 +280,7 @@
       }
     }
 
-    Document result = new Document();
+    Document result = new Document(doc.getFieldTypes());
 
     processFacetFields(taxoWriter, byField, result);
     processSSDVFacetFields(dvByField, result);
@@ -289,8 +288,8 @@
 
     //System.out.println("add stored: " + addedStoredFields);
 
-    for (Field field : doc.getFields()) {
-      FieldType ft = field.fieldType();
+    for (IndexableField field : doc.getFields()) {
+      IndexableFieldType ft = field.fieldType();
       if (ft != FacetField.TYPE && ft != SortedSetDocValuesFacetField.TYPE && ft != AssociationFacetField.TYPE) {
         result.add(field);
       }
@@ -304,10 +303,18 @@
 
   private void processFacetFields(TaxonomyWriter taxoWriter, Map<String,List<FacetField>> byField, Document doc) throws IOException {
 
-    for(Map.Entry<String,List<FacetField>> ent : byField.entrySet()) {
+    FieldTypes fieldTypes = doc.getFieldTypes();
 
+    for(Map.Entry<String,List<FacetField>> ent : byField.entrySet()) {
       String indexFieldName = ent.getKey();
+      String drillDownFieldName = drillDownFieldName(indexFieldName);
+
       //System.out.println("  indexFieldName=" + indexFieldName + " fields=" + ent.getValue());
+      fieldTypes.setMultiValued(drillDownFieldName);
+      fieldTypes.disableStored(drillDownFieldName);
+      fieldTypes.disableSorting(drillDownFieldName);
+      fieldTypes.disableSorting(indexFieldName);
+      fieldTypes.disableStored(indexFieldName);
 
       IntsRefBuilder ordinals = new IntsRefBuilder();
       for(FacetField facetField : ent.getValue()) {
@@ -342,21 +349,28 @@
 
         // Drill down:
         for (int i=1;i<=cp.length;i++) {
-          doc.add(new StringField(indexFieldName, pathToString(cp.components, i), Field.Store.NO));
+          doc.addAtom(drillDownFieldName, pathToString(cp.components, i));
         }
       }
 
       // Facet counts:
       // DocValues are considered stored fields:
-      doc.add(new BinaryDocValuesField(indexFieldName, dedupAndEncode(ordinals.get())));
+      doc.addBinary(indexFieldName, dedupAndEncode(ordinals.get()));
     }
   }
 
   private void processSSDVFacetFields(Map<String,List<SortedSetDocValuesFacetField>> byField, Document doc) throws IOException {
     //System.out.println("process SSDV: " + byField);
+    FieldTypes fieldTypes = doc.getFieldTypes();
     for(Map.Entry<String,List<SortedSetDocValuesFacetField>> ent : byField.entrySet()) {
 
       String indexFieldName = ent.getKey();
+      String drillDownFieldName = drillDownFieldName(indexFieldName);
+      fieldTypes.setMultiValued(indexFieldName);
+      fieldTypes.setIndexOptions(indexFieldName, IndexOptions.NONE);
+      fieldTypes.disableSorting(drillDownFieldName);
+      fieldTypes.setMultiValued(drillDownFieldName);
+      
       //System.out.println("  field=" + indexFieldName);
 
       for(SortedSetDocValuesFacetField facetField : ent.getValue()) {
@@ -365,22 +379,37 @@
         //System.out.println("add " + fullPath);
 
         // For facet counts:
-        doc.add(new SortedSetDocValuesField(indexFieldName, new BytesRef(fullPath)));
+        doc.addAtom(indexFieldName, fullPath);
 
-        // For drill-down:
-        doc.add(new StringField(indexFieldName, fullPath, Field.Store.NO));
-        doc.add(new StringField(indexFieldName, facetField.dim, Field.Store.NO));
+        // For drill down:
+        doc.addAtom(drillDownFieldName, fullPath);
       }
     }
   }
 
+  static String drillDownFieldName(String fieldName) {
+    return fieldName + ".drilldown";
+  }
+
   private void processAssocFacetFields(TaxonomyWriter taxoWriter,
       Map<String,List<AssociationFacetField>> byField, Document doc)
       throws IOException {
+
+    FieldTypes fieldTypes = doc.getFieldTypes();
+
     for (Map.Entry<String,List<AssociationFacetField>> ent : byField.entrySet()) {
       byte[] bytes = new byte[16];
       int upto = 0;
       String indexFieldName = ent.getKey();
+      String drillDownFieldName = drillDownFieldName(indexFieldName);
+      fieldTypes.setMultiValued(drillDownFieldName);
+      fieldTypes.disableSorting(drillDownFieldName);
+      fieldTypes.disableStored(drillDownFieldName);
+      fieldTypes.setDocValuesType(drillDownFieldName, DocValuesType.NONE);
+
+      fieldTypes.disableSorting(indexFieldName);
+      fieldTypes.disableStored(indexFieldName);
+
       for(AssociationFacetField field : ent.getValue()) {
         // NOTE: we don't add parents for associations
         checkTaxoWriter(taxoWriter);
@@ -402,10 +431,11 @@
         
         // Drill down:
         for (int i = 1; i <= label.length; i++) {
-          doc.add(new StringField(indexFieldName, pathToString(label.components, i), Field.Store.NO));
+          doc.addAtom(drillDownFieldName, pathToString(label.components, i));
         }
       }
-      doc.add(new BinaryDocValuesField(indexFieldName, new BytesRef(bytes, 0, upto)));
+
+      doc.addBinary(indexFieldName, new BytesRef(bytes, 0, upto));
     }
   }
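
Net effect of these FacetsConfig changes: facet ordinals stay under the configured index field name as binary doc values, while drill-down terms move to a sibling field named by drillDownFieldName. A sketch of the resulting layout; drillDownFieldName is package-private, so the literals below are spelled out, and "$facets" is assumed to be FacetsConfig's default index field name:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.TermQuery;

    /** Sketch: raw term lookup against the derived drill-down field.
     *  Ordinals:         binary doc values under "$facets"
     *  Drill-down terms: atoms under "$facets" + ".drilldown" */
    class DrillDownFieldNamingSketch {
      static TermQuery rawDrillDownTerm(String indexedPath) {
        return new TermQuery(new Term("$facets.drilldown", indexedPath));
      }
    }
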
 
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/RandomSamplingFacetsCollector.java b/lucene/facet/src/java/org/apache/lucene/facet/RandomSamplingFacetsCollector.java
index a7e3519..ea787d9 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/RandomSamplingFacetsCollector.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/RandomSamplingFacetsCollector.java
@@ -242,7 +242,7 @@
     for (int i = 0; i < res.labelValues.length; i++) {
       childPath[res.path.length + 1] = res.labelValues[i].label;
       String fullPath = FacetsConfig.pathToString(childPath, childPath.length);
-      int max = reader.docFreq(new Term(dimConfig.indexFieldName, fullPath));
+      int max = reader.docFreq(new Term(FacetsConfig.drillDownFieldName(dimConfig.indexFieldName), fullPath));
       int correctedCount = (int) (res.labelValues[i].value.doubleValue() / samplingRate);
       correctedCount = Math.min(max, correctedCount);
       fixedLabelValues[i] = new LabelAndValue(res.labelValues[i].label, correctedCount);
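
The correction above scales a sampled hit count back up by 1/samplingRate, then clamps it to the drill-down term's docFreq so the estimate can never exceed the number of documents actually carrying the label. A worked sketch of that arithmetic:

    /** Sketch of the sampling correction applied in the hunk above. */
    class SampleCorrectionSketch {
      static int correctedCount(double sampledValue, double samplingRate, int docFreq) {
        int scaledUp = (int) (sampledValue / samplingRate); // e.g. 7 sampled hits at rate 0.1 -> 70
        return Math.min(docFreq, scaledUp);                 // clamp to the true document count
      }
    }
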
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
index f6e8e3b..9eb74e0 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.Collections;
 
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -91,8 +92,8 @@
 
   LongRange toLongRange() {
     return new LongRange(label,
-                         NumericUtils.doubleToSortableLong(minIncl), true,
-                         NumericUtils.doubleToSortableLong(maxIncl), true);
+                         NumericUtils.doubleToLong(minIncl), true,
+                         NumericUtils.doubleToLong(maxIncl), true);
   }
 
   @Override
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
index 6610851..480b35b 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
@@ -21,8 +21,7 @@
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.lucene.document.DoubleDocValuesField; // javadocs
-import org.apache.lucene.document.FloatDocValuesField; // javadocs
+import org.apache.lucene.document.Document;
 import org.apache.lucene.facet.Facets;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsCollector;
@@ -31,9 +30,9 @@
 import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource; // javadocs
 import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.NumericUtils;
 
 /** {@link Facets} implementation that computes counts for
@@ -46,9 +45,9 @@
  *  etc.).
  *
  *  <p> If you had indexed your field using {@link
- *  FloatDocValuesField} then pass {@link FloatFieldSource}
+ *  org.apache.lucene.document.Document#addFloat} then pass {@link FloatFieldSource}
  *  as the {@link ValueSource}; if you used {@link
- *  DoubleDocValuesField} then pass {@link
+ *  org.apache.lucene.document.Document#addDouble} then pass {@link
  *  DoubleFieldSource} (this is the default used when you
  *  pass just the field name).
  *
@@ -85,8 +84,8 @@
     for(int i=0;i<ranges.length;i++) {
       DoubleRange range = ranges[i];
       longRanges[i] =  new LongRange(range.label,
-                                     NumericUtils.doubleToSortableLong(range.minIncl), true,
-                                     NumericUtils.doubleToSortableLong(range.maxIncl), true);
+                                     NumericUtils.doubleToLong(range.minIncl), true,
+                                     NumericUtils.doubleToLong(range.maxIncl), true);
     }
 
     LongRangeCounter counter = new LongRangeCounter(longRanges);
@@ -121,7 +120,7 @@
         }
         // Skip missing docs:
         if (fv.exists(doc)) {
-          counter.add(NumericUtils.doubleToSortableLong(fv.doubleVal(doc)));
+          counter.add(NumericUtils.doubleToLong(fv.doubleVal(doc)));
         } else {
           missingCount++;
         }
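
Putting the updated javadoc to work: a minimal sketch that counts ranges over a field written via Document#addDouble, relying on the default DoubleFieldSource when only the field name is passed (query, field name, and ranges are illustrative):

    import java.io.IOException;

    import org.apache.lucene.facet.FacetResult;
    import org.apache.lucene.facet.Facets;
    import org.apache.lucene.facet.FacetsCollector;
    import org.apache.lucene.facet.range.DoubleRange;
    import org.apache.lucene.facet.range.DoubleRangeFacetCounts;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;

    /** Sketch: range counts over a Document#addDouble field. */
    class DoubleRangeSketch {
      static FacetResult countRanges(IndexSearcher searcher) throws IOException {
        FacetsCollector fc = new FacetsCollector();
        FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
        Facets facets = new DoubleRangeFacetCounts("field", fc,
            new DoubleRange("under half", 0.0, true, 0.5, false),
            new DoubleRange("half and up", 0.5, true, 1.0, true));
        return facets.getTopChildren(10, "field");
      }
    }
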
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java b/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
index eb51112..b8d2f64 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
@@ -17,12 +17,12 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.DrillDownQuery; // javadocs
 import org.apache.lucene.facet.DrillSideways; // javadocs
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilteredQuery; // javadocs
-import org.apache.lucene.search.NumericRangeFilter; // javadocs
 
 /** Base class for a single labeled range.
  *
@@ -48,7 +48,7 @@
    *  FilteredQuery#QUERY_FIRST_FILTER_STRATEGY}.  If the
    *  {@link ValueSource} is static, e.g. an indexed numeric
    *  field, then it may be more efficient to use {@link
-   *  NumericRangeFilter}.  The provided fastMatchFilter,
+   *  FieldTypes#newRangeFilter}.  The provided fastMatchFilter,
    *  if non-null, will first be consulted, and only if
    *  that is set for each document will the range then be
    *  checked. */
@@ -61,7 +61,7 @@
    *  {@link FilteredQuery} using its {@link
    *  FilteredQuery#QUERY_FIRST_FILTER_STRATEGY}.  If the
    *  {@link ValueSource} is static, e.g. an indexed numeric
-   *  field, then it may be more efficient to use {@link NumericRangeFilter}. */
+   *  field, then it may be more efficient to use {@link FieldTypes#newRangeFilter}. */
   public Filter getFilter(ValueSource valueSource) {
     return getFilter(null, valueSource);
   }
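
A sketch of the pattern this javadoc now recommends: a FieldTypes-built range filter over the indexed numeric field serves as the cheap fastMatchFilter, while the ValueSource performs the exact per-document check (field name and bounds are illustrative):

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.facet.range.DoubleRange;
    import org.apache.lucene.queries.function.ValueSource;
    import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
    import org.apache.lucene.search.Filter;

    /** Sketch: fast match via FieldTypes, exact check via ValueSource. */
    class FastMatchSketch {
      static Filter filterFor(FieldTypes fieldTypes, DoubleRange range) {
        Filter fastMatch = fieldTypes.newDoubleRangeFilter("field", 0.0, true, 1.0, true);
        ValueSource vs = new DoubleFieldSource("field");
        return range.getFilter(fastMatch, vs); // fastMatch is consulted first, then the exact check
      }
    }
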
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java
index d24491d..29ecb9e 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java
@@ -17,20 +17,30 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.facet.FacetField;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.util.BytesRef;
 
 /** Add an instance of this to your Document for every facet
  *  label to be indexed via SortedSetDocValues. */
-public class SortedSetDocValuesFacetField extends Field {
+public class SortedSetDocValuesFacetField implements IndexableField {
   
-  /** Indexed {@link FieldType}. */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE.freeze();
+  public static final IndexableFieldType TYPE = new IndexableFieldType() {
+  };
+
+  @Override
+  public String name() {
+    return "dummy";
+  }
+
+  @Override
+  public IndexableFieldType fieldType() {
+    return TYPE;
   }
 
   /** Dimension. */
@@ -41,7 +51,6 @@
 
   /** Sole constructor. */
   public SortedSetDocValuesFacetField(String dim, String label) {
-    super("dummy", TYPE);
     FacetField.verifyLabel(label);
     FacetField.verifyLabel(dim);
     this.dim = dim;
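
Like FacetField, this class is now a marker that FacetsConfig.build expands, and a field carrying several labels must be declared multi-valued before the first document is added, as the tests in this commit do. A sketch:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.facet.FacetsConfig;
    import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
    import org.apache.lucene.index.IndexWriter;

    /** Sketch: SSDV faceting needs no taxonomy writer; build() takes the
     *  document alone. */
    class SsdvFacetSketch {
      static void addDoc(IndexWriter iw, FacetsConfig config) throws IOException {
        FieldTypes fieldTypes = iw.getFieldTypes();
        fieldTypes.setMultiValued("a");         // several labels per document
        Document doc = iw.newDocument();
        doc.add(new SortedSetDocValuesFacetField("a", "foo"));
        doc.add(new SortedSetDocValuesFacetField("a", "bar"));
        iw.addDocument(config.build(doc));
      }
    }
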
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java
index 216595e..cc5b04d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java
@@ -19,12 +19,14 @@
 
 import java.util.Arrays;
 
-import org.apache.lucene.document.Document; // javadocs
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.Facets;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.util.BytesRef;
 
 /** Add an instance of this to your {@link Document} to add
@@ -36,15 +38,21 @@
  *  Facets} implementations.
  * 
  *  @lucene.experimental */
-public class AssociationFacetField extends Field {
+public class AssociationFacetField implements IndexableField {
   
-  /** Indexed {@link FieldType}. */
-  public static final FieldType TYPE = new FieldType();
-  static {
-    TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE.freeze();
+  public static final IndexableFieldType TYPE = new IndexableFieldType() {
+  };
+  
+  @Override
+  public String name() {
+    return "dummy";
   }
-  
+
+  @Override
+  public IndexableFieldType fieldType() {
+    return TYPE;
+  }
+
   /** Dimension for this field. */
   public final String dim;
 
@@ -57,7 +65,6 @@
   /** Creates this from {@code dim} and {@code path}, plus an
    *  association. */
   public AssociationFacetField(BytesRef assoc, String dim, String... path) {
-    super("dummy", TYPE);
     FacetField.verifyLabel(dim);
     for(String label : path) {
       FacetField.verifyLabel(label);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FloatAssociationFacetField.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FloatAssociationFacetField.java
index d89b49e..53d4173 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FloatAssociationFacetField.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FloatAssociationFacetField.java
@@ -19,7 +19,6 @@
 
 import java.util.Arrays;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.util.BytesRef;
 
 /** Add an instance of this to your {@link Document} to add
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/IntAssociationFacetField.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/IntAssociationFacetField.java
index e2f953f..332ddce 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/IntAssociationFacetField.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/IntAssociationFacetField.java
@@ -19,7 +19,6 @@
 
 import java.util.Arrays;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.util.BytesRef;
 
 /** Add an instance of this to your {@link Document} to add
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
index a216bb9..d2b6a2d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
@@ -5,6 +5,7 @@
 import java.util.logging.Level;
 import java.util.logging.Logger;
 
+import org.apache.lucene.document.Document;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 import org.apache.lucene.facet.taxonomy.LRUHashMap;
@@ -15,7 +16,6 @@
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -312,8 +312,8 @@
       }
     }
     
-    StoredDocument doc = indexReader.document(ordinal);
-    FacetLabel ret = new FacetLabel(FacetsConfig.stringToPath(doc.get(Consts.FULL)));
+    Document doc = indexReader.document(ordinal);
+    FacetLabel ret = new FacetLabel(FacetsConfig.stringToPath(doc.getString(Consts.FULL)));
     synchronized (categoryCache) {
       categoryCache.put(catIDInteger, ret);
     }
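
With StoredDocument gone, reader.document(...) hands back the schema-aware Document, and typed getters such as getString replace the untyped get seen in the old code. A small sketch of the access pattern (the "id" field mirrors the tests in this commit):

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexReader;

    /** Sketch: typed stored-field access after the StoredDocument removal. */
    class StoredAccessSketch {
      static String idOf(IndexReader reader, int docID) throws IOException {
        Document doc = reader.document(docID); // a Document, no longer a StoredDocument
        return doc.getString("id");            // typed getter instead of get()
      }
    }
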
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index 03cc88f..4feb37a 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -15,10 +15,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
@@ -31,8 +28,8 @@
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.LogByteSizeMergePolicy;
@@ -101,8 +98,6 @@
   private long indexEpoch;
 
   private SinglePositionTokenStream parentStream = new SinglePositionTokenStream(Consts.PAYLOAD_PARENT);
-  private Field parentStreamField;
-  private Field fullPathField;
   private int cacheMissesUntilFill = 11;
   private boolean shouldFillCache = true;
   
@@ -189,12 +184,10 @@
     if (openMode == OpenMode.CREATE) {
       ++indexEpoch;
     }
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.disableNorms(Consts.FIELD_PAYLOADS);
+    fieldTypes.disableStored(Consts.FIELD_PAYLOADS);
     
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setOmitNorms(true);
-    parentStreamField = new Field(Consts.FIELD_PAYLOADS, parentStream, ft);
-    fullPathField = new StringField(Consts.FULL, "", Field.Store.YES);
-
     nextID = indexWriter.maxDoc();
 
     if (cache == null) {
@@ -484,12 +477,11 @@
     // we write here (e.g., to write parent+2), and need to do a workaround
     // in the reader (which knows that anyway only category 0 has a parent
     // -1).    
-    parentStream.set(Math.max(parent + 1, 1));
-    Document d = new Document();
-    d.add(parentStreamField);
 
-    fullPathField.setStringValue(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
-    d.add(fullPathField);
+    Document d = indexWriter.newDocument();
+    parentStream.set(Math.max(parent + 1, 1));
+    d.addLargeText(Consts.FIELD_PAYLOADS, parentStream);
+    d.addAtom(Consts.FULL, FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
 
     // Note that we do not pass an Analyzer here because the fields that are
     // added to the Document are untokenized or contain their own TokenStream.
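
Per-field FieldType instances give way to writer-level FieldTypes switches set once up front, as this hunk does for the taxonomy's payload field. A sketch, with an illustrative field name standing in for Consts.FIELD_PAYLOADS:

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;

    /** Sketch: schema switches are set on the writer before any document. */
    class FieldTypesConfigSketch {
      static void configure(IndexWriter writer) {
        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.disableNorms("$payloads$");  // hypothetical literal; the real name is Consts.FIELD_PAYLOADS
        fieldTypes.disableStored("$payloads$");
      }
    }
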
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillDownQuery.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillDownQuery.java
index 6743f23..be795ec 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillDownQuery.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillDownQuery.java
@@ -23,8 +23,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
@@ -68,6 +67,9 @@
     RandomIndexWriter writer = new RandomIndexWriter(r, dir, 
         newIndexWriterConfig(new MockAnalyzer(r, MockTokenizer.KEYWORD, false)));
     
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("content");
+
     taxoDir = newDirectory();
     TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
     config = new FacetsConfig();
@@ -88,12 +90,12 @@
     config.setRequireDimCount("b", true);
 
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       if (i % 2 == 0) { // 50
-        doc.add(new TextField("content", "foo", Field.Store.NO));
+        doc.addLargeText("content", "foo");
       }
       if (i % 3 == 0) { // 33
-        doc.add(new TextField("content", "bar", Field.Store.NO));
+        doc.addLargeText("content", "bar");
       }
       if (i % 4 == 0) { // 25
         if (r.nextBoolean()) {
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
index e678fbb..1b3a486 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
@@ -29,9 +29,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
 import org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState;
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
@@ -39,9 +37,10 @@
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocIdSet;
@@ -56,9 +55,9 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InPlaceMergeSorter;
@@ -80,27 +79,27 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("Author", "Bob"));
     doc.add(new FacetField("Publish Date", "2010", "10", "15"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2010", "10", "20"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2012", "1", "1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Susan"));
     doc.add(new FacetField("Publish Date", "2012", "1", "7"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Frank"));
     doc.add(new FacetField("Publish Date", "1999", "5", "5"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -257,12 +256,12 @@
     FacetsConfig config = new FacetsConfig();
     config.setHierarchical("Publish Date", true);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("Author", "Bob"));
     doc.add(new FacetField("Publish Date", "2010", "10", "15"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2010", "10", "20"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -270,7 +269,7 @@
     writer.commit();
 
     // 2nd segment has no Author:
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Foobar", "Lisa"));
     doc.add(new FacetField("Publish Date", "2012", "1", "1"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -312,27 +311,27 @@
     FacetsConfig config = new FacetsConfig();
     config.setHierarchical("dim", true);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("dim", "a", "x"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("dim", "a", "y"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("dim", "a", "z"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("dim", "b"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("dim", "c"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("dim", "d"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -480,22 +479,29 @@
     Directory d = newDirectory();
     Directory td = newDirectory();
 
+    boolean doUseDV = random().nextBoolean();
+
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setInfoStream(InfoStream.NO_OUTPUT);
     RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    for(int dim=0;dim<numDims;dim++) {
+      fieldTypes.setMultiValued("dim" + dim);
+      if (doUseDV == false) {
+        fieldTypes.setDocValuesType("dim" + dim, DocValuesType.NONE);
+      }
+    }
+
     DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
     FacetsConfig config = new FacetsConfig();
     for(int i=0;i<numDims;i++) {
       config.setMultiValued("dim"+i, true);
     }
 
-    boolean doUseDV = random().nextBoolean();
-
     for(Doc rawDoc : docs) {
-      Document doc = new Document();
-      doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
-      doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
-      doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", rawDoc.id);
+      doc.addAtom("content", rawDoc.contentToken);
 
       if (VERBOSE) {
         System.out.println("  doc id=" + rawDoc.id + " token=" + rawDoc.contentToken);
@@ -503,30 +509,29 @@
       for(int dim=0;dim<numDims;dim++) {
         int dimValue = rawDoc.dims[dim];
         if (dimValue != -1) {
+          doc.addAtom("dim" + dim, dimValues[dim][dimValue]);
           if (doUseDV) {
             doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue]));
           } else {
             doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue]));
           }
-          doc.add(new StringField("dim" + dim, dimValues[dim][dimValue], Field.Store.YES));
           if (VERBOSE) {
             System.out.println("    dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue]));
           }
         }
         int dimValue2 = rawDoc.dims2[dim];
         if (dimValue2 != -1) {
+          doc.addAtom("dim" + dim, dimValues[dim][dimValue2]);
           if (doUseDV) {
             doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue2]));
           } else {
             doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue2]));
           }
-          doc.add(new StringField("dim" + dim, dimValues[dim][dimValue2], Field.Store.YES));
           if (VERBOSE) {
             System.out.println("      dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue2]));
           }
         }
       }
-
       w.addDocument(config.build(tw, doc));
     }
 
@@ -654,7 +659,7 @@
               final FixedBitSet bits = new FixedBitSet(maxDoc);
               for(int docID=0;docID < maxDoc;docID++) {
                 // Keeps only the even ids:
-                if ((acceptDocs == null || acceptDocs.get(docID)) && (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0) {
+                if ((acceptDocs == null || acceptDocs.get(docID)) && (Integer.parseInt(context.reader().document(docID).getString("id")) & 1) == 0) {
                   bits.set(docID);
                 }
               }
@@ -735,7 +740,7 @@
       TopDocs hits = s.search(baseQuery, numDocs);
       Map<String,Float> scores = new HashMap<>();
       for(ScoreDoc sd : hits.scoreDocs) {
-        scores.put(s.doc(sd.doc).get("id"), sd.score);
+        scores.put(s.doc(sd.doc).getString("id"), sd.score);
       }
       if (VERBOSE) {
         System.out.println("  verify all facets");
@@ -912,7 +917,7 @@
 
     Map<String,Integer> idToDocID = new HashMap<>();
     for(int i=0;i<s.getIndexReader().maxDoc();i++) {
-      idToDocID.put(s.doc(i).get("id"), i);
+      idToDocID.put(s.doc(i).getString("id"), i);
     }
 
     Collections.sort(hits);
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestFacetsConfig.java b/lucene/facet/src/test/org/apache/lucene/facet/TestFacetsConfig.java
index 3c217b4..7973f37 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestFacetsConfig.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestFacetsConfig.java
@@ -62,7 +62,7 @@
     IndexWriter indexWriter = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
     FacetsConfig facetsConfig = new FacetsConfig();
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     doc.add(new FacetField("a", "b"));
     doc = facetsConfig.build(taxoWriter, doc);
     // these two addDocument() used to fail
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java b/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java
index 97528cd..84bceb8 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java
@@ -24,16 +24,14 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -291,10 +289,10 @@
 
   private void seedIndex(TaxonomyWriter tw, RandomIndexWriter iw, FacetsConfig config) throws IOException {
     for (FacetField ff : CATEGORIES) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       doc.add(ff);
-      doc.add(new TextField("content", "alpha", Field.Store.YES));
+      doc.addLargeText("content", "alpha");
       iw.addDocument(config.build(tw, doc));
     }
   }
-}
\ No newline at end of file
+}
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestRandomSamplingFacetsCollector.java b/lucene/facet/src/test/org/apache/lucene/facet/TestRandomSamplingFacetsCollector.java
index 5be46bb..daa9d38 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestRandomSamplingFacetsCollector.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestRandomSamplingFacetsCollector.java
@@ -4,8 +4,6 @@
 import java.util.Random;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
@@ -63,10 +61,11 @@
     final int numCategories = 10;
     int numDocs = atLeast(10000);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("EvenOdd", (i % 2 == 0) ? "even" : "odd", Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("EvenOdd", (i % 2 == 0) ? "even" : "odd");
       doc.add(new FacetField("iMod10", Integer.toString(i % numCategories)));
-      writer.addDocument(config.build(taxoWriter, doc));
+      Document built = config.build(taxoWriter, doc);
+      writer.addDocument(built);
     }
     
     // NRT open
@@ -140,7 +139,7 @@
       LabelAndValue amortized = amortized10Result.labelValues[i];
       LabelAndValue sampled = random10Result.labelValues[i];
       // since numDocs may not divide by 10 exactly, allow for some slack in the amortized count 
-      assertEquals(amortized.value.floatValue(), Math.min(5 * sampled.value.floatValue(), numDocs / 10.f), 1.0);
+      assertEquals(Math.min(5 * sampled.value.floatValue(), numDocs / 10.f), amortized.value.floatValue(), 1.0);
     }
     
     IOUtils.close(searcher.getIndexReader(), taxoReader, dir, taxoDir);
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
index b825f08..7f672f5 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
@@ -23,13 +23,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleDocValuesField;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
 import org.apache.lucene.facet.DrillSideways;
@@ -44,10 +38,10 @@
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -56,14 +50,13 @@
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.CachingWrapperFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilterCachingPolicy;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeFilter;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BitDocIdSet;
@@ -77,16 +70,15 @@
   public void testBasicLong() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    NumericDocValuesField field = new NumericDocValuesField("field", 0L);
-    doc.add(field);
     for(long l=0;l<100;l++) {
-      field.setLongValue(l);
+      Document doc = w.newDocument();
+      doc.addLong("field", l);
       w.addDocument(doc);
     }
 
     // Also add Long.MAX_VALUE
-    field.setLongValue(Long.MAX_VALUE);
+    Document doc = w.newDocument();
+    doc.addLong("field", Long.MAX_VALUE);
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -143,14 +135,17 @@
 
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    NumericDocValuesField field = new NumericDocValuesField("field", 0L);
-    doc.add(field);
-    field.setLongValue(Long.MIN_VALUE);
+
+    Document doc = w.newDocument();
+    doc.addLong("field", Long.MIN_VALUE);
     w.addDocument(doc);
-    field.setLongValue(0);
+
+    doc = w.newDocument();
+    doc.addLong("field", 0L);
     w.addDocument(doc);
-    field.setLongValue(Long.MAX_VALUE);
+
+    doc = w.newDocument();
+    doc.addLong("field", Long.MAX_VALUE);
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -179,14 +174,13 @@
   public void testOverlappedEndStart() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    NumericDocValuesField field = new NumericDocValuesField("field", 0L);
-    doc.add(field);
     for(long l=0;l<100;l++) {
-      field.setLongValue(l);
+      Document doc = w.newDocument();
+      doc.addLong("field", l);
       w.addDocument(doc);
     }
-    field.setLongValue(Long.MAX_VALUE);
+    Document doc = w.newDocument();
+    doc.addLong("field", Long.MAX_VALUE);
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -221,11 +215,9 @@
     FacetsConfig config = new FacetsConfig();
 
     for (long l = 0; l < 100; l++) {
-      Document doc = new Document();
-      // For computing range facet counts:
-      doc.add(new NumericDocValuesField("field", l));
-      // For drill down by numeric range:
-      doc.add(new LongField("field", l, Field.Store.NO));
+      Document doc = w.newDocument();
+      // For computing range facet counts and drill down by numeric range:
+      doc.addLong("field", l);
 
       if ((l&3) == 0) {
         doc.add(new FacetField("dim", "a"));
@@ -236,6 +228,7 @@
     }
 
     final IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
 
     final TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
 
@@ -301,7 +294,7 @@
 
     // Third search, drill down on "less than or equal to 10":
     ddq = new DrillDownQuery(config);
-    ddq.add("field", NumericRangeQuery.newLongRange("field", 0L, 10L, true, true));
+    ddq.add("field", new ConstantScoreQuery(fieldTypes.newLongRangeFilter("field", 0L, true, 10L, true)));
     dsr = ds.search(null, ddq, 10);
 
     assertEquals(11, dsr.hits.totalHits);
@@ -315,11 +308,9 @@
   public void testBasicDouble() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    DoubleDocValuesField field = new DoubleDocValuesField("field", 0.0);
-    doc.add(field);
     for(long l=0;l<100;l++) {
-      field.setDoubleValue(l);
+      Document doc = w.newDocument();
+      doc.addDouble("field", l);
       w.addDocument(doc);
     }
 
@@ -345,11 +336,9 @@
   public void testBasicFloat() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    FloatDocValuesField field = new FloatDocValuesField("field", 0.0f);
-    doc.add(field);
     for(long l=0;l<100;l++) {
-      field.setFloatValue(l);
+      Document doc = w.newDocument();
+      doc.addFloat("field", l);
       w.addDocument(doc);
     }
 
@@ -385,16 +374,16 @@
     long minValue = Long.MAX_VALUE;
     long maxValue = Long.MIN_VALUE;
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       long v = random().nextLong();
       values[i] = v;
-      doc.add(new NumericDocValuesField("field", v));
-      doc.add(new LongField("field", v, Field.Store.NO));
+      doc.addLong("field", v);
       w.addDocument(doc);
       minValue = Math.min(minValue, v);
       maxValue = Math.max(maxValue, v);
     }
     IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
 
     IndexSearcher s = newSearcher(r);
     FacetsConfig config = new FacetsConfig();
@@ -481,9 +470,9 @@
       Filter fastMatchFilter;
       if (random().nextBoolean()) {
         if (random().nextBoolean()) {
-          fastMatchFilter = NumericRangeFilter.newLongRange("field", minValue, maxValue, true, true);
+          fastMatchFilter = fieldTypes.newLongRangeFilter("field", minValue, true, maxValue, true);
         } else {
-          fastMatchFilter = NumericRangeFilter.newLongRange("field", minAcceptedValue, maxAcceptedValue, true, true);
+          fastMatchFilter = fieldTypes.newLongRangeFilter("field", minAcceptedValue, true, maxAcceptedValue, true);
         }
       } else {
         fastMatchFilter = null;
@@ -506,9 +495,9 @@
         DrillDownQuery ddq = new DrillDownQuery(config);
         if (random().nextBoolean()) {
           if (random().nextBoolean()) {
-            ddq.add("field", NumericRangeFilter.newLongRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", fieldTypes.newLongRangeFilter("field", range.min, range.minInclusive, range.max, range.maxInclusive));
           } else {
-            ddq.add("field", NumericRangeQuery.newLongRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", new ConstantScoreQuery(fieldTypes.newLongRangeFilter("field", range.min, range.minInclusive, range.max, range.maxInclusive)));
           }
         } else {
           ddq.add("field", range.getFilter(fastMatchFilter, vs));
@@ -530,16 +519,16 @@
     float minValue = Float.POSITIVE_INFINITY;
     float maxValue = Float.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       float v = random().nextFloat();
       values[i] = v;
-      doc.add(new FloatDocValuesField("field", v));
-      doc.add(new FloatField("field", v, Field.Store.NO));
+      doc.addFloat("field", v);
       w.addDocument(doc);
       minValue = Math.min(minValue, v);
       maxValue = Math.max(maxValue, v);
     }
     IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
 
     IndexSearcher s = newSearcher(r);
     FacetsConfig config = new FacetsConfig();
@@ -640,9 +629,9 @@
       Filter fastMatchFilter;
       if (random().nextBoolean()) {
         if (random().nextBoolean()) {
-          fastMatchFilter = NumericRangeFilter.newFloatRange("field", minValue, maxValue, true, true);
+          fastMatchFilter = fieldTypes.newFloatRangeFilter("field", minValue, true, maxValue, true);
         } else {
-          fastMatchFilter = NumericRangeFilter.newFloatRange("field", minAcceptedValue, maxAcceptedValue, true, true);
+          fastMatchFilter = fieldTypes.newFloatRangeFilter("field", minAcceptedValue, true, maxAcceptedValue, true);
         }
       } else {
         fastMatchFilter = null;
@@ -665,9 +654,9 @@
         DrillDownQuery ddq = new DrillDownQuery(config);
         if (random().nextBoolean()) {
           if (random().nextBoolean()) {
-            ddq.add("field", NumericRangeFilter.newFloatRange("field", (float) range.min, (float) range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", fieldTypes.newFloatRangeFilter("field", (float) range.min, range.minInclusive, (float) range.max, range.maxInclusive));
           } else {
-            ddq.add("field", NumericRangeQuery.newFloatRange("field", (float) range.min, (float) range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", new ConstantScoreQuery(fieldTypes.newFloatRangeFilter("field", (float) range.min, range.minInclusive, (float) range.max, range.maxInclusive)));
           }
         } else {
           ddq.add("field", range.getFilter(fastMatchFilter, vs));
@@ -689,16 +678,16 @@
     double minValue = Double.POSITIVE_INFINITY;
     double maxValue = Double.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       double v = random().nextDouble();
       values[i] = v;
-      doc.add(new DoubleDocValuesField("field", v));
-      doc.add(new DoubleField("field", v, Field.Store.NO));
+      doc.addDouble("field", v);
       w.addDocument(doc);
       minValue = Math.min(minValue, v);
       maxValue = Math.max(maxValue, v);
     }
     IndexReader r = w.getReader();
+    FieldTypes fieldTypes = r.getFieldTypes();
 
     IndexSearcher s = newSearcher(r);
     FacetsConfig config = new FacetsConfig();
@@ -783,9 +772,9 @@
       Filter fastMatchFilter;
       if (random().nextBoolean()) {
         if (random().nextBoolean()) {
-          fastMatchFilter = NumericRangeFilter.newDoubleRange("field", minValue, maxValue, true, true);
+          fastMatchFilter = fieldTypes.newDoubleRangeFilter("field", minValue, true, maxValue, true);
         } else {
-          fastMatchFilter = NumericRangeFilter.newDoubleRange("field", minAcceptedValue, maxAcceptedValue, true, true);
+          fastMatchFilter = fieldTypes.newDoubleRangeFilter("field", minAcceptedValue, true, maxAcceptedValue, true);
         }
       } else {
         fastMatchFilter = null;
@@ -808,9 +797,9 @@
         DrillDownQuery ddq = new DrillDownQuery(config);
         if (random().nextBoolean()) {
           if (random().nextBoolean()) {
-            ddq.add("field", NumericRangeFilter.newDoubleRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", fieldTypes.newDoubleRangeFilter("field", range.min, range.minInclusive, range.max, range.maxInclusive));
           } else {
-            ddq.add("field", NumericRangeQuery.newDoubleRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
+            ddq.add("field", new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter("field", range.min, range.minInclusive, range.max, range.maxInclusive)));
           }
         } else {
           ddq.add("field", range.getFilter(fastMatchFilter, vs));
@@ -828,16 +817,14 @@
   public void testMissingValues() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    NumericDocValuesField field = new NumericDocValuesField("field", 0L);
-    doc.add(field);
     for(long l=0;l<100;l++) {
+      // Every 5th doc is missing the value:
       if (l % 5 == 0) {
-        // Every 5th doc is missing the value:
-        w.addDocument(new Document());
+        w.addDocument(w.newDocument());
         continue;
       }
-      field.setLongValue(l);
+      Document doc = w.newDocument();
+      doc.addLong("field", l);
       w.addDocument(doc);
     }
 
@@ -865,7 +852,7 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.addDocument(doc);
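
The migration this test performs throughout: NumericRangeFilter/NumericRangeQuery become FieldTypes-built range filters, wrapped in ConstantScoreQuery wherever a Query is required. A minimal sketch (field name and bounds are illustrative):

    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.Query;

    /** Sketch: filter and query forms of a long-range restriction. */
    class RangeFilterMigrationSketch {
      static Query longRange(FieldTypes fieldTypes, long min, long max) {
        Filter f = fieldTypes.newLongRangeFilter("field", min, true, max, true);
        return new ConstantScoreQuery(f); // ConstantScoreQuery accepted a Filter in this era
      }
    }
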
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
index b9c2973..4c4631e 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;
@@ -55,7 +55,7 @@
     config.setMultiValued("a", true);
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo"));
     doc.add(new SortedSetDocValuesFacetField("a", "bar"));
     doc.add(new SortedSetDocValuesFacetField("a", "zoo"));
@@ -65,7 +65,7 @@
       writer.commit();
     }
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo"));
     writer.addDocument(config.build(doc));
 
@@ -101,21 +101,23 @@
     Directory dir = newDirectory();
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("a");
 
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo"));
     writer.addDocument(config.build(doc));
 
     IndexReader r = writer.getReader();
     SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(r);
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "bar"));
     writer.addDocument(config.build(doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "baz"));
     writer.addDocument(config.build(doc));
 
@@ -143,10 +145,14 @@
     Directory dir = newDirectory();
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("a");
+    fieldTypes.setMultiValued("b");
+    fieldTypes.setMultiValued("c");
 
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo1"));
     writer.addDocument(config.build(doc));
 
@@ -154,7 +160,7 @@
       writer.commit();
     }
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo2"));
     doc.add(new SortedSetDocValuesFacetField("b", "bar1"));
     writer.addDocument(config.build(doc));
@@ -163,7 +169,7 @@
       writer.commit();
     }
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo3"));
     doc.add(new SortedSetDocValuesFacetField("b", "bar2"));
     doc.add(new SortedSetDocValuesFacetField("c", "baz1"));
@@ -197,18 +203,21 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("a");
+
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo1"));
     writer.addDocument(config.build(doc));
     writer.commit();
 
-    doc = new Document();
+    doc = writer.newDocument();
     writer.addDocument(config.build(doc));
     writer.commit();
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo2"));
     writer.addDocument(config.build(doc));
     writer.commit();
@@ -236,15 +245,18 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("a");
+
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo1"));
     writer.addDocument(config.build(doc));
 
     writer.commit();
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new SortedSetDocValuesFacetField("a", "foo2"));
     writer.addDocument(config.build(doc));
 
@@ -272,13 +284,18 @@
     Directory taxoDir = newDirectory();
 
     RandomIndexWriter w = new RandomIndexWriter(random(), indexDir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+
     FacetsConfig config = new FacetsConfig();
     int numDocs = atLeast(1000);
     int numDims = TestUtil.nextInt(random(), 1, 7);
+    for(int dim=0;dim<numDims;dim++) {
+      fieldTypes.setMultiValued("dim" + dim);
+    }
     List<TestDoc> testDocs = getRandomDocs(tokens, numDocs, numDims);
     for(TestDoc testDoc : testDocs) {
-      Document doc = new Document();
-      doc.add(newStringField("content", testDoc.content, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("content", testDoc.content);
       for(int j=0;j<numDims;j++) {
         if (testDoc.dims[j] != null) {
           doc.add(new SortedSetDocValuesFacetField("dim" + j, testDoc.dims[j]));
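
A second recurring change in this file: multi-valued fields are now declared up front on the writer's FieldTypes, before any document adds more than one SortedSetDocValuesFacetField value for the same dimension. A short sketch of that ordering, assuming the branch API exactly as used in the hunks above:

    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("a");  // declare before indexing into "a"

    FacetsConfig config = new FacetsConfig();
    Document doc = writer.newDocument();
    doc.add(new SortedSetDocValuesFacetField("a", "foo"));
    doc.add(new SortedSetDocValuesFacetField("a", "bar"));  // second value per doc is now legal
    writer.addDocument(config.build(doc));
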
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCachedOrdinalsReader.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCachedOrdinalsReader.java
index 849eebb..67a3af9 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCachedOrdinalsReader.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCachedOrdinalsReader.java
@@ -25,10 +25,10 @@
 import org.apache.lucene.facet.FacetTestCase;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.junit.Test;
@@ -45,10 +45,10 @@
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
     FacetsConfig config = new FacetsConfig();
     
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("A", "1"));
     writer.addDocument(config.build(taxoWriter, doc));
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("A", "2"));
     writer.addDocument(config.build(taxoWriter, doc));
     
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestOrdinalMappingLeafReader.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestOrdinalMappingLeafReader.java
index 0ee930f..0f6ff7f 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestOrdinalMappingLeafReader.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestOrdinalMappingLeafReader.java
@@ -2,8 +2,8 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;
@@ -12,8 +12,8 @@
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.LabelAndValue;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
-import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.MemoryOrdinalMap;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
@@ -105,7 +105,9 @@
     assertEquals(NUM_DOCS * 2, idResult.value); // each "id" appears twice
     
     BinaryDocValues bdv = MultiDocValues.getBinaryValues(indexReader, "bdv");
+    assertNotNull(bdv);
     BinaryDocValues cbdv = MultiDocValues.getBinaryValues(indexReader, "cbdv");
+    assertNotNull(cbdv);
     for (int i = 0; i < indexReader.maxDoc(); i++) {
       assertEquals(Integer.parseInt(cbdv.get(i).utf8ToString()), Integer.parseInt(bdv.get(i).utf8ToString())*2);
     }
@@ -115,10 +117,13 @@
   private void buildIndexWithFacets(Directory indexDir, Directory taxoDir, boolean asc) throws IOException {
     IndexWriterConfig config = newIndexWriterConfig(null);
     RandomIndexWriter writer = new RandomIndexWriter(random(), indexDir, config);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("bdv");
+    fieldTypes.disableSorting("cbdv");
     
     DirectoryTaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxoDir);
     for (int i = 1; i <= NUM_DOCS; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       for (int j = i; j <= NUM_DOCS; j++) {
         int facetValue = asc ? j: NUM_DOCS - j;
         doc.add(new FacetField("tag", Integer.toString(facetValue)));
@@ -127,8 +132,8 @@
       doc.add(new FacetField("id", Integer.toString(i)));
       
       // make sure OrdinalMappingLeafReader ignores non-facet BinaryDocValues fields
-      doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
-      doc.add(new BinaryDocValuesField("cbdv", new BytesRef(Integer.toString(i*2))));
+      doc.addBinary("bdv", new BytesRef(Integer.toString(i)));
+      doc.addBinary("cbdv", new BytesRef(Integer.toString(i*2)));
       writer.addDocument(facetConfig.build(taxonomyWriter, doc));
     }
     taxonomyWriter.commit();
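
Here doc.addBinary(...) replaces BinaryDocValuesField, but only after fieldTypes.disableSorting(...) is called for each binary field; the hunk implies that without it the schema would set the field up for sorting rather than plain binary doc values (an inference from this test, not a documented guarantee). Sketch:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableSorting("bdv");  // keep plain binary doc values (assumption: sorting is otherwise enabled)
    Document doc = writer.newDocument();
    doc.addBinary("bdv", new BytesRef("42"));  // was: new BinaryDocValuesField("bdv", new BytesRef("42"))
    writer.addDocument(doc);
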
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
index eb5e123..9e77401 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
@@ -70,7 +70,7 @@
         Set<String> seen = new HashSet<>();
         List<String> paths = new ArrayList<>();
         while (true) {
-          Document doc = new Document();
+          Document doc = w.newDocument();
           int numPaths = TestUtil.nextInt(random(), 1, 5);
           for(int i=0;i<numPaths;i++) {
             String path;
@@ -271,7 +271,7 @@
     tw2.close();
 
     SearcherTaxonomyManager mgr = new SearcherTaxonomyManager(w, true, null, tw);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     tw.replaceTaxonomy(taxoDir2);
     taxoDir2.close();
 
@@ -307,7 +307,7 @@
       mgr.release(pair);
     }
     
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     tw.replaceTaxonomy(taxoDir2);
     taxoDir2.close();
     w.commit();
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetAssociations.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetAssociations.java
index 334ecdd..8b12afe 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetAssociations.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetAssociations.java
@@ -64,7 +64,7 @@
 
     // index documents, 50% have only 'b' and all have 'a'
     for (int i = 0; i < 110; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       // every 11th document is added empty, this used to cause the association
       // aggregators to go into an infinite loop
       if (i % 11 != 0) {
@@ -167,7 +167,7 @@
     FacetsConfig config = new FacetsConfig();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new IntAssociationFacetField(14, "a", "x"));
     doc.add(new FloatAssociationFacetField(55.0f, "b", "y"));
     try {
@@ -189,7 +189,7 @@
     config.setHierarchical("a", true);
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new IntAssociationFacetField(14, "a", "x"));
     try {
       writer.addDocument(config.build(taxoWriter, doc));
@@ -210,7 +210,7 @@
     config.setRequireDimCount("a", true);
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new IntAssociationFacetField(14, "a", "x"));
     try {
       writer.addDocument(config.build(taxoWriter, doc));
@@ -235,5 +235,4 @@
     assertEquals("Wrong count for category 'a'!", 100, facets.getSpecificValue("int", "a").intValue());
     assertEquals("Wrong count for category 'b'!", 150, facets.getSpecificValue("int", "b").intValue());
   }
-
 }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java
index d04cfb1..666f32d 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java
@@ -28,8 +28,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
@@ -72,27 +70,27 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("Author", "Bob"));
     doc.add(new FacetField("Publish Date", "2010", "10", "15"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2010", "10", "20"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Lisa"));
     doc.add(new FacetField("Publish Date", "2012", "1", "1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Susan"));
     doc.add(new FacetField("Publish Date", "2012", "1", "7"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("Author", "Frank"));
     doc.add(new FacetField("Publish Date", "1999", "5", "5"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -157,7 +155,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("a", "foo1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -165,7 +163,7 @@
       writer.commit();
     }
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("a", "foo2"));
     doc.add(new FacetField("b", "bar1"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -174,7 +172,7 @@
       writer.commit();
     }
 
-    doc = new Document();
+    doc = writer.newDocument();
     doc.add(new FacetField("a", "foo3"));
     doc.add(new FacetField("b", "bar2"));
     doc.add(new FacetField("c", "baz1"));
@@ -215,7 +213,7 @@
     config.setIndexFieldName("a", "$facets2");
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     doc.add(new FacetField("a", "foo1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -279,8 +277,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("a", "path"));
     writer.addDocument(config.build(taxoWriter, doc));
     writer.close();
@@ -296,8 +294,8 @@
     config.setMultiValued("a", true);
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("a", "path", "x"));
     doc.add(new FacetField("a", "path", "y"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -342,8 +340,8 @@
     FacetsConfig config = new FacetsConfig();
     config.setMultiValued("dim", true);
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("dim", "test\u001Fone"));
     doc.add(new FacetField("dim", "test\u001Etwo"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -383,8 +381,8 @@
     config.setHierarchical("dim3", true);
     config.setRequireDimCount("dim3", true);
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("dim", "a"));
     doc.add(new FacetField("dim2", "a"));
     doc.add(new FacetField("dim2", "b"));
@@ -432,8 +430,8 @@
     
     int numLabels = TestUtil.nextInt(random(), 40000, 100000);
     
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     for (int i = 0; i < numLabels; i++) {
       doc.add(new FacetField("dim", "" + i));
     }
@@ -477,8 +475,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("a", "path", "other"));
     try {
       config.build(taxoWriter, doc);
@@ -499,8 +497,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
-    doc.add(newTextField("field", "text", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "text");
     doc.add(new FacetField("a", "path"));
     doc.add(new FacetField("a", "path2"));
     try {
@@ -523,8 +521,8 @@
     config.setIndexFieldName("b", "$b");
     
     for(int i = atLeast(30); i > 0; --i) {
-      Document doc = new Document();
-      doc.add(new StringField("f", "v", Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("f", "v");
       doc.add(new FacetField("a", "1"));
       doc.add(new FacetField("b", "1"));
       iw.addDocument(config.build(taxoWriter, doc));
@@ -552,7 +550,7 @@
     IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
     FacetsConfig config = new FacetsConfig();
     for(int i = atLeast(30); i > 0; --i) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       doc.add(new FacetField("a", "1"));
       doc.add(new FacetField("b", "1"));
       iw.addDocument(config.build(taxoWriter, doc));
@@ -581,7 +579,7 @@
     IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     doc.add(new FacetField("a", "1"));
     doc.add(new FacetField("b", "1"));
     iw.addDocument(config.build(taxoWriter, doc));
@@ -610,7 +608,7 @@
     IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
     FacetsConfig config = new FacetsConfig();
     for (int i = 0; i < 10; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       doc.add(new FacetField("a", Integer.toString(i)));
       iw.addDocument(config.build(taxoWriter, doc));
     }
@@ -630,9 +628,9 @@
 
   private void indexTwoDocs(TaxonomyWriter taxoWriter, IndexWriter indexWriter, FacetsConfig config, boolean withContent) throws Exception {
     for (int i = 0; i < 2; i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       if (withContent) {
-        doc.add(new StringField("f", "a", Field.Store.NO));
+        doc.addAtom("f", "a");
       }
       if (config != null) {
         doc.add(new FacetField("A", Integer.toString(i)));
@@ -696,8 +694,8 @@
     int numDims = TestUtil.nextInt(random(), 1, 7);
     List<TestDoc> testDocs = getRandomDocs(tokens, numDocs, numDims);
     for(TestDoc testDoc : testDocs) {
-      Document doc = new Document();
-      doc.add(newStringField("content", testDoc.content, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("content", testDoc.content);
       for(int j=0;j<numDims;j++) {
         if (testDoc.dims[j] != null) {
           doc.add(new FacetField("dim" + j, testDoc.dims[j]));
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java
index 0300bd1..6e3acb8 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java
@@ -28,8 +28,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;
@@ -118,10 +116,10 @@
   }
 
   private static void addField(Document doc) {
-    doc.add(new StringField(A.field(), A.text(), Store.NO));
+    doc.addAtom(A.field(), A.text());
   }
 
-  private static void addFacets(Document doc, FacetsConfig config, boolean updateTermExpectedCounts) 
+  private static void addFacets(Document doc, FacetsConfig config, boolean updateTermExpectedCounts)
       throws IOException {
     List<FacetField> docCategories = randomCategories(random());
     for (FacetField ff : docCategories) {
@@ -155,7 +153,7 @@
   private static void indexDocsNoFacets(IndexWriter indexWriter) throws IOException {
     int numDocs = atLeast(2);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       addField(doc);
       indexWriter.addDocument(doc);
     }
@@ -168,7 +166,7 @@
     int numDocs = atLeast(random, 2);
     FacetsConfig config = getConfig();
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       addFacets(doc, config, false);
       indexWriter.addDocument(config.build(taxoWriter, doc));
     }
@@ -181,7 +179,7 @@
     int numDocs = atLeast(random, 2);
     FacetsConfig config = getConfig();
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       addFacets(doc, config, true);
       addField(doc);
       indexWriter.addDocument(config.build(taxoWriter, doc));
@@ -195,7 +193,7 @@
     int numDocs = atLeast(random, 2);
     FacetsConfig config = getConfig();
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = indexWriter.newDocument();
       boolean hasContent = random.nextBoolean();
       if (hasContent) {
         addField(doc);
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
index 9869446..bbd65d6 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
@@ -25,10 +25,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;
@@ -38,10 +34,10 @@
 import org.apache.lucene.facet.LabelAndValue;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.FunctionQuery;
@@ -78,28 +74,28 @@
 
     // Reused across documents, to add the necessary facet
     // fields:
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("num", 10));
+    Document doc = writer.newDocument();
+    doc.addInt("num", 10);
     doc.add(new FacetField("Author", "Bob"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 20));
+    doc = writer.newDocument();
+    doc.addInt("num", 20);
     doc.add(new FacetField("Author", "Lisa"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 30));
+    doc = writer.newDocument();
+    doc.addInt("num", 30);
     doc.add(new FacetField("Author", "Lisa"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 40));
+    doc = writer.newDocument();
+    doc.addInt("num", 40);
     doc.add(new FacetField("Author", "Susan"));
     writer.addDocument(config.build(taxoWriter, doc));
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 45));
+    doc = writer.newDocument();
+    doc.addInt("num", 45);
     doc.add(new FacetField("Author", "Frank"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -143,8 +139,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     FacetsConfig config = new FacetsConfig();
 
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("num", 10));
+    Document doc = writer.newDocument();
+    doc.addInt("num", 10);
     doc.add(new FacetField("a", "foo1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -152,8 +148,8 @@
       writer.commit();
     }
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 20));
+    doc = writer.newDocument();
+    doc.addInt("num", 20);
     doc.add(new FacetField("a", "foo2"));
     doc.add(new FacetField("b", "bar1"));
     writer.addDocument(config.build(taxoWriter, doc));
@@ -162,8 +158,8 @@
       writer.commit();
     }
 
-    doc = new Document();
-    doc.add(new NumericDocValuesField("num", 30));
+    doc = writer.newDocument();
+    doc.addInt("num", 30);
     doc.add(new FacetField("a", "foo3"));
     doc.add(new FacetField("b", "bar2"));
     doc.add(new FacetField("c", "baz1"));
@@ -207,8 +203,8 @@
 
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
 
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("num", 10));
+    Document doc = writer.newDocument();
+    doc.addInt("num", 10);
     doc.add(new FacetField("a", "foo1"));
     writer.addDocument(config.build(taxoWriter, doc));
 
@@ -256,9 +252,9 @@
     FacetsConfig config = new FacetsConfig();
 
     for(int i = atLeast(30); i > 0; --i) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       if (random().nextBoolean()) { // don't match all documents
-        doc.add(new StringField("f", "v", Field.Store.NO));
+        doc.addAtom("f", "v");
       }
       doc.add(new FacetField("dim", "a"));
       iw.addDocument(config.build(taxoWriter, doc));
@@ -290,8 +286,8 @@
     IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
     FacetsConfig config = new FacetsConfig();
     for (int i = 0; i < 4; i++) {
-      Document doc = new Document();
-      doc.add(new NumericDocValuesField("price", (i+1)));
+      Document doc = iw.newDocument();
+      doc.addInt("price", (i+1));
       doc.add(new FacetField("a", Integer.toString(i % 2)));
       iw.addDocument(config.build(taxoWriter, doc));
     }
@@ -317,8 +313,8 @@
 
     FacetsConfig config = new FacetsConfig();
     for (int i = 0; i < 4; i++) {
-      Document doc = new Document();
-      doc.add(new NumericDocValuesField("price", (i+1)));
+      Document doc = iw.newDocument();
+      doc.addInt("price", (i+1));
       doc.add(new FacetField("a", Integer.toString(i % 2)));
       iw.addDocument(config.build(taxoWriter, doc));
     }
@@ -371,8 +367,8 @@
     //config.setRequireDimCount("a", true);
     
     for (int i = 0; i < 4; i++) {
-      Document doc = new Document();
-      doc.add(new NumericDocValuesField("price", (i+1)));
+      Document doc = iw.newDocument();
+      doc.addInt("price", (i+1));
       doc.add(new FacetField("a", Integer.toString(i % 2), "1"));
       iw.addDocument(config.build(taxoWriter, doc));
     }
@@ -401,8 +397,8 @@
     config.setIndexFieldName("b", "$b");
     
     for(int i = atLeast(30); i > 0; --i) {
-      Document doc = new Document();
-      doc.add(new StringField("f", "v", Field.Store.NO));
+      Document doc = iw.newDocument();
+      doc.addAtom("f", "v");
       doc.add(new FacetField("a", "1"));
       doc.add(new FacetField("b", "1"));
       iw.addDocument(config.build(taxoWriter, doc));
@@ -435,10 +431,10 @@
     int numDims = TestUtil.nextInt(random(), 1, 7);
     List<TestDoc> testDocs = getRandomDocs(tokens, numDocs, numDims);
     for(TestDoc testDoc : testDocs) {
-      Document doc = new Document();
-      doc.add(newStringField("content", testDoc.content, Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("content", testDoc.content);
       testDoc.value = random().nextFloat();
-      doc.add(new FloatDocValuesField("value", testDoc.value));
+      doc.addFloat("value", testDoc.value);
       for(int j=0;j<numDims;j++) {
         if (testDoc.dims[j] != null) {
           doc.add(new FacetField("dim" + j, testDoc.dims[j]));
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java
index 679f6a8..586ad75 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java
@@ -10,9 +10,9 @@
 import org.apache.lucene.facet.FacetTestCase;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
-import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
 import org.apache.lucene.facet.taxonomy.writercache.Cl2oTaxonomyWriterCache;
 import org.apache.lucene.facet.taxonomy.writercache.LruTaxonomyWriterCache;
+import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
@@ -100,7 +100,7 @@
           Random random = random();
           while (numDocs.decrementAndGet() > 0) {
             try {
-              Document doc = new Document();
+              Document doc = iw.newDocument();
               int numCats = random.nextInt(3) + 1; // 1-3
               while (numCats-- > 0) {
                 FacetField ff = newCategory();
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
index dae94ad..2e76df7 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
@@ -9,16 +9,16 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetTestCase;
 import org.apache.lucene.facet.FacetsConfig;
-import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.MemoryOrdinalMap;
-import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
 import org.apache.lucene.facet.taxonomy.writercache.Cl2oTaxonomyWriterCache;
 import org.apache.lucene.facet.taxonomy.writercache.LruTaxonomyWriterCache;
+import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -448,7 +448,7 @@
     FacetField ff = new FacetField("dim", bigs);
     FacetLabel cp = new FacetLabel("dim", bigs);
     ordinal = taxoWriter.addCategory(cp);
-    Document doc = new Document();
+    Document doc = indexWriter.newDocument();
     doc.add(ff);
     indexWriter.addDocument(config.build(taxoWriter, doc));
 
@@ -456,7 +456,7 @@
     for (int i = 0; i < 3; i++) {
       String s = TestUtil.randomSimpleString(random(), 1, 10);
       taxoWriter.addCategory(new FacetLabel("dim", s));
-      doc = new Document();
+      doc = indexWriter.newDocument();
       doc.add(new FacetField("dim", s));
       indexWriter.addDocument(config.build(taxoWriter, doc));
     }
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
index d5fc31a..1fcaaf8 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
@@ -28,12 +28,7 @@
 import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
@@ -66,69 +61,68 @@
         random(),
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    DocValuesType valueType = DocValuesType.SORTED;
 
     // 0
-    Document doc = new Document();
-    addGroupField(doc, groupField, "author1", valueType);
-    doc.add(newTextField("content", "random text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 1));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("1")));
+    Document doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "random text");
+    doc.addInt("id_1", 1);
+    doc.addAtom("id_2", new BytesRef("1"));
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    addGroupField(doc, groupField, "author1", valueType);
-    doc.add(newTextField("content", "some more random text blob", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 2));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("2")));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "some more random text blob");
+    doc.addInt("id_1", 2);
+    doc.addAtom("id_2", new BytesRef("2"));
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
-    addGroupField(doc, groupField, "author1", valueType);
-    doc.add(newTextField("content", "some more random textual data", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 3));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("3")));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "some more random textual data");
+    doc.addInt("id_1", 3);
+    doc.addAtom("id_2", new BytesRef("3"));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3
-    doc = new Document();
-    addGroupField(doc, groupField, "author2", valueType);
-    doc.add(newTextField("content", "some random text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 4));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("4")));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author2");
+    doc.addLargeText("content", "some random text");
+    doc.addInt("id_1", 4);
+    doc.addAtom("id_2", new BytesRef("4"));
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
-    addGroupField(doc, groupField, "author3", valueType);
-    doc.add(newTextField("content", "some more random text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 5));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("5")));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author3");
+    doc.addLargeText("content", "some more random text");
+    doc.addInt("id_1", 5);
+    doc.addAtom("id_2", new BytesRef("5"));
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
-    addGroupField(doc, groupField, "author3", valueType);
-    doc.add(newTextField("content", "random blob", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 6));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("6")));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author3");
+    doc.addLargeText("content", "random blob");
+    doc.addInt("id_1", 6);
+    doc.addAtom("id_2", new BytesRef("6"));
     w.addDocument(doc);
 
     // 6 -- no author field
-    doc = new Document();
-    doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 6));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("6")));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
+    doc.addInt("id_1", 6);
+    doc.addAtom("id_2", new BytesRef("6"));
     w.addDocument(doc);
 
     // 7 -- no author field
-    doc = new Document();
-    doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(new NumericDocValuesField("id_1", 7));
-    doc.add(new SortedDocValuesField("id_2", new BytesRef("7")));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
+    doc.addInt("id_1", 7);
+    doc.addAtom("id_2", new BytesRef("7"));
     w.addDocument(doc);
 
     IndexReader reader = w.getReader();
@@ -220,29 +214,6 @@
           newIndexWriterConfig(new MockAnalyzer(random())));
       DocValuesType valueType = DocValuesType.SORTED;
 
-      Document doc = new Document();
-      Document docNoGroup = new Document();
-      Field valuesField = null;
-      valuesField = new SortedDocValuesField("group", new BytesRef());
-      doc.add(valuesField);
-      Field sort1 = new SortedDocValuesField("sort1", new BytesRef());
-      doc.add(sort1);
-      docNoGroup.add(sort1);
-      Field sort2 = new SortedDocValuesField("sort2", new BytesRef());
-      doc.add(sort2);
-      docNoGroup.add(sort2);
-      Field sort3 = new SortedDocValuesField("sort3", new BytesRef());
-      doc.add(sort3);
-      docNoGroup.add(sort3);
-      Field content = newTextField("content", "", Field.Store.NO);
-      doc.add(content);
-      docNoGroup.add(content);
-      IntField id = new IntField("id", 0, Field.Store.NO);
-      doc.add(id);
-      docNoGroup.add(id);
-      NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
-      doc.add(idDV);
-      docNoGroup.add(idDV);
       final GroupDoc[] groupDocs = new GroupDoc[numDocs];
       for (int i = 0; i < numDocs; i++) {
         final BytesRef groupValue;
@@ -268,20 +239,17 @@
         }
 
         groupDocs[i] = groupDoc;
+
+        Document doc = w.newDocument();
         if (groupDoc.group != null) {
-          valuesField.setBytesValue(new BytesRef(groupDoc.group.utf8ToString()));
+          doc.addAtom("group", new BytesRef(groupDoc.group.utf8ToString()));
         }
-        sort1.setBytesValue(groupDoc.sort1);
-        sort2.setBytesValue(groupDoc.sort2);
-        sort3.setBytesValue(groupDoc.sort3);
-        content.setStringValue(groupDoc.content);
-        id.setIntValue(groupDoc.id);
-        idDV.setLongValue(groupDoc.id);
-        if (groupDoc.group == null) {
-          w.addDocument(docNoGroup);
-        } else {
-          w.addDocument(doc);
-        }
+        doc.addAtom("sort1", groupDoc.sort1);
+        doc.addAtom("sort2", groupDoc.sort2);
+        doc.addAtom("sort3", groupDoc.sort3);
+        doc.addLargeText("content", groupDoc.content);
+        doc.addInt("id", groupDoc.id);
+        w.addDocument(doc);
       }
 
       final DirectoryReader r = w.getReader();
@@ -520,19 +488,8 @@
     return collector;
   }
 
-  private void addGroupField(Document doc, String groupField, String value, DocValuesType valueType) {
-    Field valuesField = null;
-    switch(valueType) {
-      case BINARY:
-        valuesField = new BinaryDocValuesField(groupField, new BytesRef(value));
-        break;
-      case SORTED:
-        valuesField = new SortedDocValuesField(groupField, new BytesRef(value));
-        break;
-      default:
-        fail("unhandled type");
-    }
-    doc.add(valuesField);
+  private void addGroupField(Document doc, String groupField, String value) {
+    doc.addBinary(groupField, new BytesRef(value));
   }
 
   private static class GroupDoc {
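
The biggest simplification in this file: the old idiom of pre-building reusable Field instances, with a parallel docNoGroup document for rows that lack a group, collapses into creating a fresh document per row and skipping the add when the value is missing. A condensed reconstruction of the indexing loop from the hunk above (GroupDoc fields as used there; the enhanced-for is an editorial simplification):

    for (GroupDoc groupDoc : groupDocs) {
      Document doc = w.newDocument();
      if (groupDoc.group != null) {  // an absent field now simply means a missing value
        doc.addAtom("group", new BytesRef(groupDoc.group.utf8ToString()));
      }
      doc.addAtom("sort1", groupDoc.sort1);
      doc.addLargeText("content", groupDoc.content);
      doc.addInt("id", groupDoc.id);  // was: IntField plus a separate NumericDocValuesField
      w.addDocument(doc);
    }
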
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
index 7b72864..4ff0824 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
@@ -17,12 +17,10 @@
  * limitations under the License.
  */
 
+import java.util.HashMap;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
@@ -32,18 +30,13 @@
 import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.util.HashMap;
-
 public class AllGroupsCollectorTest extends LuceneTestCase {
 
   public void testTotalGroupCount() throws Exception {
 
     final String groupField = "author";
-    FieldType customType = new FieldType();
-    customType.setStored(true);
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
@@ -52,52 +45,52 @@
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     // 0
-    Document doc = new Document();
+    Document doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "random text", Field.Store.YES));
-    doc.add(new Field("id", "1", customType));
+    doc.addLargeText("content", "random text");
+    doc.addStoredString("id", "1");
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "some more random text blob", Field.Store.YES));
-    doc.add(new Field("id", "2", customType));
+    doc.addLargeText("content", "some more random text blob");
+    doc.addStoredString("id", "2");
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "some more random textual data", Field.Store.YES));
-    doc.add(new Field("id", "3", customType));
+    doc.addLargeText("content", "some more random textual data");
+    doc.addStoredString("id", "3");
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author2");
-    doc.add(new TextField("content", "some random text", Field.Store.YES));
-    doc.add(new Field("id", "4", customType));
+    doc.addLargeText("content", "some random text");
+    doc.addStoredString("id", "4");
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
-    doc.add(new TextField("content", "some more random text", Field.Store.YES));
-    doc.add(new Field("id", "5", customType));
+    doc.addLargeText("content", "some more random text");
+    doc.addStoredString("id", "5");
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
-    doc.add(new TextField("content", "random blob", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
+    doc.addLargeText("content", "random blob");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     // 6 -- no author field
-    doc = new Document();
-    doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());
@@ -120,8 +113,7 @@
   }
 
   private void addGroupField(Document doc, String groupField, String value) {
-    doc.add(new TextField(groupField, value, Field.Store.YES));
-    doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
+    doc.addAtom(groupField, value);
   }
 
   private AbstractAllGroupsCollector<?> createRandomCollector(String groupField) {
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
index 62c5150..2ff4c95 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
@@ -33,13 +33,8 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.search.IndexSearcher;
@@ -71,58 +66,58 @@
         random,
         dir,
         newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
+    Document doc = w.newDocument();
     addField(doc, groupField, "1");
     addField(doc, countField, "1");
-    doc.add(new TextField("content", "random text", Field.Store.NO));
-    doc.add(new StringField("id", "1", Field.Store.NO));
+    doc.addLargeText("content", "random text");
+    doc.addAtom("id", "1");
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
+    doc = w.newDocument();
     addField(doc, groupField, "1");
     addField(doc, countField, "1");
-    doc.add(new TextField("content", "some more random text blob", Field.Store.NO));
-    doc.add(new StringField("id", "2", Field.Store.NO));
+    doc.addLargeText("content", "some more random text blob");
+    doc.addAtom("id", "2");
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
+    doc = w.newDocument();
     addField(doc, groupField, "1");
     addField(doc, countField, "2");
-    doc.add(new TextField("content", "some more random textual data", Field.Store.NO));
-    doc.add(new StringField("id", "3", Field.Store.NO));
+    doc.addLargeText("content", "some more random textual data");
+    doc.addAtom("id", "3");
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3 -- no count field
-    doc = new Document();
+    doc = w.newDocument();
     addField(doc, groupField, "2");
-    doc.add(new TextField("content", "some random text", Field.Store.NO));
-    doc.add(new StringField("id", "4", Field.Store.NO));
+    doc.addLargeText("content", "some random text");
+    doc.addAtom("id", "4");
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
+    doc = w.newDocument();
     addField(doc, groupField, "3");
     addField(doc, countField, "1");
-    doc.add(new TextField("content", "some more random text", Field.Store.NO));
-    doc.add(new StringField("id", "5", Field.Store.NO));
+    doc.addLargeText("content", "some more random text");
+    doc.addAtom("id", "5");
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
+    doc = w.newDocument();
     addField(doc, groupField, "3");
     addField(doc, countField, "1");
-    doc.add(new TextField("content", "random blob", Field.Store.NO));
-    doc.add(new StringField("id", "6", Field.Store.NO));
+    doc.addLargeText("content", "random blob");
+    doc.addAtom("id", "6");
     w.addDocument(doc);
 
     // 6 -- no author field
-    doc = new Document();
-    doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
     addField(doc, countField, "1");
-    doc.add(new StringField("id", "6", Field.Store.NO));
+    doc.addAtom("id", "6");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());
@@ -348,7 +343,7 @@
   }
 
   private void addField(Document doc, String field, String value) {
-    doc.add(new SortedDocValuesField(field, new BytesRef(value)));
+    doc.addAtom(field, new BytesRef(value));
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
@@ -438,24 +433,23 @@
       }
       countsVals.add(countValue);
 
-      Document doc = new Document();
-      doc.add(new StringField("id", String.format(Locale.ROOT, "%09d", i), Field.Store.YES));
-      doc.add(new SortedDocValuesField("id", new BytesRef(String.format(Locale.ROOT, "%09d", i))));
+      Document doc = w.newDocument();
+      doc.addAtom("id", String.format(Locale.ROOT, "%09d", i));
       if (groupValue != null) {
         addField(doc, groupField, groupValue);
       }
       if (countValue != null) {
         addField(doc, countField, countValue);
       }
-      doc.add(new TextField("content", content, Field.Store.YES));
+      doc.addLargeText("content", content);
       w.addDocument(doc);
     }
 
     DirectoryReader reader = w.getReader();
     if (VERBOSE) {
       for(int docID=0;docID<reader.maxDoc();docID++) {
-        StoredDocument doc = reader.document(docID);
-        System.out.println("docID=" + docID + " id=" + doc.get("id") + " content=" + doc.get("content") + " author=" + doc.get("author") + " publisher=" + doc.get("publisher"));
+        Document doc = reader.document(docID);
+        System.out.println("docID=" + docID + " id=" + doc.getString("id") + " content=" + doc.getString("content") + " author=" + doc.getString("author") + " publisher=" + doc.getString("publisher"));
       }
     }
 
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
index 4ffc728..8677cc1 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
@@ -33,11 +33,7 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -55,50 +51,47 @@
 
   public void testSimple() throws Exception {
     final String groupField = "hotel";
-    FieldType customType = new FieldType();
-    customType.setStored(true);
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
         random(),
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    boolean useDv = true;
 
     // 0
-    Document doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    addField(doc, "airport", "ams", useDv);
-    addField(doc, "duration", "5", useDv);
+    Document doc = w.newDocument();
+    addField(doc, groupField, "a");
+    addField(doc, "airport", "ams");
+    addField(doc, "duration", "5");
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    addField(doc, "airport", "dus", useDv);
-    addField(doc, "duration", "10", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "a");
+    addField(doc, "airport", "dus");
+    addField(doc, "duration", "10");
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    addField(doc, "airport", "ams", useDv);
-    addField(doc, "duration", "10", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "ams");
+    addField(doc, "duration", "10");
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    addField(doc, "airport", "ams", useDv);
-    addField(doc, "duration", "5", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "ams");
+    addField(doc, "duration", "5");
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    addField(doc, "airport", "ams", useDv);
-    addField(doc, "duration", "5", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "ams");
+    addField(doc, "duration", "5");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());
@@ -111,8 +104,8 @@
       // any of these limits is plenty for the data we have
 
       groupedAirportFacetCollector = createRandomCollector
-        (useDv ? "hotel_dv" : "hotel", 
-         useDv ? "airport_dv" : "airport", null, false);
+        ("hotel_dv",
+         "airport_dv", null, false);
       indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector);
       int maxOffset = 5;
       airportResult = groupedAirportFacetCollector.mergeSegmentResults
@@ -137,7 +130,7 @@
       assertEquals(1, entries.get(0).getCount());
     }
 
-    AbstractGroupFacetCollector groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false);
+    AbstractGroupFacetCollector groupedDurationFacetCollector = createRandomCollector("hotel_dv", "duration_dv", null, false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector);
     TermGroupFacetCollector.GroupedFacetResult durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 0, false);
     assertEquals(4, durationResult.getTotalCount());
@@ -151,60 +144,48 @@
     assertEquals(2, entries.get(1).getCount());
 
     // 5
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    // missing airport
-    if (useDv) {
-      addField(doc, "airport", "", useDv);
-    }
-    addField(doc, "duration", "5", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "");
+    addField(doc, "duration", "5");
     w.addDocument(doc);
 
     // 6
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    addField(doc, "airport", "bru", useDv);
-    addField(doc, "duration", "10", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "bru");
+    addField(doc, "duration", "10");
     w.addDocument(doc);
 
     // 7
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    addField(doc, "airport", "bru", useDv);
-    addField(doc, "duration", "15", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    addField(doc, "airport", "bru");
+    addField(doc, "duration", "15");
     w.addDocument(doc);
 
     // 8
-    doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    addField(doc, "airport", "bru", useDv);
-    addField(doc, "duration", "10", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "a");
+    addField(doc, "airport", "bru");
+    addField(doc, "duration", "10");
     w.addDocument(doc);
 
     indexSearcher.getIndexReader().close();
     indexSearcher = newSearcher(w.getReader());
-    groupedAirportFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, !useDv);
+    groupedAirportFacetCollector = createRandomCollector("hotel_dv", "airport_dv", null, false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector);
     airportResult = groupedAirportFacetCollector.mergeSegmentResults(3, 0, true);
     entries = airportResult.getFacetEntries(1, 2);
     assertEquals(2, entries.size());
-    if (useDv) {
-      assertEquals(6, airportResult.getTotalCount());
-      assertEquals(0, airportResult.getTotalMissingCount());
-      assertEquals("bru", entries.get(0).getValue().utf8ToString());
-      assertEquals(2, entries.get(0).getCount());
-      assertEquals("", entries.get(1).getValue().utf8ToString());
-      assertEquals(1, entries.get(1).getCount());
-    } else {
-      assertEquals(5, airportResult.getTotalCount());
-      assertEquals(1, airportResult.getTotalMissingCount());
-      assertEquals("bru", entries.get(0).getValue().utf8ToString());
-      assertEquals(2, entries.get(0).getCount());
-      assertEquals("dus", entries.get(1).getValue().utf8ToString());
-      assertEquals(1, entries.get(1).getCount());
-    }
+    assertEquals(6, airportResult.getTotalCount());
+    assertEquals(0, airportResult.getTotalMissingCount());
+    assertEquals("bru", entries.get(0).getValue().utf8ToString());
+    assertEquals(2, entries.get(0).getCount());
+    assertEquals("", entries.get(1).getValue().utf8ToString());
+    assertEquals(1, entries.get(1).getCount());
 
-    groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false);
+    groupedDurationFacetCollector = createRandomCollector("hotel_dv", "duration_dv", null, false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector);
     durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 2, true);
     assertEquals(5, durationResult.getTotalCount());
@@ -216,50 +197,38 @@
     assertEquals(2, entries.get(0).getCount());
 
     // 9
-    doc = new Document();
-    addField(doc, groupField, "c", useDv);
-    addField(doc, "airport", "bru", useDv);
-    addField(doc, "duration", "15", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "c");
+    addField(doc, "airport", "bru");
+    addField(doc, "duration", "15");
     w.addDocument(doc);
 
     // 10
-    doc = new Document();
-    addField(doc, groupField, "c", useDv);
-    addField(doc, "airport", "dus", useDv);
-    addField(doc, "duration", "10", useDv);
+    doc = w.newDocument();
+    addField(doc, groupField, "c");
+    addField(doc, "airport", "dus");
+    addField(doc, "duration", "10");
     w.addDocument(doc);
 
     indexSearcher.getIndexReader().close();
     indexSearcher = newSearcher(w.getReader());
-    groupedAirportFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "airport_dv" : "airport", null, false);
+    groupedAirportFacetCollector = createRandomCollector("hotel_dv", "airport_dv", null, false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector);
     airportResult = groupedAirportFacetCollector.mergeSegmentResults(10, 0, false);
     entries = airportResult.getFacetEntries(0, 10);
-    if (useDv) {
-      assertEquals(8, airportResult.getTotalCount());
-      assertEquals(0, airportResult.getTotalMissingCount());
-      assertEquals(4, entries.size());
-      assertEquals("", entries.get(0).getValue().utf8ToString());
-      assertEquals(1, entries.get(0).getCount());
-      assertEquals("ams", entries.get(1).getValue().utf8ToString());
-      assertEquals(2, entries.get(1).getCount());
-      assertEquals("bru", entries.get(2).getValue().utf8ToString());
-      assertEquals(3, entries.get(2).getCount());
-      assertEquals("dus", entries.get(3).getValue().utf8ToString());
-      assertEquals(2, entries.get(3).getCount());
-    } else {
-      assertEquals(7, airportResult.getTotalCount());
-      assertEquals(1, airportResult.getTotalMissingCount());
-      assertEquals(3, entries.size());
-      assertEquals("ams", entries.get(0).getValue().utf8ToString());
-      assertEquals(2, entries.get(0).getCount());
-      assertEquals("bru", entries.get(1).getValue().utf8ToString());
-      assertEquals(3, entries.get(1).getCount());
-      assertEquals("dus", entries.get(2).getValue().utf8ToString());
-      assertEquals(2, entries.get(2).getCount());
-    }
+    assertEquals(8, airportResult.getTotalCount());
+    assertEquals(0, airportResult.getTotalMissingCount());
+    assertEquals(4, entries.size());
+    assertEquals("", entries.get(0).getValue().utf8ToString());
+    assertEquals(1, entries.get(0).getCount());
+    assertEquals("ams", entries.get(1).getValue().utf8ToString());
+    assertEquals(2, entries.get(1).getCount());
+    assertEquals("bru", entries.get(2).getValue().utf8ToString());
+    assertEquals(3, entries.get(2).getCount());
+    assertEquals("dus", entries.get(3).getValue().utf8ToString());
+    assertEquals(2, entries.get(3).getCount());
 
-    groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", "1", false);
+    groupedDurationFacetCollector = createRandomCollector("hotel_dv", "duration_dv", "1", false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector);
     durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 0, true);
     assertEquals(5, durationResult.getTotalCount());
@@ -279,8 +248,6 @@
 
   public void testMVGroupedFacetingWithDeletes() throws Exception {
     final String groupField = "hotel";
-    FieldType customType = new FieldType();
-    customType.setStored(true);
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
@@ -288,58 +255,60 @@
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));
     boolean useDv = true;
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("airport");
 
     // Cannot assert this since we use NoMergePolicy:
     w.setDoRandomForceMergeAssert(false);
 
     // 0
-    Document doc = new Document();
-    doc.add(new StringField("x", "x", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addAtom("x", "x");
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
+    doc = w.newDocument();
+    addField(doc, groupField, "a");
+    doc.addAtom("airport", new BytesRef("ams"));
     w.addDocument(doc);
 
     w.commit();
     w.deleteDocuments(new TermQuery(new Term("airport", "ams")));
 
     // 2
-    doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
+    doc = w.newDocument();
+    addField(doc, groupField, "a");
+    doc.addAtom("airport", new BytesRef("ams"));
     w.addDocument(doc);
 
     // 3
-    doc = new Document();
-    addField(doc, groupField, "a", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("dus")));
+    doc = w.newDocument();
+    addField(doc, groupField, "a");
+    doc.addAtom("airport", new BytesRef("dus"));
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    doc.addAtom("airport", new BytesRef("ams"));
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    doc.addAtom("airport", new BytesRef("ams"));
     w.addDocument(doc);
 
     // 6
-    doc = new Document();
-    addField(doc, groupField, "b", useDv);
-    doc.add(new SortedSetDocValuesField("airport", new BytesRef("ams")));
+    doc = w.newDocument();
+    addField(doc, groupField, "b");
+    doc.addAtom("airport", new BytesRef("ams"));
     w.addDocument(doc);
     w.commit();
 
     // 7
-    doc = new Document();
-    doc.add(new StringField("x", "x", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("x", "x");
     w.addDocument(doc);
     w.commit();
 
@@ -362,9 +331,8 @@
     dir.close();
   }
 
-  private void addField(Document doc, String field, String value, boolean canUseIDV) {
-    assert canUseIDV;
-    doc.add(new SortedDocValuesField(field + "_dv", new BytesRef(value)));
+  private void addField(Document doc, String field, String value) {
+    doc.addAtom(field + "_dv", new BytesRef(value));
   }
 
   public void testRandom() throws Exception {
@@ -506,38 +474,10 @@
         dir,
         newIndexWriterConfig(new MockAnalyzer(random))
     );
-    Document doc = new Document();
-    Document docNoGroup = new Document();
-    Document docNoFacet = new Document();
-    Document docNoGroupNoFacet = new Document();
-    Field group = newStringField("group", "", Field.Store.NO);
-    Field groupDc = new SortedDocValuesField("group", new BytesRef());
-    doc.add(groupDc);
-    docNoFacet.add(groupDc);
-    doc.add(group);
-    docNoFacet.add(group);
-    Field[] facetFields;
-    if (multipleFacetValuesPerDocument == false) {
-      facetFields = new Field[2];
-      facetFields[0] = newStringField("facet", "", Field.Store.NO);
-      doc.add(facetFields[0]);
-      docNoGroup.add(facetFields[0]);
-      facetFields[1] = new SortedDocValuesField("facet", new BytesRef());
-      doc.add(facetFields[1]);
-      docNoGroup.add(facetFields[1]);
-    } else {
-      facetFields = multipleFacetValuesPerDocument ? new Field[2 + random.nextInt(6)] : new Field[1];
-      for (int i = 0; i < facetFields.length; i++) {
-        facetFields[i] = new SortedSetDocValuesField("facet", new BytesRef());
-        doc.add(facetFields[i]);
-        docNoGroup.add(facetFields[i]);
-      }
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    if (multipleFacetValuesPerDocument) {
+      fieldTypes.setMultiValued("facet");
     }
-    Field content = newStringField("content", "", Field.Store.NO);
-    doc.add(content);
-    docNoGroup.add(content);
-    docNoFacet.add(content);
-    docNoGroupNoFacet.add(content);
 
     NavigableSet<String> uniqueFacetValues = new TreeSet<>(new Comparator<String>() {
 
@@ -557,6 +497,7 @@
     });
     Map<String, Map<String, Set<String>>> searchTermToFacetToGroups = new HashMap<>();
     int facetWithMostGroups = 0;
+    int numFacetValues = 2 + random.nextInt(6);
     for (int i = 0; i < numDocs; i++) {
       final String groupValue;
       if (random.nextInt(24) == 17) {
@@ -573,6 +514,7 @@
       }
       Map<String, Set<String>> facetToGroups = searchTermToFacetToGroups.get(contentStr);
 
+      Document doc = writer.newDocument();
       List<String> facetVals = new ArrayList<>();
       if (multipleFacetValuesPerDocument == false) {
         String facetValue = facetValues.get(random.nextInt(facetValues.size()));
@@ -585,12 +527,12 @@
         if (groupsInFacet.size() > facetWithMostGroups) {
           facetWithMostGroups = groupsInFacet.size();
         }
-        facetFields[0].setStringValue(facetValue);
-        facetFields[1].setBytesValue(new BytesRef(facetValue));
         facetVals.add(facetValue);
+        doc.addAtom("facet", facetValue);
       } else {
-        for (Field facetField : facetFields) {
+        for (int j=0;j<numFacetValues;j++) {
           String facetValue = facetValues.get(random.nextInt(facetValues.size()));
+          doc.addAtom("facet", facetValue);
           uniqueFacetValues.add(facetValue);
           if (!facetToGroups.containsKey(facetValue)) {
             facetToGroups.put(facetValue, new HashSet<String>());
@@ -600,33 +542,17 @@
           if (groupsInFacet.size() > facetWithMostGroups) {
             facetWithMostGroups = groupsInFacet.size();
           }
-          facetField.setBytesValue(new BytesRef(facetValue));
           facetVals.add(facetValue);
         }
       }
 
       if (VERBOSE) {
-        System.out.println("  doc content=" + contentStr + " group=" + (groupValue == null ? "null" : groupValue) + " facetVals=" + facetVals);
+        System.out.println("  doc content=" + contentStr + " group=" + groupValue + " facetVals=" + facetVals);
       }
 
-      if (groupValue != null) {
-        groupDc.setBytesValue(new BytesRef(groupValue));
-        group.setStringValue(groupValue);
-      } else {
-        // TODO: not true
-        // DV cannot have missing values:
-        groupDc.setBytesValue(new BytesRef());
-      }
-      content.setStringValue(contentStr);
-      if (groupValue == null && facetVals.isEmpty()) {
-        writer.addDocument(docNoGroupNoFacet);
-      } else if (facetVals.isEmpty()) {
-        writer.addDocument(docNoFacet);
-      } else if (groupValue == null) {
-        writer.addDocument(docNoGroup);
-      } else {
-        writer.addDocument(doc);
-      }
+      doc.addAtom("content", contentStr);
+      doc.addAtom("group", groupValue);
+      writer.addDocument(doc);
     }
 
     DirectoryReader reader = writer.getReader();
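
Worth noting before the next file: the pattern throughout this test replaces parallel
Field/SortedSetDocValuesField instances with a one-time schema call plus plain addAtom
calls. A minimal sketch, assuming the lucene6005 APIs used above (getFieldTypes,
setMultiValued, newDocument, addAtom):

    // Declare once that "facet" may carry several atom values per document:
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("facet");

    // Each document then simply adds as many values as it has; the writer derives
    // the doc values that the grouped faceting above relies on from the declared type:
    Document doc = writer.newDocument();
    doc.addAtom("facet", "ams");
    doc.addAtom("facet", "dus");
    writer.addDocument(doc);
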
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java
index cbbfd60..6a787af 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java
@@ -17,13 +17,12 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
@@ -39,10 +38,6 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.mutable.MutableValueStr;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
 public class GroupingSearchTest extends LuceneTestCase {
 
   // Tests some very basic usages...
@@ -50,70 +45,67 @@
 
     final String groupField = "author";
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
         random(),
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    boolean canUseIDV = true;
+
     List<Document> documents = new ArrayList<>();
     // 0
-    Document doc = new Document();
-    addGroupField(doc, groupField, "author1", canUseIDV);
-    doc.add(new TextField("content", "random text", Field.Store.YES));
-    doc.add(new Field("id", "1", customType));
+    Document doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "random text");
+    doc.addStoredString("id", "1");
     documents.add(doc);
 
     // 1
-    doc = new Document();
-    addGroupField(doc, groupField, "author1", canUseIDV);
-    doc.add(new TextField("content", "some more random text", Field.Store.YES));
-    doc.add(new Field("id", "2", customType));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "some more random text");
+    doc.addStoredString("id", "2");
     documents.add(doc);
 
     // 2
-    doc = new Document();
-    addGroupField(doc, groupField, "author1", canUseIDV);
-    doc.add(new TextField("content", "some more random textual data", Field.Store.YES));
-    doc.add(new Field("id", "3", customType));
-    doc.add(new StringField("groupend", "x", Field.Store.NO));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author1");
+    doc.addLargeText("content", "some more random textual data");
+    doc.addStoredString("id", "3");
+    doc.addAtom("groupend", "x");
     documents.add(doc);
     w.addDocuments(documents);
     documents.clear();
 
     // 3
-    doc = new Document();
-    addGroupField(doc, groupField, "author2", canUseIDV);
-    doc.add(new TextField("content", "some random text", Field.Store.YES));
-    doc.add(new Field("id", "4", customType));
-    doc.add(new StringField("groupend", "x", Field.Store.NO));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author2");
+    doc.addLargeText("content", "some random text");
+    doc.addStoredString("id", "4");
+    doc.addAtom("groupend", "x");
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
-    addGroupField(doc, groupField, "author3", canUseIDV);
-    doc.add(new TextField("content", "some more random text", Field.Store.YES));
-    doc.add(new Field("id", "5", customType));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author3");
+    doc.addLargeText("content", "some more random text");
+    doc.addStoredString("id", "5");
     documents.add(doc);
 
     // 5
-    doc = new Document();
-    addGroupField(doc, groupField, "author3", canUseIDV);
-    doc.add(new TextField("content", "random", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
-    doc.add(new StringField("groupend", "x", Field.Store.NO));
+    doc = w.newDocument();
+    addGroupField(doc, groupField, "author3");
+    doc.addLargeText("content", "random");
+    doc.addStoredString("id", "6");
+    doc.addAtom("groupend", "x");
     documents.add(doc);
     w.addDocuments(documents);
     documents.clear();
 
     // 6 -- no author field
-    doc = new Document();
-    doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
-    doc.add(new StringField("groupend", "x", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
+    doc.addStoredString("id", "6");
+    doc.addAtom("groupend", "x");
 
     w.addDocument(doc);
 
@@ -121,7 +113,7 @@
     w.close();
 
     Sort groupSort = Sort.RELEVANCE;
-    GroupingSearch groupingSearch = createRandomGroupingSearch(groupField, groupSort, 5, canUseIDV);
+    GroupingSearch groupingSearch = createRandomGroupingSearch(groupField, groupSort, 5);
 
     TopGroups<?> groups = groupingSearch.search(indexSearcher, null, new TermQuery(new Term("content", "random")), 0, 10);
 
@@ -172,11 +164,8 @@
     dir.close();
   }
 
-  private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV) {
-    doc.add(new TextField(groupField, value, Field.Store.YES));
-    if (canUseIDV) {
-      doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
-    }
+  private void addGroupField(Document doc, String groupField, String value) {
+    doc.addAtom(groupField, value);
   }
 
   private void compareGroupValue(String expected, GroupDocs<?> group) {
@@ -202,7 +191,7 @@
     }
   }
 
-  private GroupingSearch createRandomGroupingSearch(String groupField, Sort groupSort, int docsInGroup, boolean canUseIDV) {
+  private GroupingSearch createRandomGroupingSearch(String groupField, Sort groupSort, int docsInGroup) {
     GroupingSearch groupingSearch;
     if (random().nextBoolean()) {
       ValueSource vs = new BytesRefFieldSource(groupField);
@@ -227,9 +216,8 @@
         random(),
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    doc.add(newField("group", "foo", StringField.TYPE_NOT_STORED));
-    doc.add(new SortedDocValuesField("group", new BytesRef("foo")));
+    Document doc = w.newDocument();
+    doc.addAtom("group", new BytesRef("foo"));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());
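
A compact sketch of the document-construction idiom this file now uses end to end
(newDocument, addAtom, addLargeText, addStoredString are the branch APIs from the hunks
above; the defaults noted in comments are what the test's assertions imply, not a full
spec):

    Document doc = w.newDocument();
    doc.addAtom("author", "author1");            // single token; usable for grouping/sorting
    doc.addLargeText("content", "random text");  // analyzed text, stored by default here
    doc.addStoredString("id", "1");              // stored only, not indexed
    doc.addAtom("groupend", "x");                // block marker for addDocuments(...) groups
    w.addDocument(doc);
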
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index ec3e829..0e22db6 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
@@ -60,60 +59,57 @@
 
     String groupField = "author";
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
                                random(),
                                dir,
                                newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     // 0
-    Document doc = new Document();
+    Document doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "random text", Field.Store.YES));
-    doc.add(new Field("id", "1", customType));
+    doc.addLargeText("content", "random text");
+    doc.addStoredString("id", "1");
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "some more random text", Field.Store.YES));
-    doc.add(new Field("id", "2", customType));
+    doc.addLargeText("content", "some more random text");
+    doc.addStoredString("id", "2");
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
-    doc.add(new TextField("content", "some more random textual data", Field.Store.YES));
-    doc.add(new Field("id", "3", customType));
+    doc.addLargeText("content", "some more random textual data");
+    doc.addStoredString("id", "3");
     w.addDocument(doc);
 
     // 3
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author2");
-    doc.add(new TextField("content", "some random text", Field.Store.YES));
-    doc.add(new Field("id", "4", customType));
+    doc.addLargeText("content", "some random text");
+    doc.addStoredString("id", "4");
     w.addDocument(doc);
 
     // 4
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
-    doc.add(new TextField("content", "some more random text", Field.Store.YES));
-    doc.add(new Field("id", "5", customType));
+    doc.addLargeText("content", "some more random text");
+    doc.addStoredString("id", "5");
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
+    doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
-    doc.add(new TextField("content", "random", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
+    doc.addLargeText("content", "random");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     // 6 -- no author field
-    doc = new Document();
-    doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
-    doc.add(new Field("id", "6", customType));
+    doc = w.newDocument();
+    doc.addLargeText("content", "random word stuck in alot of other text");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());
@@ -169,7 +165,7 @@
   }
 
   private void addGroupField(Document doc, String groupField, String value) {
-    doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
+    doc.addAtom(groupField, new BytesRef(value));
   }
 
   private AbstractFirstPassGroupingCollector<?> createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException {
@@ -552,33 +548,24 @@
 
     final List<List<Document>> updateDocs = new ArrayList<>();
 
-    FieldType groupEndType = new FieldType(StringField.TYPE_NOT_STORED);
-    groupEndType.setIndexOptions(IndexOptions.DOCS);
-    groupEndType.setOmitNorms(true);
-
     //System.out.println("TEST: index groups");
     for(BytesRef group : groupValues) {
       final List<Document> docs = new ArrayList<>();
       //System.out.println("TEST:   group=" + (group == null ? "null" : group.utf8ToString()));
       for(GroupDoc groupValue : groupMap.get(group)) {
-        Document doc = new Document();
+        Document doc = w.newDocument();
         docs.add(doc);
         if (groupValue.group != null) {
-          doc.add(newStringField("group", groupValue.group.utf8ToString(), Field.Store.YES));
-          doc.add(new SortedDocValuesField("group", BytesRef.deepCopyOf(groupValue.group)));
+          doc.addAtom("group", groupValue.group.utf8ToString());
         }
-        doc.add(newStringField("sort1", groupValue.sort1.utf8ToString(), Field.Store.NO));
-        doc.add(new SortedDocValuesField("sort1", BytesRef.deepCopyOf(groupValue.sort1)));
-        doc.add(newStringField("sort2", groupValue.sort2.utf8ToString(), Field.Store.NO));
-        doc.add(new SortedDocValuesField("sort2", BytesRef.deepCopyOf(groupValue.sort2)));
-        doc.add(new IntField("id", groupValue.id, Field.Store.NO));
-        doc.add(new NumericDocValuesField("id", groupValue.id));
-        doc.add(newTextField("content", groupValue.content, Field.Store.NO));
+        doc.addAtom("sort1", groupValue.sort1.utf8ToString());
+        doc.addAtom("sort2", groupValue.sort2.utf8ToString());
+        doc.addInt("id", groupValue.id);
+        doc.addLargeText("content", groupValue.content);
         //System.out.println("TEST:     doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
       }
       // So we can pull filter marking last doc in block:
-      final Field groupEnd = newField("groupend", "x", groupEndType);
-      docs.get(docs.size()-1).add(groupEnd);
+      docs.get(docs.size()-1).addAtom("groupend", "x");
       // Add as a doc block:
       w.addDocuments(docs);
       if (group != null && random().nextInt(7) == 4) {
@@ -588,7 +575,7 @@
 
     for(List<Document> docs : updateDocs) {
       // Just replaces docs w/ same docs:
-      w.updateDocuments(new Term("group", docs.get(0).get("group")), docs);
+      w.updateDocuments(new Term("group", docs.get(0).getString("group")), docs);
     }
 
     final DirectoryReader r = w.getReader();
@@ -669,29 +656,6 @@
                                                   random(),
                                                   dir,
                                                   newIndexWriterConfig(new MockAnalyzer(random())));
-      Document doc = new Document();
-      Document docNoGroup = new Document();
-      Field idvGroupField = new SortedDocValuesField("group", new BytesRef());
-      doc.add(idvGroupField);
-      docNoGroup.add(idvGroupField);
-
-      Field group = newStringField("group", "", Field.Store.NO);
-      doc.add(group);
-      Field sort1 = new SortedDocValuesField("sort1", new BytesRef());
-      doc.add(sort1);
-      docNoGroup.add(sort1);
-      Field sort2 = new SortedDocValuesField("sort2", new BytesRef());
-      doc.add(sort2);
-      docNoGroup.add(sort2);
-      Field content = newTextField("content", "", Field.Store.NO);
-      doc.add(content);
-      docNoGroup.add(content);
-      IntField id = new IntField("id", 0, Field.Store.NO);
-      doc.add(id);
-      NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
-      doc.add(idDV);
-      docNoGroup.add(id);
-      docNoGroup.add(idDV);
       final GroupDoc[] groupDocs = new GroupDoc[numDocs];
       for(int i=0;i<numDocs;i++) {
         final BytesRef groupValue;
@@ -712,23 +676,16 @@
         }
 
         groupDocs[i] = groupDoc;
+        Document doc = w.newDocument();
         if (groupDoc.group != null) {
-          group.setStringValue(groupDoc.group.utf8ToString());
-          idvGroupField.setBytesValue(BytesRef.deepCopyOf(groupDoc.group));
-        } else {
-          // TODO: not true
-          // Must explicitly set empty string, else eg if
-          // the segment has all docs missing the field then
-          // we get null back instead of empty BytesRef:
-          idvGroupField.setBytesValue(new BytesRef());
+          doc.addAtom("group", groupDoc.group.utf8ToString());
         }
-        sort1.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort1));
-        sort2.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort2));
-        content.setStringValue(groupDoc.content);
-        id.setIntValue(groupDoc.id);
-        idDV.setLongValue(groupDoc.id);
+        doc.addAtom("sort1", BytesRef.deepCopyOf(groupDoc.sort1));
+        doc.addAtom("sort2", BytesRef.deepCopyOf(groupDoc.sort2));
+        doc.addLargeText("content", groupDoc.content);
+        doc.addInt("id", groupDoc.id);
         if (groupDoc.group == null) {
-          w.addDocument(docNoGroup);
+          w.addDocument(doc);
         } else {
           w.addDocument(doc);
         }
@@ -1255,7 +1212,7 @@
       final GroupDocs<BytesRef> actualGroup = actual.groups[groupIDX];
       if (verifyGroupValues) {
         if (idvBasedImplsUsed) {
-          if (actualGroup.groupValue.length == 0) {
+          if (actualGroup.groupValue == null || actualGroup.groupValue.length == 0) {
             assertNull(expectedGroup.groupValue);
           } else {
             assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
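
The same consolidation applies to numerics in this file: the old IntField +
NumericDocValuesField pair becomes a single addInt call, and the doc-block update reads
the group key back with the typed getter. A minimal sketch using only calls from the
hunks above:

    Document doc = w.newDocument();
    doc.addInt("id", 42);             // one call covers what IntField + NumericDocValuesField did
    doc.addAtom("group", "author1");
    w.addDocument(doc);

    // Replacing a doc block keyed by its group term, reading the stored key back typed
    // (docs is the per-group List<Document> built in the hunk above):
    w.updateDocuments(new Term("group", docs.get(0).getString("group")), docs);
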
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
index 60a29d5..fce90ab 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
@@ -21,6 +21,7 @@
 import java.util.Collections;
 import java.util.Iterator;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
@@ -83,6 +84,13 @@
     fieldInfos = new FieldInfos(new FieldInfo[]{fieldInfo});
   }
 
+  final FieldTypes fieldTypes = new FieldTypes(null);
+
+  @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
   @Override
   public void addCoreClosedListener(CoreClosedListener listener) {
     addCoreClosedListenerAsReaderClosedListener(this, listener);
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
index d67dff6..b150246 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
@@ -24,9 +24,9 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Terms;
 
 /**
@@ -56,7 +56,7 @@
    */
 
   public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
-      String field, StoredDocument document, Analyzer analyzer) throws IOException {
+      String field, Document document, Analyzer analyzer) throws IOException {
     TokenStream ts = null;
 
     Fields vectors = reader.getTermVectors(docId);
@@ -167,13 +167,13 @@
   // convenience method
   public static TokenStream getTokenStream(IndexReader reader, int docId,
       String field, Analyzer analyzer) throws IOException {
-    StoredDocument doc = reader.document(docId);
+    Document doc = reader.document(docId);
     return getTokenStream(doc, field, analyzer);
   }
   
-  public static TokenStream getTokenStream(StoredDocument doc, String field,
+  public static TokenStream getTokenStream(Document doc, String field,
       Analyzer analyzer) {
-    String contents = doc.get(field);
+    String contents = doc.getString(field);
     if (contents == null) {
       throw new IllegalArgumentException("Field " + field
           + " in document is not stored and cannot be analyzed");
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index 14f364b..7844c99 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -31,6 +31,7 @@
 import java.util.TreeSet;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
@@ -78,10 +79,8 @@
  * <p>
  * Example usage:
  * <pre class="prettyprint">
- *   // configure field with offsets at index time
- *   FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
- *   offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
- *   Field body = new Field("body", "foobar", offsetsType);
+ *   // large text and short text fields are indexed with offsets by default:
+ *   doc.addLargeText("body", "foobar");
  *
  *   // retrieve highlights at query time 
  *   PostingsHighlighter highlighter = new PostingsHighlighter();
@@ -115,26 +114,45 @@
   /** Set the first time {@link #getScorer} is called,
    *  and then reused. */
   private PassageScorer defaultScorer;
+
+  private final FieldTypes fieldTypes;
   
   /**
    * Creates a new highlighter with {@link #DEFAULT_MAX_LENGTH}.
    */
   public PostingsHighlighter() {
-    this(DEFAULT_MAX_LENGTH);
+    this(null, DEFAULT_MAX_LENGTH);
+  }
+
+  /**
+   * Creates a new highlighter with {@link #DEFAULT_MAX_LENGTH}.
+   */
+  public PostingsHighlighter(FieldTypes fieldTypes) {
+    this(fieldTypes, DEFAULT_MAX_LENGTH);
+  }
+
+  /**
+   * Creates a new highlighter, specifying maximum content length.
+   */
+  public PostingsHighlighter(int maxLength) {
+    this(null, maxLength);
   }
   
   /**
    * Creates a new highlighter, specifying maximum content length.
+   * @param fieldTypes {@link FieldTypes} (null is allowed).  If non-null, we default to
+   *   WholeBreakIterator when highlighting short text and atom fields.
    * @param maxLength maximum content size to process.
    * @throws IllegalArgumentException if <code>maxLength</code> is negative or <code>Integer.MAX_VALUE</code>
    */
-  public PostingsHighlighter(int maxLength) {
+  public PostingsHighlighter(FieldTypes fieldTypes, int maxLength) {
     if (maxLength < 0 || maxLength == Integer.MAX_VALUE) {
       // two reasons: no overflow problems in BreakIterator.preceding(offset+1),
       // our sentinel in the offsets queue uses this value to terminate.
       throw new IllegalArgumentException("maxLength must be < Integer.MAX_VALUE");
     }
     this.maxLength = maxLength;
+    this.fieldTypes = fieldTypes;
   }
   
   /** Returns the {@link BreakIterator} to use for
@@ -142,7 +160,16 @@
    *  {@link BreakIterator#getSentenceInstance(Locale)} by default;
    *  subclasses can override to customize. */
   protected BreakIterator getBreakIterator(String field) {
-    return BreakIterator.getSentenceInstance(Locale.ROOT);
+    if (fieldTypes != null) {
+      FieldTypes.ValueType valueType = fieldTypes.getValueType(field);
+      if (valueType == FieldTypes.ValueType.TEXT) {
+        return BreakIterator.getSentenceInstance(Locale.ROOT);
+      } else {
+        return new WholeBreakIterator();
+      }
+    } else {
+      return BreakIterator.getSentenceInstance(Locale.ROOT);
+    }
   }
 
   /** Returns the {@link PassageFormatter} to use for
@@ -387,6 +414,7 @@
     Map<String,Object[]> highlights = new HashMap<>();
     for (int i = 0; i < fields.length; i++) {
       String field = fields[i];
+      checkField(field);
       int numPassages = maxPassages[i];
       Term floor = new Term(field, "");
       Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
@@ -410,6 +438,12 @@
     return highlights;
   }
 
+  private void checkField(String fieldName) {
+    if (fieldTypes != null && fieldTypes.getHighlighted(fieldName) == false) {
+      throw new IllegalArgumentException("field=\"" + fieldName + "\" was not indexed with FieldTypes.enableHighlighting");
+    }
+  }
+
   /** Loads the String values for each field X docID to be
    *  highlighted.  By default this loads from stored
    *  fields, but a subclass can change the source.  This
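
A hedged usage sketch for the FieldTypes-aware constructor added above (the query,
searcher, and topDocs setup is assumed boilerplate; highlight(String, Query,
IndexSearcher, TopDocs) is the existing PostingsHighlighter entry point):

    FieldTypes fieldTypes = writer.getFieldTypes();  // or reader.getFieldTypes()
    PostingsHighlighter highlighter = new PostingsHighlighter(fieldTypes);
    // TEXT fields keep sentence passages; short text and atom fields fall back to
    // WholeBreakIterator, and non-highlighted fields now fail fast in checkField.
    String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
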
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
index f0ec6fd..8052bc9 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
@@ -17,9 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.StoredFieldVisitor;
@@ -60,6 +57,20 @@
   private char multiValuedSeparator = ' ';
   private final BoundaryScanner boundaryScanner;
   private boolean discreteMultiValueHighlighting = false;
+
+  protected static class FieldValue {
+    public final String name;
+    public final String value;
+    public final boolean hasTermVectors;
+    public final boolean tokenized;
+
+    public FieldValue(String name, String value, boolean hasTermVectors, boolean tokenized) {
+      this.name = name;
+      this.value = value;
+      this.hasTermVectors = hasTermVectors;
+      this.tokenized = tokenized;
+    }
+  }
   
   protected BaseFragmentsBuilder(){
     this( new String[]{ "<b>" }, new String[]{ "</b>" } );
@@ -124,7 +135,7 @@
     }
 
     List<WeightedFragInfo> fragInfos = fieldFragList.getFragInfos();
-    Field[] values = getFields( reader, docId, fieldName );
+    FieldValue[] values = getFields( reader, docId, fieldName );
     if( values.length == 0 ) {
       return null;
     }
@@ -146,16 +157,14 @@
     return fragments.toArray( new String[fragments.size()] );
   }
   
-  protected Field[] getFields( IndexReader reader, int docId, final String fieldName) throws IOException {
+  protected FieldValue[] getFields( IndexReader reader, int docId, final String fieldName) throws IOException {
     // according to javadoc, doc.getFields(fieldName) cannot be used with lazy loaded field???
-    final List<Field> fields = new ArrayList<>();
+    final List<FieldValue> fields = new ArrayList<>();
     reader.document(docId, new StoredFieldVisitor() {
         
         @Override
         public void stringField(FieldInfo fieldInfo, String value) {
-          FieldType ft = new FieldType(TextField.TYPE_STORED);
-          ft.setStoreTermVectors(fieldInfo.hasVectors());
-          fields.add(new Field(fieldInfo.name, value, ft));
+          fields.add(new FieldValue(fieldInfo.name, value, fieldInfo.hasVectors(), true));
         }
 
         @Override
@@ -163,10 +172,10 @@
           return fieldInfo.name.equals(fieldName) ? Status.YES : Status.NO;
         }
       });
-    return fields.toArray(new Field[fields.size()]);
+    return fields.toArray(new FieldValue[fields.size()]);
   }
 
-  protected String makeFragment( StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
+  protected String makeFragment( StringBuilder buffer, int[] index, FieldValue[] values, WeightedFragInfo fragInfo,
       String[] preTags, String[] postTags, Encoder encoder ){
     StringBuilder fragment = new StringBuilder();
     final int s = fragInfo.getStartOffset();
@@ -187,15 +196,15 @@
     return fragment.toString();
   }
 
-  protected String getFragmentSourceMSO( StringBuilder buffer, int[] index, Field[] values,
+  protected String getFragmentSourceMSO( StringBuilder buffer, int[] index, FieldValue[] values,
       int startOffset, int endOffset, int[] modifiedStartOffset ){
     while( buffer.length() < endOffset && index[0] < values.length ){
-      buffer.append( values[index[0]++].stringValue() );
+      buffer.append( values[index[0]++].value );
       buffer.append( getMultiValuedSeparator() );
     }
     int bufferLength = buffer.length();
     // we added the multi value char to the last buffer, ignore it
-    if (values[index[0] - 1].fieldType().tokenized()) {
+    if (values[index[0] - 1].tokenized) {
       bufferLength--;
     }
     int eo = bufferLength < endOffset ? bufferLength : boundaryScanner.findEndOffset( buffer, endOffset );
@@ -203,10 +212,10 @@
     return buffer.substring( modifiedStartOffset[0], eo );
   }
   
-  protected String getFragmentSource( StringBuilder buffer, int[] index, Field[] values,
+  protected String getFragmentSource( StringBuilder buffer, int[] index, FieldValue[] values,
       int startOffset, int endOffset ){
     while( buffer.length() < endOffset && index[0] < values.length ){
-      buffer.append( values[index[0]].stringValue() );
+      buffer.append( values[index[0]].value );
       buffer.append( multiValuedSeparator );
       index[0]++;
     }
@@ -214,26 +223,26 @@
     return buffer.substring( startOffset, eo );
   }
 
-  protected List<WeightedFragInfo> discreteMultiValueHighlighting(List<WeightedFragInfo> fragInfos, Field[] fields) {
+  protected List<WeightedFragInfo> discreteMultiValueHighlighting(List<WeightedFragInfo> fragInfos, FieldValue[] fields) {
     Map<String, List<WeightedFragInfo>> fieldNameToFragInfos = new HashMap<>();
-    for (Field field : fields) {
-      fieldNameToFragInfos.put(field.name(), new ArrayList<WeightedFragInfo>());
+    for (FieldValue field : fields) {
+      fieldNameToFragInfos.put(field.name, new ArrayList<WeightedFragInfo>());
     }
 
     fragInfos: for (WeightedFragInfo fragInfo : fragInfos) {
       int fieldStart;
       int fieldEnd = 0;
-      for (Field field : fields) {
-        if (field.stringValue().isEmpty()) {
+      for (FieldValue field : fields) {
+        if (field.value.isEmpty()) {
           fieldEnd++;
           continue;
         }
         fieldStart = fieldEnd;
-        fieldEnd += field.stringValue().length() + 1; // + 1 for going to next field with same name.
+        fieldEnd += field.value.length() + 1; // + 1 for going to next field with same name.
 
         if (fragInfo.getStartOffset() >= fieldStart && fragInfo.getEndOffset() >= fieldStart &&
             fragInfo.getStartOffset() <= fieldEnd && fragInfo.getEndOffset() <= fieldEnd) {
-          fieldNameToFragInfos.get(field.name()).add(fragInfo);
+          fieldNameToFragInfos.get(field.name).add(fragInfo);
           continue fragInfos;
         }
 
@@ -282,7 +291,7 @@
           }
         }
         WeightedFragInfo weightedFragInfo = new WeightedFragInfo(fragStart, fragEnd, subInfos, boost);
-        fieldNameToFragInfos.get(field.name()).add(weightedFragInfo);
+        fieldNameToFragInfos.get(field.name).add(weightedFragInfo);
       }
     }
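
The FieldValue carrier introduced above decouples the fragments builder from the document
API: only (name, value, hasTermVectors, tokenized) survive the visitor. A sketch of what a
subclass overriding getFields now returns (the cached value is hypothetical; the visitor
wiring in the hunk is unchanged):

    @Override
    protected FieldValue[] getFields(IndexReader reader, int docId, String fieldName) {
      // Supply the value from elsewhere (e.g. an external cache) instead of stored fields:
      return new FieldValue[] {
        new FieldValue(fieldName, "externally cached value", true, true)
      };
    }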
 
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 29c307a..9c5a0dc 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -40,29 +40,6 @@
   private final String fieldName;
   LinkedList<TermInfo> termList = new LinkedList<>();
   
-  //public static void main( String[] args ) throws Exception {
-  //  Analyzer analyzer = new WhitespaceAnalyzer(Version.LATEST);
-  //  QueryParser parser = new QueryParser(Version.LATEST,  "f", analyzer );
-  //  Query query = parser.parse( "a x:b" );
-  //  FieldQuery fieldQuery = new FieldQuery( query, true, false );
-    
-  //  Directory dir = new RAMDirectory();
-  //  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LATEST, analyzer));
-  //  Document doc = new Document();
-  //  FieldType ft = new FieldType(TextField.TYPE_STORED);
-  //  ft.setStoreTermVectors(true);
-  //  ft.setStoreTermVectorOffsets(true);
-  //  ft.setStoreTermVectorPositions(true);
-  //  doc.add( new Field( "f", ft, "a a a b b c a b b c d e f" ) );
-  //  doc.add( new Field( "f", ft, "b a b a f" ) );
-  //  writer.addDocument( doc );
-  //  writer.close();
-    
-  //  IndexReader reader = IndexReader.open(dir1);
-  //  new FieldTermStack( reader, 0, "f", fieldQuery );
-  //  reader.close();
-  //}
-
   /**
    * a constructor.
    * 
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
index 1e4fe66..3f73c73 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
@@ -23,19 +23,16 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -45,8 +42,8 @@
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class HighlighterPhraseTest extends LuceneTestCase {
   private static final String FIELD = "text";
@@ -55,13 +52,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectors(true);
-      document.add(new Field(FIELD, new TokenStreamConcurrent(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new TokenStreamConcurrent());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -96,14 +93,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectors(true);
-      document.add(new Field(FIELD, new TokenStreamConcurrent(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new TokenStreamConcurrent());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -159,14 +155,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectors(true);
-      document.add(new Field(FIELD, new TokenStreamSparse(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new TokenStreamSparse());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -201,13 +196,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-
-      FieldType customType = new FieldType(TextField.TYPE_STORED);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectors(true);
-      document.add(new Field(FIELD, TEXT, customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, TEXT);
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -240,13 +235,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectors(true);
-      document.add(new Field(FIELD, new TokenStreamSparse(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new TokenStreamSparse());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
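
Each test in this file now follows the same three-call schema setup instead of building a
per-field FieldType; a minimal consolidated sketch (FIELD, TEXT, directory, and the
analyzer are the test's own, and the try/finally close from the hunks is kept):

    IndexWriter indexWriter = new IndexWriter(directory,
        newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
    FieldTypes fieldTypes = indexWriter.getFieldTypes();
    fieldTypes.enableTermVectors(FIELD);
    fieldTypes.enableTermVectorOffsets(FIELD);
    fieldTypes.enableTermVectorPositions(FIELD);
    try {
      Document document = indexWriter.newDocument();
      document.addLargeText(FIELD, TEXT);
      indexWriter.addDocument(document);
    } finally {
      indexWriter.close();
    }
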
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index bbc79d4..79f7050 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -45,17 +45,12 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.CommonTermsQuery;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -66,7 +61,6 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
@@ -114,18 +108,6 @@
   int numHighlights = 0;
   MockAnalyzer analyzer;
   TopDocs hits;
-  FieldType fieldType;//see doc()
-
-  final FieldType FIELD_TYPE_TV;
-  {
-    FieldType fieldType = new FieldType(TextField.TYPE_STORED);
-    fieldType.setStoreTermVectors(true);
-    fieldType.setStoreTermVectorPositions(true);
-    fieldType.setStoreTermVectorPayloads(true);
-    fieldType.setStoreTermVectorOffsets(true);
-    fieldType.freeze();
-    FIELD_TYPE_TV = fieldType;
-  }
 
   String[] texts = {
       "Hello this is a piece of text that is very long and contains too much preamble and the meat is really here which says kennedy has been shot",
@@ -150,8 +132,8 @@
 
 
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String storedField = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String storedField = doc.getString(FIELD_NAME);
 
       TokenStream stream = TokenSources.getAnyTokenStream(searcher
           .getIndexReader(), hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
@@ -178,8 +160,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(scorer);
 
-    StoredDocument doc = searcher.doc(hits.scoreDocs[0].doc);
-    String storedField = doc.get(FIELD_NAME);
+    Document doc = searcher.doc(hits.scoreDocs[0].doc);
+    String storedField = doc.getString(FIELD_NAME);
 
     TokenStream stream = TokenSources.getAnyTokenStream(searcher
         .getIndexReader(), hits.scoreDocs[0].doc, FIELD_NAME, doc, analyzer);
@@ -189,7 +171,7 @@
     assertEquals("Hello this is a piece of text that is <B>very</B> <B>long</B> and contains too much preamble and the meat is really here which says kennedy has been shot", fragment);
     
     doc = searcher.doc(hits.scoreDocs[1].doc);
-    storedField = doc.get(FIELD_NAME);
+    storedField = doc.getString(FIELD_NAME);
 
     stream = TokenSources.getAnyTokenStream(searcher
         .getIndexReader(), hits.scoreDocs[1].doc, FIELD_NAME, doc, analyzer);
@@ -232,8 +214,8 @@
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
     Highlighter highlighter = new Highlighter(scorer);
 
-    StoredDocument doc = searcher.doc(hits.scoreDocs[0].doc);
-    String storedField = doc.get(FIELD_NAME);
+    Document doc = searcher.doc(hits.scoreDocs[0].doc);
+    String storedField = doc.getString(FIELD_NAME);
 
     TokenStream stream = TokenSources.getAnyTokenStream(searcher
         .getIndexReader(), hits.scoreDocs[0].doc, FIELD_NAME, doc, analyzer);
@@ -243,7 +225,7 @@
     assertEquals("Hello this is a piece of text that is <B>very</B> <B>long</B> and contains too much preamble and the meat is really here which says kennedy has been shot", fragment);
     
     doc = searcher.doc(hits.scoreDocs[1].doc);
-    storedField = doc.get(FIELD_NAME);
+    storedField = doc.getString(FIELD_NAME);
 
     stream = TokenSources.getAnyTokenStream(searcher
         .getIndexReader(), hits.scoreDocs[1].doc, FIELD_NAME, doc, analyzer);
@@ -315,7 +297,7 @@
     Highlighter highlighter = new Highlighter(scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
+      String text = searcher.doc(hits.scoreDocs[i].doc).getString(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -393,8 +375,8 @@
     Highlighter highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -423,8 +405,8 @@
     highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -453,8 +435,8 @@
     highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -479,8 +461,8 @@
     Highlighter highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -504,8 +486,8 @@
     Highlighter highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -529,10 +511,9 @@
     Highlighter highlighter = new Highlighter(this, scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
-
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -547,7 +528,7 @@
   
   public void testNumericRangeQuery() throws Exception {
     // doesn't currently highlight, but make sure it doesn't cause exception either
-    query = NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true);
+    query = new ConstantScoreQuery(reader.getFieldTypes().newIntRangeFilter(NUMERIC_FIELD_NAME, 2, true, 6, true));
     searcher = newSearcher(reader);
     hits = searcher.search(query, 100);
     int maxNumFragmentsRequired = 2;
@@ -615,7 +596,7 @@
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = "parent document";
-      StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
       
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -641,8 +622,8 @@
     highlighter.setTextFragmenter(new SimpleFragmenter(40));
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -664,8 +645,8 @@
     int maxNumFragmentsRequired = 2;
 
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
@@ -695,8 +676,8 @@
     Highlighter highlighter = new Highlighter(this, scorer);
   
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5));
@@ -719,7 +700,7 @@
     highlighter = new Highlighter(this, scorer);
 
     for (int i = 0; i < hits.totalHits; i++) {
-      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
+      String text = searcher.doc(hits.scoreDocs[i].doc).getString(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20));
@@ -750,8 +731,8 @@
     Highlighter highlighter = new Highlighter(this,scorer);
     
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -822,8 +803,8 @@
     highlighter.setTextFragmenter(new SimpleFragmenter(40));
     int maxNumFragmentsRequired = 2;
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -1017,9 +998,10 @@
     hits = searcher.search(query, null, 1000);
 
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
-      TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);      int maxNumFragmentsRequired = 2;
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(HighlighterTest.FIELD_NAME);
+      int maxNumFragmentsRequired = 2;
+      TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
       String fragmentSeparator = "...";
       QueryScorer scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
 
@@ -1041,8 +1023,8 @@
     numHighlights = 0;
 
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(HighlighterTest.FIELD_NAME);
       TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
@@ -1066,9 +1048,10 @@
     numHighlights = 0;
 
     for (int i = 0; i < hits.totalHits; i++) {
-      final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-      String text = doc.get(FIELD_NAME);
-      TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);      int maxNumFragmentsRequired = 2;
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String text = doc.getString(HighlighterTest.FIELD_NAME);
+      int maxNumFragmentsRequired = 2;
+      TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
       String fragmentSeparator = "...";
       QueryScorer scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
 
@@ -1240,8 +1223,8 @@
         doSearching(new TermQuery(new Term(FIELD_NAME, "kennedy")));
         numHighlights = 0;
         for (int i = 0; i < hits.totalHits; i++) {
-          final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-          String text = doc.get(FIELD_NAME);
+          Document doc = searcher.doc(hits.scoreDocs[i].doc);
+          String text = doc.getString(FIELD_NAME);
           TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
@@ -1255,8 +1238,8 @@
 
         numHighlights = 0;
         for (int i = 0; i < hits.totalHits; i++) {
-          final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-          String text = doc.get(FIELD_NAME);
+          Document doc = searcher.doc(hits.scoreDocs[i].doc);
+          String text = doc.getString(FIELD_NAME);
           TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
@@ -1267,8 +1250,8 @@
 
         numHighlights = 0;
         for (int i = 0; i < hits.totalHits; i++) {
-          final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-          String text = doc.get(FIELD_NAME);
+          Document doc = searcher.doc(hits.scoreDocs[i].doc);
+          String text = doc.getString(FIELD_NAME);
           TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
@@ -1376,7 +1359,7 @@
         // new Highlighter(HighlighterTest.this, new QueryTermScorer(query));
 
         for (int i = 0; i < hits.totalHits; i++) {
-          String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
+          String text = searcher.doc(hits.scoreDocs[i].doc).getString(FIELD_NAME);
           TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
@@ -1399,8 +1382,8 @@
         doSearching(new TermQuery(new Term(FIELD_NAME, "kennedy")));
 
         for (int i = 0; i < hits.totalHits; i++) {
-          final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-          String text = doc.get(FIELD_NAME);
+          Document doc = searcher.doc(hits.scoreDocs[i].doc);
+          String text = doc.getString(FIELD_NAME);
           TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
@@ -1552,8 +1535,8 @@
         int maxNumFragmentsRequired = 3;
 
         for (int i = 0; i < hits.totalHits; i++) {
-          final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-          String text = doc.get(FIELD_NAME);
+          Document doc = searcher.doc(hits.scoreDocs[i].doc);
+          String text = doc.getString(FIELD_NAME);
           TokenStream tokenStream = TokenSources.getAnyTokenStream(reader, hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, HighlighterTest.this, false);
 
@@ -1885,12 +1868,18 @@
     searchIndex();
   }
   
+  private Document doc(IndexWriter writer, String f, String v) {
+    Document doc = writer.newDocument();
+    doc.addLargeText(f, v);
+    return doc;
+  }
+  
   private void makeIndex() throws IOException {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
-    writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
-    writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
-    writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
-    writer.addDocument( doc( "t_text1", "more random words for second field" ) );
+    writer.addDocument( doc( writer, "t_text1", "random words for highlighting tests del" ) );
+    writer.addDocument( doc( writer, "t_text1", "more random words for second field del" ) );
+    writer.addDocument( doc( writer, "t_text1", "random words for highlighting tests del" ) );
+    writer.addDocument( doc( writer, "t_text1", "more random words for second field" ) );
     writer.forceMerge(1);
     writer.close();
   }
@@ -1915,8 +1904,8 @@
 
     TopDocs hits = searcher.search(query, null, 10);
     for( int i = 0; i < hits.totalHits; i++ ){
-      StoredDocument doc = searcher.doc( hits.scoreDocs[i].doc );
-      String result = h.getBestFragment( a, "t_text1", doc.get( "t_text1" ));
+      Document doc = searcher.doc(hits.scoreDocs[i].doc);
+      String result = h.getBestFragment(a, "t_text1", doc.getString("t_text1"));
       if (VERBOSE) System.out.println("result:" +  result);
       assertEquals("more <B>random</B> words for second field", result);
     }
@@ -1928,11 +1917,16 @@
     final String text = "random words and words";//"words" at positions 1 & 4
 
     Analyzer analyzer = new MockPayloadAnalyzer();//sets payload to "pos: X" (where X is position #)
+    Directory dir = newDirectory();
     try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
       writer.deleteAll();
-      Document doc = new Document();
-
-      doc.add(new Field(FIELD_NAME, text, fieldType));
+      Document doc = writer.newDocument();
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors(FIELD_NAME);
+      fieldTypes.enableTermVectorPositions(FIELD_NAME);
+      fieldTypes.enableTermVectorOffsets(FIELD_NAME);
+      fieldTypes.enableTermVectorPayloads(FIELD_NAME);
+      doc.addLargeText(FIELD_NAME, text);
       writer.addDocument(doc);
       writer.commit();
     }
@@ -1953,6 +1947,7 @@
       String result = h.getBestFragment(stream, text);
       assertEquals("random <B>words</B> and words", result);//only highlight first "word"
     }
+    dir.close();
   }
   
   /*
@@ -2002,7 +1997,7 @@
   public void assertExpectedHighlightCount(final int maxNumFragmentsRequired,
       final int expectedHighlights) throws Exception {
     for (int i = 0; i < hits.totalHits; i++) {
-      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
+      String text = searcher.doc(hits.scoreDocs[i].doc).getString(FIELD_NAME);
       TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
@@ -2029,36 +2024,37 @@
     //Most tests use this setup:
     analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
     ramDir = newDirectory();
-    fieldType = random().nextBoolean() ? FIELD_TYPE_TV : TextField.TYPE_STORED;
     IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
-
+    if (random().nextBoolean()) {
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors(FIELD_NAME);
+      fieldTypes.enableTermVectorPositions(FIELD_NAME);
+      fieldTypes.enableTermVectorOffsets(FIELD_NAME);
+      fieldTypes.enableTermVectorPayloads(FIELD_NAME);
+    }
     for (String text : texts) {
-      writer.addDocument(doc(FIELD_NAME, text));
+      writer.addDocument(doc(writer, FIELD_NAME, text));
     }
 
     // a few tests need other docs...:
-    Document doc = new Document();
-    doc.add(new IntField(NUMERIC_FIELD_NAME, 1, Field.Store.NO));
-    doc.add(new StoredField(NUMERIC_FIELD_NAME, 1));
+    Document doc = writer.newDocument();
+    doc.addInt(NUMERIC_FIELD_NAME, 1);
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new IntField(NUMERIC_FIELD_NAME, 3, Field.Store.NO));
-    doc.add(new StoredField(NUMERIC_FIELD_NAME, 3));
+    doc = writer.newDocument();
+    doc.addInt(NUMERIC_FIELD_NAME, 3);
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new IntField(NUMERIC_FIELD_NAME, 5, Field.Store.NO));
-    doc.add(new StoredField(NUMERIC_FIELD_NAME, 5));
+    doc = writer.newDocument();
+    doc.addInt(NUMERIC_FIELD_NAME, 5);
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new IntField(NUMERIC_FIELD_NAME, 7, Field.Store.NO));
-    doc.add(new StoredField(NUMERIC_FIELD_NAME, 7));
+    doc = writer.newDocument();
+    doc.addInt(NUMERIC_FIELD_NAME, 7);
     writer.addDocument(doc);
 
-    Document childDoc = doc(FIELD_NAME, "child document");
-    Document parentDoc = doc(FIELD_NAME, "parent document");
+    Document childDoc = doc(writer, FIELD_NAME, "child document");
+    Document parentDoc = doc(writer, FIELD_NAME, "parent document");
     writer.addDocuments(Arrays.asList(childDoc, parentDoc));
     
     writer.forceMerge(1);
@@ -2077,12 +2073,6 @@
     super.tearDown();
   }
 
-  private Document doc(String name, String value) {
-    Document d = new Document();
-    d.add(new Field(name, value, fieldType));//fieldType is randomly chosen for term vectors in setUp
-    return d;
-  }
-
   private static Token createToken(String term, int start, int offset)
   {
     return new Token(term, start, offset);
@@ -2259,8 +2249,8 @@
         throws Exception {
 
       for (int i = 0; i < hits.totalHits; i++) {
-        final StoredDocument doc = searcher.doc(hits.scoreDocs[i].doc);
-        String text = doc.get(HighlighterTest.FIELD_NAME);
+        Document doc = searcher.doc(hits.scoreDocs[i].doc);
+        String text = doc.getString(HighlighterTest.FIELD_NAME);
         int maxNumFragmentsRequired = 2;
         String fragmentSeparator = "...";
         Scorer scorer = null;
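
On the search side, the HighlighterTest hunks above make two recurring rewrites: StoredDocument gives way to the unified Document class with typed getters, and NumericRangeQuery gives way to a range filter built from the reader's schema. Condensed, with both forms taken verbatim from the hunks:

    // Stored-field access: StoredDocument + untyped get() becomes Document + getString()
    Document doc = searcher.doc(hits.scoreDocs[i].doc);
    String text = doc.getString(FIELD_NAME);

    // Numeric range: NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true)
    // becomes a schema-built filter wrapped for scoring
    query = new ConstantScoreQuery(reader.getFieldTypes().newIntRangeFilter(NUMERIC_FIELD_NAME, 2, true, 6, true));

The same change also removes the test's randomized fieldType member: the random term-vector choice now happens once, by conditionally enabling the options on FieldTypes in setUp.
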
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
index b231203..63498b4 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
@@ -29,9 +29,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -96,12 +94,12 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(null));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new OverlappingTokenStream());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -139,13 +137,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(null));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorOffsets(true);
-      customType.setStoreTermVectorPositions(true);
-      document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new OverlappingTokenStream());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -183,12 +181,12 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(null));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new OverlappingTokenStream());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -226,12 +224,12 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(null));
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorOffsets(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorOffsets(true);
-      document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new OverlappingTokenStream());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -268,13 +266,13 @@
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(null));
+
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.enableTermVectors(FIELD);
+    fieldTypes.enableTermVectorPositions(FIELD);
     try {
-      final Document document = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorOffsets(false);
-      customType.setStoreTermVectorPositions(true);
-      document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
+      final Document document = indexWriter.newDocument();
+      document.addLargeText(FIELD, new OverlappingTokenStream());
       indexWriter.addDocument(document);
     } finally {
       indexWriter.close();
@@ -310,11 +308,6 @@
   public void testPayloads() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    FieldType myFieldType = new FieldType(TextField.TYPE_NOT_STORED);
-    myFieldType.setStoreTermVectors(true);
-    myFieldType.setStoreTermVectorOffsets(true);
-    myFieldType.setStoreTermVectorPositions(true);
-    myFieldType.setStoreTermVectorPayloads(true);
 
     curOffset = 0;
 
@@ -325,8 +318,13 @@
       getToken("high")
     };
 
-    Document doc = new Document();
-    doc.add(new Field("field", new CannedTokenStream(tokens), myFieldType));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorPayloads("field");
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", new CannedTokenStream(tokens));
     writer.addDocument(doc);
   
     IndexReader reader = writer.getReader();
@@ -394,15 +392,19 @@
 
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    FieldType myFieldType = new FieldType(TextField.TYPE_NOT_STORED);
-    myFieldType.setStoreTermVectors(true);
-    myFieldType.setStoreTermVectorOffsets(true);
-    myFieldType.setStoreTermVectorPositions(storeTermVectorPositions);
-    //payloads require positions; it will throw an error otherwise
-    myFieldType.setStoreTermVectorPayloads(storeTermVectorPositions && random().nextBoolean());
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    if (storeTermVectorPositions) {
+      fieldTypes.enableTermVectorPositions("field");
+      // payloads require positions; enabling payloads without positions throws an error
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPayloads("field");
+      }
+    }
 
-    Document doc = new Document();
-    doc.add(new Field("field", rTokenStream, myFieldType));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", rTokenStream);
     writer.addDocument(doc);
 
     IndexReader reader = writer.getReader();
@@ -413,12 +415,12 @@
 
     //sometimes check payloads
     PayloadAttribute payloadAttribute = null;
-    if (myFieldType.storeTermVectorPayloads() && usually()) {
+    if (fieldTypes.getTermVectorPayloads("field") && usually()) {
       payloadAttribute = vectorTokenStream.addAttribute(PayloadAttribute.class);
     }
     assertTokenStreamContents(vectorTokenStream,
         rTokenStream.getTerms(), rTokenStream.getStartOffsets(), rTokenStream.getEndOffsets(),
-        myFieldType.storeTermVectorPositions() ? rTokenStream.getPositionsIncrements() : null);
+        fieldTypes.getTermVectorPositions("field") ? rTokenStream.getPositionsIncrements() : null);
     //test payloads
     if (payloadAttribute != null) {
       vectorTokenStream.reset();
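
TokenSourcesTest also exercises the read side of the schema: the introspection calls that used to live on the per-field FieldType now live on FieldTypes, keyed by field name. A minimal sketch of the correspondence, assuming nothing beyond the getters used in the hunk above:

    // Before: ask the FieldType instance attached to the Field
    boolean positions = myFieldType.storeTermVectorPositions();
    boolean payloads  = myFieldType.storeTermVectorPayloads();

    // After: ask the shared schema by field name
    FieldTypes fieldTypes = writer.getFieldTypes();
    boolean positions = fieldTypes.getTermVectorPositions("field");
    boolean payloads  = fieldTypes.getTermVectorPayloads("field");

The ordering constraint carries over unchanged: payloads still require positions, which is why the rewritten test only calls enableTermVectorPayloads inside the block guarded by storeTermVectorPositions.
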
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java
index 8e1e26e..0147b9a 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java
@@ -21,15 +21,10 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermFilter;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
@@ -42,6 +37,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermFilter;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.WildcardQuery;
@@ -69,15 +65,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -121,15 +114,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -173,15 +163,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -225,15 +212,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -286,15 +270,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -396,15 +377,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -449,15 +427,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
 
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
 
     IndexReader ir = iw.getReader();
@@ -492,15 +467,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
 
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
 
     IndexReader ir = iw.getReader();
@@ -533,15 +505,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -575,15 +544,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -616,15 +582,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -658,15 +621,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -700,15 +660,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -743,15 +700,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -787,15 +741,10 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("Test a one sentence document.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
-    
+
     IndexReader ir = iw.getReader();
     iw.close();
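
TestMultiTermHighlighting previously reused one mutable Field, resetting its string value between addDocument calls; every hunk in this file replaces that idiom with a fresh writer-created document per add. The explicit DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS setup disappears with no replacement, which suggests (an inference, not stated in the patch) that addLargeText indexes offsets by default on this branch. The pattern, condensed:

    // Before: one mutable Field shared across documents
    Field body = new Field("body", "", offsetsType);
    Document doc = new Document();
    doc.add(body);
    body.setStringValue("This is a test.");
    iw.addDocument(doc);
    body.setStringValue("Test a one sentence document.");
    iw.addDocument(doc);

    // After: a fresh schema-aware document per add
    Document doc = iw.newDocument();
    doc.addLargeText("body", "This is a test.");
    iw.addDocument(doc);
    doc = iw.newDocument();
    doc.addLargeText("body", "Test a one sentence document.");
    iw.addDocument(doc);
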
     
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
index 177fcc0..24dcd1e 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
@@ -29,15 +29,10 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -59,15 +54,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
-    body.setStringValue("Highlighting the first term. Hope it works.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -129,15 +121,10 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    final FieldType fieldType = new FieldType(TextField.TYPE_STORED);
-    fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    final Field body = new Field("body", bodyText, fieldType);
-    
-    Document doc = new Document();
-    doc.add(body);
-    
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", bodyText);
     iw.addDocument(doc);
-    
+
     IndexReader ir = iw.getReader();
     iw.close();
     
@@ -160,19 +147,14 @@
   // simple test highlighting last word.
   public void testHighlightLastWord() throws Exception {
     Directory dir = newDirectory();
-    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriterConfig iwc = newIndexWriterConfig();
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test");
     iw.addDocument(doc);
-    
+
     IndexReader ir = iw.getReader();
     iw.close();
     
@@ -197,15 +179,12 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.");
     iw.addDocument(doc);
-    body.setStringValue("Test a one sentence document.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Test a one sentence document.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -232,17 +211,13 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
-    
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("body");
+
+    Document doc = iw.newDocument();
     for(int i = 0; i < 3 ; i++) {
-      Field body = new Field("body", "", offsetsType);
-      body.setStringValue("This is a multivalued field");
-      doc.add(body);
+      doc.addLargeText("body", "This is a multivalued field");
     }
-    
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -268,19 +243,14 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Field title = new Field("title", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    doc.add(title);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
-    title.setStringValue("I am hoping for the best.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    doc.addLargeText("title", "I am hoping for the best.");
     iw.addDocument(doc);
-    body.setStringValue("Highlighting the first term. Hope it works.");
-    title.setStringValue("But best may not be good enough.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
+    doc.addLargeText("title", "But best may not be good enough.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -309,17 +279,14 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
-    body.setStringValue("Highlighting the first term. Hope it works.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
     iw.addDocument(doc);
-    
+
     IndexReader ir = iw.getReader();
     iw.close();
     
@@ -345,16 +312,13 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
-    body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "This test is another test. Not a good sentence. Test test test test.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -379,20 +343,17 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
-    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
-    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    Field body = new Field("body", "", positionsType);
-    Field title = new StringField("title", "", Field.Store.YES);
-    Document doc = new Document();
-    doc.add(body);
-    doc.add(title);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
-    title.setStringValue("test");
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableHighlighting("body");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    doc.addAtom("title", "test");
     iw.addDocument(doc);
-    body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
-    title.setStringValue("test");
+
+    doc = iw.newDocument();
+    doc.addLargeText("body", "This test is another test. Not a good sentence. Test test test test.");
+    doc.addAtom("title", "test");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -434,12 +395,10 @@
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
     
-    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
-    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", text, positionsType);
-    Document document = new Document();
-    document.add(body);
+    Document document = iw.newDocument();
+    document.addLargeText("body", text);
     iw.addDocument(document);
+
     IndexReader ir = iw.getReader();
     iw.close();
     IndexSearcher searcher = newSearcher(ir);
@@ -464,12 +423,11 @@
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
-    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
-    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", text, positionsType);
-    Document document = new Document();
-    document.add(body);
+
+    Document document = iw.newDocument();
+    document.addLargeText("body", text);
     iw.addDocument(document);
+
     IndexReader ir = iw.getReader();
     iw.close();
     IndexSearcher searcher = newSearcher(ir);
@@ -494,12 +452,10 @@
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
-    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
-    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", text, positionsType);
-    Document document = new Document();
-    document.add(body);
+    Document document = iw.newDocument();
+    document.addLargeText("body", text);
     iw.addDocument(document);
+
     IndexReader ir = iw.getReader();
     iw.close();
     IndexSearcher searcher = newSearcher(ir);
@@ -524,13 +480,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -553,12 +504,9 @@
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
-    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
-    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "This sentence has both terms.  This sentence has only terms.", positionsType);
-    Document document = new Document();
-    document.add(body);
-    iw.addDocument(document);
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This sentence has both terms.  This sentence has only terms.");
+    iw.addDocument(doc);
     IndexReader ir = iw.getReader();
     iw.close();
     IndexSearcher searcher = newSearcher(ir);
@@ -583,13 +531,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -619,15 +562,11 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
-    body.setStringValue("Highlighting the first term. Hope it works.");
+    doc = iw.newDocument();
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -657,13 +596,9 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    Document doc = new Document();
-
-    FieldType offsetsType = new FieldType(TextField.TYPE_NOT_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Document doc = iw.newDocument();
     final String text = "This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.";
-    Field body = new Field("body", text, offsetsType);
-    doc.add(body);
+    doc.addLargeText("body", text);
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -706,12 +641,9 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
+    Document doc = iw.newDocument();
 
-    Field body = new Field("body", "test this is.  another sentence this test has.  far away is that planet.", offsetsType);
-    doc.add(body);
+    doc.addLargeText("body", "test this is.  another sentence this test has.  far away is that planet.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -737,12 +669,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
-
-    Field body = new Field("body", "test this is.  another sentence this test has.  far away is that planet.", offsetsType);
-    doc.add(body);
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "test this is.  another sentence this test has.  far away is that planet.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -773,12 +701,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
-
-    Field body = new Field("body", "test this is.  another sentence this test has.  far away is that planet.", offsetsType);
-    doc.add(body);
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "test this is.  another sentence this test has.  far away is that planet.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -809,12 +733,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
-
-    Field body = new Field("body", "test this is.  another sentence this test has.  far away is that planet.", offsetsType);
-    doc.add(body);
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "test this is.  another sentence this test has.  far away is that planet.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -838,16 +758,13 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-
-    Document doc = new Document();
-    doc.add(new Field("body", "   ", offsetsType));
-    doc.add(new Field("id", "id", offsetsType));
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "   ");
+    doc.addLargeText("id", "id");
     iw.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new Field("body", "something", offsetsType));
+    doc = iw.newDocument();
+    doc.addLargeText("body", "something");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -873,17 +790,14 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
 
-    Document doc = new Document();
-    doc.add(new Field("body", "", offsetsType));
-    doc.add(new Field("id", "id", offsetsType));
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "");
+    doc.addLargeText("id", "id");
     iw.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new Field("body", "something", offsetsType));
+    doc = iw.newDocument();
+    doc.addLargeText("body", "something");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -910,18 +824,15 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-
     int numDocs = atLeast(100);
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       String content = "the answer is " + i;
       if ((i & 1) == 0) {
         content += " some more terms";
       }
-      doc.add(new Field("body", content, offsetsType));
-      doc.add(newStringField("id", ""+i, Field.Store.YES));
+      doc.addLargeText("body", content);
+      doc.addAtom("id", ""+i);
       iw.addDocument(doc);
 
       if (random().nextInt(10) == 2) {
@@ -941,8 +852,8 @@
     String snippets[] = highlighter.highlight("body", query, searcher, hits);
     assertEquals(numDocs, snippets.length);
     for(int hit=0;hit<numDocs;hit++) {
-      StoredDocument doc = searcher.doc(hits.scoreDocs[hit].doc);
-      int id = Integer.parseInt(doc.get("id"));
+      Document doc = searcher.doc(hits.scoreDocs[hit].doc);
+      int id = Integer.parseInt(doc.getString("id"));
       String expected = "the <b>answer</b> is " + id;
       if ((id  & 1) == 0) {
         expected += " some more terms";
@@ -960,16 +871,9 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Field title = new Field("title", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    doc.add(title);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
-    title.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    doc.addLargeText("title", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -995,13 +899,8 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from <i>postings</i>. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from <i>postings</i>. Feel free to ignore.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -1032,19 +931,12 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Document doc = new Document();
-    
-    Field body1 = new Field("body", "", offsetsType);
-    body1.setStringValue("This is a multivalued field");
-    doc.add(body1);
-    
-    Field body2 = new Field("body", "", offsetsType);
-    body2.setStringValue("This is something different");
-    doc.add(body2);
-    
+    FieldTypes fieldTypes = iw.getFieldTypes();
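+    // the schema must declare "body" multi-valued before a document adds it twice: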
+    fieldTypes.setMultiValued("body");
+
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a multivalued field");
+    doc.addLargeText("body", "This is something different");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -1076,15 +968,10 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
     iw.addDocument(doc);
-    
+
     IndexReader ir = iw.getReader();
     iw.close();
     
@@ -1118,4 +1005,79 @@
     ir.close();
     dir.close();
   }
+
+  public void testWithFieldTypes() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
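+    // atom fields are not highlightable by default; opt the "yes" field in explicitly: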
+    fieldTypes.enableHighlighting("yes");
+    fieldTypes.disableFastRanges("yes");
+    
+    Document doc = iw.newDocument();
+    doc.addShortText("title", "highlighting on this title field should.  Be entire.");
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    doc.addAtom("not", "no");
+    doc.addAtom("yes", "highlighting");
+    iw.addDocument(doc);
+
+    doc = iw.newDocument();
+    doc.addShortText("title", "Highlighting the first term. Hope it works.");
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
+    doc.addAtom("not", "no");
+    doc.addAtom("yes", "highlighting");
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    // body field should be snippets:
+    IndexSearcher searcher = newSearcher(ir);
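+    // passing FieldTypes lets the highlighter apply per-field settings: snippets for
+    // large text, whole-value highlighting for short text and enabled atom fields: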
+    PostingsHighlighter highlighter = new PostingsHighlighter(searcher.getFieldTypes());
+    Query query = new TermQuery(new Term("body", "highlighting"));
+    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+    assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+    // title field should be "whole" highlighted:
+    query = new TermQuery(new Term("title", "highlighting"));
+    topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    snippets = highlighter.highlight("title", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("<b>highlighting</b> on this title field should.  Be entire.", snippets[0]);
+    assertEquals("<b>Highlighting</b> the first term. Hope it works.", snippets[1]);
+
+    // this field doesn't exist
+    try {
+      highlighter.highlight("nofield", query, searcher, topDocs);
+      fail("did not hit exception");
+    } catch (Exception e) {
+      assertEquals("unknown field \"nofield\"; valid fields: [$fieldnames, body, not, title, yes]", e.getMessage());
+    }
+
+    // we didn't enable highlighting for this atom field
+    try {
+      highlighter.highlight("not", query, searcher, topDocs);
+      fail("did not hit exception");
+    } catch (Exception e) {
+      assertEquals("field=\"not\" was indexed with FieldTypes.enableHighlighting", e.getMessage());
+    }
+    
+    // we did enable highlighting for this atom field:
+    query = new TermQuery(new Term("yes", "highlighting"));
+    topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    snippets = highlighter.highlight("yes", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("<b>highlighting</b>", snippets[0]);
+    assertEquals("<b>highlighting</b>", snippets[1]);
+
+    ir.close();
+    dir.close();
+  }
 }
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java
index df3d7b6..fae72b2 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java
@@ -24,11 +24,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -63,13 +58,6 @@
     
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
-    Document document = new Document();
-    Field id = new StringField("id", "", Field.Store.NO);
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    document.add(id);
-    document.add(body);
     
     for (int i = 0; i < numDocs; i++) {
       StringBuilder bodyText = new StringBuilder();
@@ -77,8 +65,9 @@
       for (int j = 0; j < numSentences; j++) {
         bodyText.append(newSentence(random(), maxSentenceLength));
       }
-      body.setStringValue(bodyText.toString());
-      id.setStringValue(Integer.toString(i));
+      Document document = iw.newDocument();
+      document.addLargeText("body", bodyText.toString());
+      document.addAtom("id", Integer.toString(i));
       iw.addDocument(document);
     }
     
@@ -251,14 +240,10 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This is a test.  This test is a better test but the sentence is excruiatingly long, " + 
-                        "you have no idea how painful it was for me to type this long sentence into my IDE.");
+    Document doc = iw.newDocument();
+    doc.addLargeText("body",
+                    "This is a test.  This test is a better test but the sentence is excruiatingly long, " + 
+                    "you have no idea how painful it was for me to type this long sentence into my IDE.");
     iw.addDocument(doc);
     
     IndexReader ir = iw.getReader();
@@ -289,13 +274,9 @@
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     
-    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
-    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    Field body = new Field("body", "", offsetsType);
-    Document doc = new Document();
-    doc.add(body);
-    
-    body.setStringValue("This has only foo foo. " + 
+    Document doc = iw.newDocument();
+    doc.addLargeText("body",
+                        "This has only foo foo. " +
                         "On the other hand this sentence contains both foo and bar. " + 
                         "This has only bar bar bar bar bar bar bar bar bar bar bar bar.");
     iw.addDocument(doc);
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
index 385fa74..34c7710 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
@@ -27,9 +27,7 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -349,13 +347,16 @@
   // make 1 doc with multi valued field
   protected void make1dmfIndex( Analyzer analyzer, String... values ) throws Exception {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer).setOpenMode(OpenMode.CREATE));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
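+    // term-vector options are declared once on the writer's schema instead of per-Field: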
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.setMultiValued(F);
+
+    Document doc = writer.newDocument();
+
     for( String value: values ) {
-      doc.add( new Field( F, value, customType) );
+      doc.addLargeText(F, value);
     }
     writer.addDocument( doc );
     writer.close();
@@ -366,13 +367,14 @@
   // make 1 doc with multi valued & not analyzed field
   protected void make1dmfIndexNA( String... values ) throws Exception {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzerK).setOpenMode(OpenMode.CREATE));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.setMultiValued(F);
+    Document doc = writer.newDocument();
     for( String value: values ) {
-      doc.add( new Field( F, value, customType));
+      doc.addLargeText(F, value);
       //doc.add( new Field( F, value, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) );
     }
     writer.addDocument( doc );
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
index e3f697f..d252a3e 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
@@ -29,10 +29,7 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -60,15 +57,12 @@
   public void testSimpleHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field field = new Field("field", "This is a test where foo is highlighed and should be highlighted", type);
-    
-    doc.add(field);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "This is a test where foo is highlighed and should be highlighted");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     
@@ -90,15 +84,13 @@
   public void testPhraseHighlightLongTextTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field text = new Field("text", 
-        "Netscape was the general name for a series of web browsers originally produced by Netscape Communications Corporation, now a subsidiary of AOL The original browser was once the dominant browser in terms of usage share, but as a result of the first browser war it lost virtually all of its share to Internet Explorer Netscape was discontinued and support for all Netscape browsers and client products was terminated on March 1, 2008 Netscape Navigator was the name of Netscape\u0027s web browser from versions 1.0 through 4.8 The first beta release versions of the browser were released in 1994 and known as Mosaic and then Mosaic Netscape until a legal challenge from the National Center for Supercomputing Applications (makers of NCSA Mosaic, which many of Netscape\u0027s founders used to develop), led to the name change to Netscape Navigator The company\u0027s name also changed from Mosaic Communications Corporation to Netscape Communications Corporation The browser was easily the most advanced...", type);
-    doc.add(text);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("text");
+    fieldTypes.enableTermVectorPositions("text");
+    fieldTypes.enableTermVectorOffsets("text");
+    Document doc = writer.newDocument();
+    doc.addLargeText("text", 
+        "Netscape was the general name for a series of web browsers originally produced by Netscape Communications Corporation, now a subsidiary of AOL The original browser was once the dominant browser in terms of usage share, but as a result of the first browser war it lost virtually all of its share to Internet Explorer Netscape was discontinued and support for all Netscape browsers and client products was terminated on March 1, 2008 Netscape Navigator was the name of Netscape\u0027s web browser from versions 1.0 through 4.8 The first beta release versions of the browser were released in 1994 and known as Mosaic and then Mosaic Netscape until a legal challenge from the National Center for Supercomputing Applications (makers of NCSA Mosaic, which many of Netscape\u0027s founders used to develop), led to the name change to Netscape Navigator The company\u0027s name also changed from Mosaic Communications Corporation to Netscape Communications Corporation The browser was easily the most advanced...");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -136,17 +128,15 @@
   public void testPhraseHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field longTermField = new Field("long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted", type);
-    Field noLongTermField = new Field("no_long_term", "This is a test where foo is highlighed and should be highlighted", type);
-
-    doc.add(longTermField);
-    doc.add(noLongTermField);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"long_term", "no_long_term"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document doc = writer.newDocument();
+    doc.addLargeText("long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted");
+    doc.addLargeText("no_long_term", "This is a test where foo is highlighed and should be highlighted");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -263,12 +253,13 @@
   public void testBoostedPhraseHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(new MockAnalyzer( random() ) ) );
-    Document doc = new Document();
-    FieldType type = new FieldType( TextField.TYPE_STORED  );
-    type.setStoreTermVectorOffsets( true );
-    type.setStoreTermVectorPositions( true );
-    type.setStoreTermVectors( true );
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"text"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document doc = writer.newDocument();
     StringBuilder text = new StringBuilder();
     text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk ");
     for ( int i = 0; i<10; i++ ) {
@@ -278,7 +269,7 @@
     for ( int i = 0; i<10; i++ ) {
       text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk ");
     }
-    doc.add( new Field( "text", text.toString().trim(), type ) );
+    doc.addLargeText("text", text.toString().trim() );
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -308,11 +299,12 @@
   public void testCommonTermsQueryHighlight() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)));
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
     String[] texts = {
         "Hello this is a piece of text that is very long and contains too much preamble and the meat is really here which says kennedy has been shot",
         "This piece of text refers to Kennedy at the beginning then has a longer piece of text that is very long in the middle and finally ends with another reference to Kennedy",
@@ -320,9 +312,8 @@
         "This text has a typo in referring to Keneddy",
         "wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
     for (int i = 0; i < texts.length; i++) {
-      Document doc = new Document();
-      Field field = new Field("field", texts[i], type);
-      doc.add(field);
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", texts[i]);
       writer.addDocument(doc);
     }
     CommonTermsQuery query = new CommonTermsQuery(Occur.MUST, Occur.SHOULD, 2);
@@ -462,16 +453,18 @@
   public void testMultiValuedSortByScore() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(new MockAnalyzer( random() ) ) );
-    Document doc = new Document();
-    FieldType type = new FieldType( TextField.TYPE_STORED );
-    type.setStoreTermVectorOffsets( true );
-    type.setStoreTermVectorPositions( true );
-    type.setStoreTermVectors( true );
-    type.freeze();
-    doc.add( new Field( "field", "zero if naught", type ) ); // The first two fields contain the best match
-    doc.add( new Field( "field", "hero of legend", type ) ); // but total a lower score (3) than the bottom
-    doc.add( new Field( "field", "naught of hero", type ) ); // two fields (4)
-    doc.add( new Field( "field", "naught of hero", type ) );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.setMultiValued(fieldName);
+    }
+    Document doc = writer.newDocument();
+    doc.addLargeText( "field", "zero if naught"); // The first two fields contain the best match
+    doc.addLargeText( "field", "hero of legend" ); // but total a lower score (3) than the bottom
+    doc.addLargeText( "field", "naught of hero" ); // two fields (4)
+    doc.addLargeText( "field", "naught of hero" );
     writer.addDocument(doc);
 
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
@@ -510,12 +503,14 @@
   public void testBooleanPhraseWithSynonym() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_NOT_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.disableHighlighting(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document doc = writer.newDocument();
     Token syn = new Token("httpwwwfacebookcom", 6, 29);
     syn.setPositionIncrement(0);
     CannedTokenStream ts = new CannedTokenStream(
@@ -526,9 +521,7 @@
         new Token("facebook", 17, 25),
         new Token("com", 26, 29)
     );
-    Field field = new Field("field", ts, type);
-    doc.add(field);
-    doc.add(new StoredField("field", "Test: http://www.facebook.com"));
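+    // this addLargeText variant takes the stored value, a pre-analyzed TokenStream,
+    // and a boost, collapsing the old indexed + stored field pair: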
+    doc.addLargeText("field", "Test: http://www.facebook.com", ts, 1.0f);
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     
@@ -542,6 +535,7 @@
     pq.add(new Term("field", "www"));
     pq.add(new Term("field", "facebook"));
     pq.add(new Term("field", "com"));
+    TopDocs hits = newSearcher(reader).search(pq, 1);
     FieldQuery fieldQuery  = highlighter.getFieldQuery(pq, reader);
     String[] bestFragments = highlighter.getBestFragments(fieldQuery, reader, docId, "field", 54, 1);
     assertEquals("<b>Test: http://www.facebook.com</b>", bestFragments[0]);
@@ -575,29 +569,7 @@
   }
 
   private void matchedFieldsTestCase( boolean useMatchedFields, boolean fieldMatch, String fieldValue, String expected, Query... queryClauses ) throws IOException {
-    Document doc = new Document();
-    FieldType stored = new FieldType( TextField.TYPE_STORED );
-    stored.setStoreTermVectorOffsets( true );
-    stored.setStoreTermVectorPositions( true );
-    stored.setStoreTermVectors( true );
-    stored.freeze();
-    FieldType matched = new FieldType( TextField.TYPE_NOT_STORED );
-    matched.setStoreTermVectorOffsets( true );
-    matched.setStoreTermVectorPositions( true );
-    matched.setStoreTermVectors( true );
-    matched.freeze();
-    doc.add( new Field( "field", fieldValue, stored ) );               // Whitespace tokenized with English stop words
-    doc.add( new Field( "field_exact", fieldValue, matched ) );        // Whitespace tokenized without stop words
-    doc.add( new Field( "field_super_exact", fieldValue, matched ) );  // Whitespace tokenized without toLower
-    doc.add( new Field( "field_characters", fieldValue, matched ) );   // Each letter is a token
-    doc.add( new Field( "field_tripples", fieldValue, matched ) );     // Every three letters is a token
-    doc.add( new Field( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
-      Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
-    doc.add( new Field( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
-          token( "der", 1, 0, 3 ),
-          token( "red", 0, 0, 3 )
-        ), matched ) );
-
+    Directory dir = newDirectory();
     final Map<String, Analyzer> fieldAnalyzers = new TreeMap<>();
     fieldAnalyzers.put( "field", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET ) );
     fieldAnalyzers.put( "field_exact", new MockAnalyzer( random() ) );
@@ -612,8 +584,31 @@
       }
     };
 
-    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(analyzer));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    for(String fieldName : new String[] {"field", "field_exact", "field_super_exact", "field_characters", "field_tripples", "field_sliced", "field_der_red"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.setMultiValued(fieldName);
+      if (fieldName.equals("field") == false) {
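+        // only "field" keeps stored values, mirroring the old stored vs. matched FieldTypes: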
+        fieldTypes.disableStored(fieldName);
+      }
+    }
+    Document doc = writer.newDocument();
+    doc.addLargeText( "field", fieldValue );               // Whitespace tokenized with English stop words
+    doc.addLargeText( "field_exact", fieldValue );        // Whitespace tokenized without stop words
+    doc.addLargeText( "field_super_exact", fieldValue );  // Whitespace tokenized without toLower
+    doc.addLargeText( "field_characters", fieldValue );   // Each letter is a token
+    doc.addLargeText( "field_tripples", fieldValue );     // Every three letters is a token
+    doc.addLargeText( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
+      Math.min( fieldValue.length() - 1 , 10 ) ) );
+    doc.addLargeText( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
+          token( "der", 1, 0, 3 ),
+          token( "red", 0, 0, 3 )
+        ) );
+
     writer.addDocument( doc );
 
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
index 70a7b87..e473bd9 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
@@ -17,16 +17,21 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
@@ -37,13 +42,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.TestUtil;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 public class SimpleFragmentsBuilderTest extends AbstractTestCase {
   
   public void test1TermIndex() throws Exception {
@@ -149,13 +147,13 @@
   
   protected void makeUnstoredIndex() throws Exception {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzerW).setOpenMode(OpenMode.CREATE));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
-    doc.add( new Field( F, "aaa", customType) );
-    //doc.add( new Field( F, "aaa", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.disableStored(F);
+    Document doc = writer.newDocument();
+    doc.addLargeText(F, "aaa");
     writer.addDocument( doc );
     writer.close();
     if (reader != null) reader.close();
@@ -238,10 +236,11 @@
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.setMultiValued(F);
 
     int numDocs = randomValues.length * 5;
     int numFields = 2 + random().nextInt(5);
@@ -250,7 +249,7 @@
     List<Document> documents = new ArrayList<>(numDocs);
     Map<String, Set<Integer>> valueToDocId = new HashMap<>();
     for (int i = 0; i < numDocs; i++) {
-      Document document = new Document();
+      Document document = writer.newDocument();
       String[][] fields = new String[numFields][numTerms];
       for (int j = 0; j < numFields; j++) {
         String[] fieldValues = new String[numTerms];
@@ -260,7 +259,7 @@
           fieldValues[k] = getRandomValue(randomValues, valueToDocId, i);
           builder.append(' ').append(fieldValues[k]);
         }
-        document.add(new Field(F, builder.toString(), customType));
+        document.addLargeText(F, builder.toString());
         fields[j] = fieldValues;
       }
       docs.add(new Doc(fields));
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
index bba2225..865e5e5 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
@@ -372,7 +372,7 @@
     @Override
     public int advance(int parentTarget) throws IOException {
 
-      //System.out.println("Q.advance parentTarget=" + parentTarget);
+      // System.out.println("Q.advance parentTarget=" + parentTarget);
       if (parentTarget == NO_MORE_DOCS) {
         return parentDoc = NO_MORE_DOCS;
       }
@@ -389,13 +389,13 @@
 
       prevParentDoc = parentBits.prevSetBit(parentTarget-1);
 
-      //System.out.println("  rolled back to prevParentDoc=" + prevParentDoc + " vs parentDoc=" + parentDoc);
+      // System.out.println("  rolled back to prevParentDoc=" + prevParentDoc + " vs parentDoc=" + parentDoc);
       assert prevParentDoc >= parentDoc;
       if (prevParentDoc > nextChildDoc) {
         nextChildDoc = childScorer.advance(prevParentDoc);
         // System.out.println("  childScorer advanced to child docID=" + nextChildDoc);
-      //} else {
-        //System.out.println("  skip childScorer advance");
+      } else {
+        // System.out.println("  skip childScorer advance");
       }
 
       // Parent & child docs are supposed to be orthogonal:
@@ -404,15 +404,21 @@
       }
 
       final int nd = nextDoc();
-      //System.out.println("  return nextParentDoc=" + nd);
+      // System.out.println("  return nextParentDoc=" + nd);
       return nd;
     }
 
     public Explanation explain(int docBase) throws IOException {
-      int start = docBase + prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
-      int end = docBase + parentDoc - 1; // -1 b/c parentDoc is parent doc
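+      // start/end are now segment-local doc ids (acceptDocs is per-segment);
+      // docBase is applied only when formatting the explanation below: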
+      int start = prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
+      if (acceptDocs != null) {
+        // Skip deleted docs:
+        while (acceptDocs.get(start) == false) {
+          start++;
+        }
+      }
+      int end = parentDoc - 1; // -1 b/c parentDoc is parent doc
       return new ComplexExplanation(
-          true, score(), String.format(Locale.ROOT, "Score based on child doc range from %d to %d", start, end)
+          true, score(), String.format(Locale.ROOT, "Score based on child doc range from %d to %d", docBase+start, docBase+end)
       );
     }
 
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 63ba2bb..29c9b1d 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -37,28 +37,27 @@
 public class TestBlockJoin extends LuceneTestCase {
 
   // One resume...
-  private Document makeResume(String name, String country) {
-    Document resume = new Document();
-    resume.add(newStringField("docType", "resume", Field.Store.NO));
-    resume.add(newStringField("name", name, Field.Store.YES));
-    resume.add(newStringField("country", country, Field.Store.NO));
+  private Document makeResume(IndexWriter w, String name, String country) {
+    Document resume = w.newDocument();
+    resume.addAtom("docType", "resume");
+    resume.addAtom("name", name);
+    resume.addAtom("country", country);
     return resume;
   }
 
   // ... has multiple jobs
-  private Document makeJob(String skill, int year) {
-    Document job = new Document();
-    job.add(newStringField("skill", skill, Field.Store.YES));
-    job.add(new IntField("year", year, Field.Store.NO));
-    job.add(new StoredField("year", year));
+  private Document makeJob(IndexWriter w, String skill, int year) {
+    Document job = w.newDocument();
+    job.addAtom("skill", skill);
+    job.addInt("year", year);
     return job;
   }
 
   // ... has multiple qualifications
-  private Document makeQualification(String qualification, int year) {
-    Document job = new Document();
-    job.add(newStringField("qualification", qualification, Field.Store.YES));
-    job.add(new IntField("year", year, Field.Store.NO));
+  private Document makeQualification(IndexWriter w, String qualification, int year) {
+    Document job = w.newDocument();
+    job.addAtom("qualification", qualification);
+    job.addInt("year", year);
     return job;
   }
   
@@ -71,21 +70,21 @@
 
     final List<Document> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w, "java", 2007));
+    docs.add(makeJob(w, "python", 2010));
+    docs.add(makeResume(w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w, "ruby", 2005));
+    docs.add(makeJob(w, "java", 2006));
+    docs.add(makeResume(w, "Frank", "United States"));
     w.addDocuments(docs);
     w.commit();
     int num = atLeast(10); // produce a segment that doesn't have a value in the docType field
     for (int i = 0; i < num; i++) {
       docs.clear();
-      docs.add(makeJob("java", 2007));
+      docs.add(makeJob(w, "java", 2007));
       w.addDocuments(docs);
     }
     
@@ -93,11 +92,13 @@
     w.close();
     assertTrue(r.leaves().size() > 1);
     IndexSearcher s = new IndexSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
 
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
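+    // numeric ranges now come from the FieldTypes schema, since "year" was added via addInt: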
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
 
@@ -111,11 +112,11 @@
     assertEquals(1, results.totalGroupedHitCount);
     assertEquals(1, results.groups.length);
     final GroupDocs<Integer> group = results.groups[0];
-    StoredDocument childDoc = s.doc(group.scoreDocs[0].doc);
-    assertEquals("java", childDoc.get("skill"));
+    Document childDoc = s.doc(group.scoreDocs[0].doc);
+    assertEquals("java", childDoc.getString("skill"));
     assertNotNull(group.groupValue);
-    StoredDocument parentDoc = s.doc(group.groupValue);
-    assertEquals("Lisa", parentDoc.get("name"));
+    Document parentDoc = s.doc(group.groupValue);
+    assertEquals("Lisa", parentDoc.getString("name"));
 
     r.close();
     dir.close();
@@ -129,20 +130,21 @@
 
     final List<Document> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeResume(w.w, "Frank", "United States"));
     w.addDocuments(docs);
     
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -150,7 +152,7 @@
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Define parent document criteria (find a resident in the UK)
     Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@@ -179,11 +181,11 @@
     assertEquals(1, group.totalHits);
     assertFalse(Float.isNaN(group.score));
 
-    StoredDocument childDoc = s.doc(group.scoreDocs[0].doc);
+    Document childDoc = s.doc(group.scoreDocs[0].doc);
     //System.out.println("  doc=" + group.scoreDocs[0].doc);
-    assertEquals("java", childDoc.get("skill"));
+    assertEquals("java", childDoc.getString("skill"));
     assertNotNull(group.groupValue);
-    StoredDocument parentDoc = s.doc(group.groupValue);
+    Document parentDoc = s.doc(group.groupValue);
     assertEquals("Lisa", parentDoc.get("name"));
 
 
@@ -221,17 +223,18 @@
 
     for (int i=0;i<10;i++) {
       docs.clear();
-      docs.add(makeJob("ruby", i));
-      docs.add(makeJob("java", 2007));
-      docs.add(makeResume("Frank", "United States"));
+      docs.add(makeJob(w.w, "ruby", i));
+      docs.add(makeJob(w.w, "java", 2007));
+      docs.add(makeResume(w.w, "Frank", "United States"));
       w.addDocuments(docs);
     }
 
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
-    MultiTermQuery qc = NumericRangeQuery.newIntRange("year", 2007, 2007, true, true);
+    MultiTermQuery qc = new TermRangeQuery("year", NumericUtils.intToBytes(2007), NumericUtils.intToBytes(2007), true, true);
     // Hacky: this causes the query to need 2 rewrite
     // iterations: 
     qc.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
@@ -271,16 +274,16 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
     final List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
     Collections.shuffle(docs, random());
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
 
     final List<Document> docs2 = new ArrayList<>();
-    docs2.add(makeJob("ruby", 2005));
-    docs2.add(makeJob("java", 2006));
+    docs2.add(makeJob(w.w, "ruby", 2005));
+    docs2.add(makeJob(w.w, "java", 2006));
     Collections.shuffle(docs2, random());
-    docs2.add(makeResume("Frank", "United States"));
+    docs2.add(makeResume(w.w, "Frank", "United States"));
     
     addSkillless(w);
     boolean turn = random().nextBoolean();
@@ -295,6 +298,7 @@
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -302,7 +306,7 @@
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Define parent document criteria (find a resident in the UK)
     Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@@ -351,11 +355,11 @@
 
   private void addSkillless(final RandomIndexWriter w) throws IOException {
     if (random().nextBoolean()) {
-      w.addDocument(makeResume("Skillless", random().nextBoolean() ? "United Kingdom":"United States"));
+      w.addDocument(makeResume(w.w, "Skillless", random().nextBoolean() ? "United Kingdom":"United States"));
     }
   }
   
-  private StoredDocument getParentDoc(IndexReader reader, BitDocIdSetFilter parents, int childDocID) throws IOException {
+  private Document getParentDoc(IndexReader reader, BitDocIdSetFilter parents, int childDocID) throws IOException {
     final List<LeafReaderContext> leaves = reader.leaves();
     final int subIndex = ReaderUtil.subIndex(childDocID, leaves);
     final LeafReaderContext leaf = leaves.get(subIndex);
@@ -392,23 +396,24 @@
     w.setDoRandomForceMergeAssert(false);
 
     List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("c", 1999));
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w.w, "c", 1999));
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeResume(w.w, "Frank", "United States"));
     w.addDocuments(docs);
 
     w.commit();
     IndexSearcher s = newSearcher(DirectoryReader.open(dir));
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(
-        NumericRangeQuery.newIntRange("year", 1990, 2010, true, true),
+        new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 1990, true, 2010, true)),
         new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume")))),
         ScoreMode.Total
     );
@@ -504,32 +509,34 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     final RandomIndexWriter joinW = new RandomIndexWriter(random(), joinDir);
     for(int parentDocID=0;parentDocID<numParentDocs;parentDocID++) {
-      Document parentDoc = new Document();
-      Document parentJoinDoc = new Document();
-      Field id = new IntField("parentID", parentDocID, Field.Store.YES);
-      parentDoc.add(id);
-      parentJoinDoc.add(id);
-      parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
-      id = new NumericDocValuesField("parentID", parentDocID);
-      parentDoc.add(id);
-      parentJoinDoc.add(id);
-      parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
-      for(int field=0;field<parentFields.length;field++) {
-        if (random().nextDouble() < 0.9) {
-          String s = parentFields[field][random().nextInt(parentFields[field].length)];
-          Field f = newStringField("parent" + field, s, Field.Store.NO);
-          parentDoc.add(f);
-          parentJoinDoc.add(f);
+      Document parentDoc = w.newDocument();
+      Document parentJoinDoc = joinW.newDocument();
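+      // A single addInt call now covers what the stored IntField plus NumericDocValuesField pair did before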
+      parentDoc.addInt("parentID", parentDocID);
+      parentJoinDoc.addInt("parentID", parentDocID);
+      parentJoinDoc.addAtom("isParent", "x");
 
-          f = new SortedDocValuesField("parent" + field, new BytesRef(s));
-          parentDoc.add(f);
-          parentJoinDoc.add(f);
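+      // Precompute the random parent field values once so the child docs below can re-add the same values (docs are writer-bound now and no longer cloned)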
+      String[] randomFields = new String[parentFields.length];
+      for(int field=0;field<parentFields.length;field++) {
+        String s;
+        if (random().nextDouble() < 0.9) {
+          s = parentFields[field][random().nextInt(parentFields[field].length)];
+        } else {
+          s = null;
+        }
+        randomFields[field] = s;
+      }
+
+      for(int i=0;i<randomFields.length;i++) {
+        String s = randomFields[i];
+        if (s != null) {
+          parentDoc.addAtom("parent" + i, s);
+          parentJoinDoc.addAtom("parent" + i, s);
         }
       }
 
       if (doDeletes) {
-        parentDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
-        parentJoinDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
+        parentDoc.addInt("blockID", parentDocID);
+        parentJoinDoc.addInt("blockID", parentDocID);
       }
 
       final List<Document> joinDocs = new ArrayList<>();
@@ -538,7 +545,7 @@
         StringBuilder sb = new StringBuilder();
         sb.append("parentID=").append(parentDoc.get("parentID"));
         for(int fieldID=0;fieldID<parentFields.length;fieldID++) {
-          String s = parentDoc.get("parent" + fieldID);
+          String s = parentDoc.getString("parent" + fieldID);
           if (s != null) {
             sb.append(" parent" + fieldID + "=" + s);
           }
@@ -549,27 +556,30 @@
       final int numChildDocs = TestUtil.nextInt(random(), 1, 20);
       for(int childDocID=0;childDocID<numChildDocs;childDocID++) {
         // Denormalize: copy all parent fields into child doc:
-        Document childDoc = TestUtil.cloneDocument(parentDoc);
-        Document joinChildDoc = new Document();
+        Document childDoc = w.newDocument();
+        childDoc.addInt("parentID", parentDocID);
+        for(int i=0;i<randomFields.length;i++) {
+          String s = randomFields[i];
+          if (s != null) {
+            childDoc.addAtom("parent" + i, s);
+          }
+        }
+        if (doDeletes) {
+          childDoc.addInt("blockID", parentDocID);
+        }
+
+        Document joinChildDoc = joinW.newDocument();
         joinDocs.add(joinChildDoc);
 
-        Field childID = new IntField("childID", childDocID, Field.Store.YES);
-        childDoc.add(childID);
-        joinChildDoc.add(childID);
-        childID = new NumericDocValuesField("childID", childDocID);
-        childDoc.add(childID);
-        joinChildDoc.add(childID);
+        childDoc.addInt("childID", childDocID);
+        joinChildDoc.addInt("childID", childDocID);
 
         for(int childFieldID=0;childFieldID<childFields.length;childFieldID++) {
           if (random().nextDouble() < 0.9) {
             String s = childFields[childFieldID][random().nextInt(childFields[childFieldID].length)];
-            Field f = newStringField("child" + childFieldID, s, Field.Store.NO);
-            childDoc.add(f);
-            joinChildDoc.add(f);
 
-            f = new SortedDocValuesField("child" + childFieldID, new BytesRef(s));
-            childDoc.add(f);
-            joinChildDoc.add(f);
+            childDoc.addAtom("child" + childFieldID, s);
+            joinChildDoc.addAtom("child" + childFieldID, s);
           }
         }
 
@@ -577,7 +587,7 @@
           StringBuilder sb = new StringBuilder();
           sb.append("childID=").append(joinChildDoc.get("childID"));
           for(int fieldID=0;fieldID<childFields.length;fieldID++) {
-            String s = joinChildDoc.get("child" + fieldID);
+            String s = joinChildDoc.getString("child" + fieldID);
             if (s != null) {
               sb.append(" child" + fieldID + "=" + s);
             }
@@ -586,7 +596,7 @@
         }
 
         if (doDeletes) {
-          joinChildDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
+          joinChildDoc.addInt("blockID", parentDocID);
         }
 
         w.addDocument(childDoc);
@@ -606,9 +616,8 @@
       if (VERBOSE) {
         System.out.println("DELETE parentID=" + deleteID);
       }
-      NumericUtils.intToPrefixCodedBytes(deleteID, 0, term);
-      w.deleteDocuments(new Term("blockID", term.toBytesRef()));
-      joinW.deleteDocuments(new Term("blockID", term.toBytesRef()));
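+      // newIntTerm encodes the int the same way it was indexed, replacing the manual prefix-coded bytes above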
+      w.deleteDocuments(w.getFieldTypes().newIntTerm("blockID", deleteID));
+      joinW.deleteDocuments(joinW.getFieldTypes().newIntTerm("blockID", deleteID));
     }
 
     final IndexReader r = w.getReader();
@@ -752,7 +761,7 @@
         System.out.println("\nTEST: normal index gets " + results.totalHits + " hits; sort=" + parentAndChildSort);
         final ScoreDoc[] hits = results.scoreDocs;
         for(int hitIDX=0;hitIDX<hits.length;hitIDX++) {
-          final StoredDocument doc = s.doc(hits[hitIDX].doc);
+          final Document doc = s.doc(hits[hitIDX].doc);
           //System.out.println("  score=" + hits[hitIDX].score + " parentID=" + doc.get("parentID") + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")");
           System.out.println("  parentID=" + doc.get("parentID") + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")");
           FieldDoc fd = (FieldDoc) hits[hitIDX];
@@ -806,12 +815,12 @@
             }
 
             assertNotNull(group.groupValue);
-            final StoredDocument parentDoc = joinS.doc(group.groupValue);
+            final Document parentDoc = joinS.doc(group.groupValue);
             System.out.println("  group parentID=" + parentDoc.get("parentID") + " (docID=" + group.groupValue + ")");
             for(int hitIDX=0;hitIDX<group.scoreDocs.length;hitIDX++) {
-              final StoredDocument doc = joinS.doc(group.scoreDocs[hitIDX].doc);
+              final Document doc = joinS.doc(group.scoreDocs[hitIDX].doc);
               //System.out.println("    score=" + group.scoreDocs[hitIDX].score + " childID=" + doc.get("childID") + " (docID=" + group.scoreDocs[hitIDX].doc + ")");
-              System.out.println("    childID=" + doc.get("childID") + " child0=" + doc.get("child0") + " (docID=" + group.scoreDocs[hitIDX].doc + ")");
+              System.out.println("    childID=" + doc.getString("childID") + " child0=" + doc.getString("child0") + " (docID=" + group.scoreDocs[hitIDX].doc + ")");
             }
           }
         }
@@ -824,8 +833,8 @@
         TopDocs b = joinS.search(childJoinQuery, 10);
         for (ScoreDoc hit : b.scoreDocs) {
           Explanation explanation = joinS.explain(childJoinQuery, hit.doc);
-          StoredDocument document = joinS.doc(hit.doc - 1);
-          int childId = Integer.parseInt(document.get("childID"));
+          Document document = joinS.doc(hit.doc - 1);
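+          // getInt returns the stored value as an int, so the old Integer.parseInt round-trip goes away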
+          int childId = document.getInt("childID");
           //System.out.println("  hit docID=" + hit.doc + " childId=" + childId + " parentId=" + document.get("parentID"));
           assertTrue(explanation.isMatch());
           assertEquals(hit.score, explanation.getValue(), 0.0f);
@@ -953,8 +962,8 @@
       if (VERBOSE) {
         System.out.println("  " + results2.totalHits + " totalHits:");
         for(ScoreDoc sd : results2.scoreDocs) {
-          final StoredDocument doc = s.doc(sd.doc);
-          System.out.println("  childID=" + doc.get("childID") + " parentID=" + doc.get("parentID") + " docID=" + sd.doc);
+          final Document doc = s.doc(sd.doc);
+          System.out.println("  childID=" + doc.getString("childID") + " parentID=" + doc.get("parentID") + " docID=" + sd.doc);
         }
       }
 
@@ -967,9 +976,9 @@
       if (VERBOSE) {
         System.out.println("  " + joinResults2.totalHits + " totalHits:");
         for(ScoreDoc sd : joinResults2.scoreDocs) {
-          final StoredDocument doc = joinS.doc(sd.doc);
-          final StoredDocument parentDoc = getParentDoc(joinR, parentsFilter, sd.doc);
-          System.out.println("  childID=" + doc.get("childID") + " parentID=" + parentDoc.get("parentID") + " docID=" + sd.doc);
+          final Document doc = joinS.doc(sd.doc);
+          final Document parentDoc = getParentDoc(joinR, parentsFilter, sd.doc);
+          System.out.println("  childID=" + doc.getString("childID") + " parentID=" + parentDoc.getString("parentID") + " docID=" + sd.doc);
         }
       }
 
@@ -988,10 +997,10 @@
     for(int hitCount=0;hitCount<results.scoreDocs.length;hitCount++) {
       ScoreDoc hit = results.scoreDocs[hitCount];
       ScoreDoc joinHit = joinResults.scoreDocs[hitCount];
-      StoredDocument doc1 = r.document(hit.doc);
-      StoredDocument doc2 = joinR.document(joinHit.doc);
+      Document doc1 = r.document(hit.doc);
+      Document doc2 = joinR.document(joinHit.doc);
       assertEquals("hit " + hitCount + " differs",
-                   doc1.get("childID"), doc2.get("childID"));
+                   doc1.getInt("childID"), doc2.getInt("childID"));
       // don't compare scores -- they are expected to differ
 
 
@@ -1016,18 +1025,18 @@
       final GroupDocs<Integer> group = groupDocs[joinGroupUpto++];
       final ScoreDoc[] groupHits = group.scoreDocs;
       assertNotNull(group.groupValue);
-      final StoredDocument parentDoc = joinR.document(group.groupValue);
-      final String parentID = parentDoc.get("parentID");
+      final Document parentDoc = joinR.document(group.groupValue);
+      final String parentID = Integer.toString(parentDoc.getInt("parentID"));
       //System.out.println("GROUP groupDoc=" + group.groupDoc + " parent=" + parentDoc);
       assertNotNull(parentID);
       assertTrue(groupHits.length > 0);
       for(int hitIDX=0;hitIDX<groupHits.length;hitIDX++) {
-        final StoredDocument nonJoinHit = r.document(hits[resultUpto++].doc);
-        final StoredDocument joinHit = joinR.document(groupHits[hitIDX].doc);
+        final Document nonJoinHit = r.document(hits[resultUpto++].doc);
+        final Document joinHit = joinR.document(groupHits[hitIDX].doc);
         assertEquals(parentID,
-                     nonJoinHit.get("parentID"));
-        assertEquals(joinHit.get("childID"),
-                     nonJoinHit.get("childID"));
+                     Integer.toString(nonJoinHit.getInt("parentID")));
+        assertEquals(joinHit.getInt("childID"),
+                     nonJoinHit.getInt("childID"));
       }
 
       if (joinGroupUpto < groupDocs.length) {
@@ -1035,7 +1044,7 @@
         //System.out.println("  next joingroupUpto=" + joinGroupUpto + " gd.length=" + groupDocs.length + " parentID=" + parentID);
         while(true) {
           assertTrue(resultUpto < hits.length);
-          if (!parentID.equals(r.document(hits[resultUpto].doc).get("parentID"))) {
+          if (!parentID.equals(Integer.toString(r.document(hits[resultUpto].doc).getInt("parentID")))) {
             break;
           }
           resultUpto++;
@@ -1051,15 +1060,16 @@
 
     final List<Document> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeQualification("maths", 1999));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeQualification(w.w, "maths", 1999));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -1067,11 +1077,11 @@
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childJobQuery = new BooleanQuery();
     childJobQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childJobQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childJobQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     BooleanQuery childQualificationQuery = new BooleanQuery();
     childQualificationQuery.add(new BooleanClause(new TermQuery(new Term("qualification", "maths")), Occur.MUST));
-    childQualificationQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 1980, 2000, true, true), Occur.MUST));
+    childQualificationQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 1980, true, 2000, true)), Occur.MUST));
 
 
     // Define parent document criteria (find a resident in the UK)
@@ -1104,12 +1114,12 @@
     final GroupDocs<Integer> group = jobResults.groups[0];
     assertEquals(1, group.totalHits);
 
-    StoredDocument childJobDoc = s.doc(group.scoreDocs[0].doc);
+    Document childJobDoc = s.doc(group.scoreDocs[0].doc);
     //System.out.println("  doc=" + group.scoreDocs[0].doc);
-    assertEquals("java", childJobDoc.get("skill"));
+    assertEquals("java", childJobDoc.getString("skill"));
     assertNotNull(group.groupValue);
-    StoredDocument parentDoc = s.doc(group.groupValue);
-    assertEquals("Lisa", parentDoc.get("name"));
+    Document parentDoc = s.doc(group.groupValue);
+    assertEquals("Lisa", parentDoc.getString("name"));
 
     // Now Examine qualification children
     TopGroups<Integer> qualificationResults = c.getTopGroups(childQualificationJoinQuery, null, 0, 10, 0, true);
@@ -1120,11 +1130,11 @@
     final GroupDocs<Integer> qGroup = qualificationResults.groups[0];
     assertEquals(1, qGroup.totalHits);
 
-    StoredDocument childQualificationDoc = s.doc(qGroup.scoreDocs[0].doc);
-    assertEquals("maths", childQualificationDoc.get("qualification"));
+    Document childQualificationDoc = s.doc(qGroup.scoreDocs[0].doc);
+    assertEquals("maths", childQualificationDoc.getString("qualification"));
     assertNotNull(qGroup.groupValue);
     parentDoc = s.doc(qGroup.groupValue);
-    assertEquals("Lisa", parentDoc.get("name"));
+    assertEquals("Lisa", parentDoc.getString("name"));
 
     r.close();
     dir.close();
@@ -1133,10 +1143,10 @@
   public void testAdvanceSingleParentSingleChild() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document childDoc = new Document();
-    childDoc.add(newStringField("child", "1", Field.Store.NO));
-    Document parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "1", Field.Store.NO));
+    Document childDoc = w.newDocument();
+    childDoc.addAtom("child", "1");
+    Document parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "1");
     w.addDocuments(Arrays.asList(childDoc, parentDoc));
     IndexReader r = w.getReader();
     w.close();
@@ -1157,17 +1167,17 @@
   public void testAdvanceSingleParentNoChild() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy()));
-    Document parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "1", Field.Store.NO));
-    parentDoc.add(newStringField("isparent", "yes", Field.Store.NO));
+    Document parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "1");
+    parentDoc.addAtom("isparent", "yes");
     w.addDocuments(Arrays.asList(parentDoc));
 
     // Add another doc so scorer is not null
-    parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "2", Field.Store.NO));
-    parentDoc.add(newStringField("isparent", "yes", Field.Store.NO));
-    Document childDoc = new Document();
-    childDoc.add(newStringField("child", "2", Field.Store.NO));
+    parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "2");
+    parentDoc.addAtom("isparent", "yes");
+    Document childDoc = w.newDocument();
+    childDoc.addAtom("child", "2");
     w.addDocuments(Arrays.asList(childDoc, parentDoc));
 
     // Need single seg:
@@ -1194,12 +1204,12 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
     final List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeJob("java", 2010));
-    docs.add(makeJob("java", 2012));
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeJob(w.w, "java", 2010));
+    docs.add(makeJob(w.w, "java", 2012));
     Collections.shuffle(docs, random());
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeResume(w.w, "Frank", "United States"));
 
     addSkillless(w);
     w.addDocuments(docs);
@@ -1208,6 +1218,7 @@
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = new IndexSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -1215,7 +1226,7 @@
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Wrap the child document query to 'join' any matches
     // up to corresponding parent:
@@ -1239,15 +1250,15 @@
       assertEquals(2, group.totalHits);
       assertFalse(Float.isNaN(group.score));
       assertNotNull(group.groupValue);
-      StoredDocument parentDoc = s.doc(group.groupValue);
-      assertEquals("Frank", parentDoc.get("name"));
+      Document parentDoc = s.doc(group.groupValue);
+      assertEquals("Frank", parentDoc.getString("name"));
 
       assertEquals(2, group.scoreDocs.length); //all matched child documents collected
 
       for (ScoreDoc scoreDoc : group.scoreDocs) {
-        StoredDocument childDoc = s.doc(scoreDoc.doc);
-        assertEquals("java", childDoc.get("skill"));
-        int year = Integer.parseInt(childDoc.get("year"));
+        Document childDoc = s.doc(scoreDoc.doc);
+        assertEquals("java", childDoc.getString("skill"));
+        int year = childDoc.getInt("year");
         assertTrue(year >= 2006 && year <= 2011);
       }
     }
@@ -1262,15 +1273,15 @@
     assertEquals(2, group.totalHits);
     assertFalse(Float.isNaN(group.score));
     assertNotNull(group.groupValue);
-    StoredDocument parentDoc = s.doc(group.groupValue);
-    assertEquals("Frank", parentDoc.get("name"));
+    Document parentDoc = s.doc(group.groupValue);
+    assertEquals("Frank", parentDoc.getString("name"));
 
     assertEquals(1, group.scoreDocs.length); //not all matched child documents collected
 
     for (ScoreDoc scoreDoc : group.scoreDocs) {
-      StoredDocument childDoc = s.doc(scoreDoc.doc);
-      assertEquals("java", childDoc.get("skill"));
-      int year = Integer.parseInt(childDoc.get("year"));
+      Document childDoc = s.doc(scoreDoc.doc);
+      assertEquals("java", childDoc.getString("skill"));
+      int year = childDoc.getInt("year");
       assertTrue(year >= 2006 && year <= 2011);
     }
 
@@ -1282,18 +1293,17 @@
   public void testSometimesParentOnlyMatches() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document parent = new Document();
-    parent.add(new StoredField("parentID", "0"));
-    parent.add(new SortedDocValuesField("parentID", new BytesRef("0")));
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
+    Document parent = w.newDocument();
+    parent.addAtom("parentID", "0");
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
 
     List<Document> docs = new ArrayList<>();
 
-    Document child = new Document();
+    Document child = w.newDocument();
     docs.add(child);
-    child.add(new StoredField("childID", "0"));
-    child.add(newTextField("childText", "text", Field.Store.NO));
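+    // addStoredString stores without indexing (old StoredField); addLargeText indexes analyzed text without storing (old TextField)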
+    child.addStoredString("childID", "0");
+    child.addLargeText("childText", "text");
 
     // parent last:
     docs.add(parent);
@@ -1301,11 +1311,10 @@
 
     docs.clear();
 
-    parent = new Document();
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
-    parent.add(new StoredField("parentID", "1"));
-    parent.add(new SortedDocValuesField("parentID", new BytesRef("1")));
+    parent = w.newDocument();
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
+    parent.addAtom("parentID", "1");
 
     // parent last:
     docs.add(parent);
@@ -1334,12 +1343,12 @@
     assertEquals(1, groups.totalGroupedHitCount);
 
     GroupDocs<Integer> group = groups.groups[0];
-    StoredDocument doc = r.document(group.groupValue.intValue());
-    assertEquals("0", doc.get("parentID"));
+    Document doc = r.document(group.groupValue.intValue());
+    assertEquals("0", doc.getString("parentID"));
 
     group = groups.groups[1];
     doc = r.document(group.groupValue.intValue());
-    assertEquals("1", doc.get("parentID"));
+    assertEquals("1", doc.getString("parentID"));
 
     r.close();
     d.close();
@@ -1349,18 +1358,17 @@
   public void testChildQueryNeverMatches() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document parent = new Document();
-    parent.add(new StoredField("parentID", "0"));
-    parent.add(new SortedDocValuesField("parentID", new BytesRef("0")));
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
+    Document parent = w.newDocument();
+    parent.addAtom("parentID", "0");
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
 
     List<Document> docs = new ArrayList<>();
 
-    Document child = new Document();
+    Document child = w.newDocument();
     docs.add(child);
-    child.add(new StoredField("childID", "0"));
-    child.add(newTextField("childText", "text", Field.Store.NO));
+    child.addStoredString("childID", "0");
+    child.addLargeText("childText", "text");
 
     // parent last:
     docs.add(parent);
@@ -1368,11 +1376,10 @@
 
     docs.clear();
 
-    parent = new Document();
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
-    parent.add(new StoredField("parentID", "1"));
-    parent.add(new SortedDocValuesField("parentID", new BytesRef("1")));
+    parent = w.newDocument();
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
+    parent.addAtom("parentID", "1");
     
 
     // parent last:
@@ -1404,12 +1411,12 @@
     assertEquals(0, groups.totalGroupedHitCount);
 
     GroupDocs<Integer> group = groups.groups[0];
-    StoredDocument doc = r.document(group.groupValue.intValue());
-    assertEquals("0", doc.get("parentID"));
+    Document doc = r.document(group.groupValue.intValue());
+    assertEquals("0", doc.getString("parentID"));
 
     group = groups.groups[1];
     doc = r.document(group.groupValue.intValue());
-    assertEquals("1", doc.get("parentID"));
+    assertEquals("1", doc.getString("parentID"));
 
     r.close();
     d.close();
@@ -1419,17 +1426,17 @@
   public void testChildQueryMatchesParent() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document parent = new Document();
-    parent.add(new StoredField("parentID", "0"));
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
+    Document parent = w.newDocument();
+    parent.addStoredString("parentID", "0");
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
 
     List<Document> docs = new ArrayList<>();
 
-    Document child = new Document();
+    Document child = w.newDocument();
     docs.add(child);
-    child.add(new StoredField("childID", "0"));
-    child.add(newTextField("childText", "text", Field.Store.NO));
+    child.addStoredString("childID", "0");
+    child.addLargeText("childText", "text");
 
     // parent last:
     docs.add(parent);
@@ -1437,10 +1444,10 @@
 
     docs.clear();
 
-    parent = new Document();
-    parent.add(newTextField("parentText", "text", Field.Store.NO));
-    parent.add(newStringField("isParent", "yes", Field.Store.NO));
-    parent.add(new StoredField("parentID", "1"));
+    parent = w.newDocument();
+    parent.addLargeText("parentText", "text");
+    parent.addAtom("isParent", "yes");
+    parent.addStoredString("parentID", "1");
 
     // parent last:
     docs.add(parent);
@@ -1477,25 +1484,25 @@
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
     // First doc with one child
-    Document parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "1", Field.Store.NO));
-    parentDoc.add(newStringField("isparent", "yes", Field.Store.NO));
-    Document childDoc = new Document();
-    childDoc.add(newStringField("child", "1", Field.Store.NO));
+    Document parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "1");
+    parentDoc.addAtom("isparent", "yes");
+    Document childDoc = w.newDocument();
+    childDoc.addAtom("child", "1");
     w.addDocuments(Arrays.asList(childDoc, parentDoc));
 
-    parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "2", Field.Store.NO));
-    parentDoc.add(newStringField("isparent", "yes", Field.Store.NO));
+    parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "2");
+    parentDoc.addAtom("isparent", "yes");
     w.addDocuments(Arrays.asList(parentDoc));
 
     w.deleteDocuments(new Term("parent", "2"));
 
-    parentDoc = new Document();
-    parentDoc.add(newStringField("parent", "2", Field.Store.NO));
-    parentDoc.add(newStringField("isparent", "yes", Field.Store.NO));
-    childDoc = new Document();
-    childDoc.add(newStringField("child", "2", Field.Store.NO));
+    parentDoc = w.newDocument();
+    parentDoc.addAtom("parent", "2");
+    parentDoc.addAtom("isparent", "yes");
+    childDoc = w.newDocument();
+    childDoc.addAtom("child", "2");
     w.addDocuments(Arrays.asList(childDoc, parentDoc));
 
     IndexReader r = w.getReader();
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
index 5620766..7be1081 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinSorting.java
@@ -17,17 +17,16 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -42,9 +41,6 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-import java.util.ArrayList;
-import java.util.List;
-
 /**
  */
 public class TestBlockJoinSorting extends LuceneTestCase {
@@ -56,177 +52,156 @@
         .setMergePolicy(NoMergePolicy.INSTANCE));
 
     List<Document> docs = new ArrayList<>();
-    Document document = new Document();
-    document.add(new StringField("field2", "a", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("a")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
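+    // Each addAtom below stands in for the old StringField + SortedDocValuesField pair: one call indexes the term and records the doc values this test sorts on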
+    Document document = w.newDocument();
+    document.addAtom("field2", "a");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "b", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("b")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "b");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "c", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "c");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "a", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "a");
     docs.add(document);
     w.addDocuments(docs);
     w.commit();
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "c", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "c");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "d", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("d")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "d");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "e", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "e");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "b", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "b");
     docs.add(document);
     w.addDocuments(docs);
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "e", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "e");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "f", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("f")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "f");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "g", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "g");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "c", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "c");
     docs.add(document);
     w.addDocuments(docs);
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "g", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "g");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "h", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("h")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "h");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "i", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "i");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "d", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "d");
     docs.add(document);
     w.addDocuments(docs);
     w.commit();
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "i", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "i");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "j", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("j")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "j");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "k", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "k");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "f", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "f");
     docs.add(document);
     w.addDocuments(docs);
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "k", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "k");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "l", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("l")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "l");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "m", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "m");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "g", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "g");
     docs.add(document);
     w.addDocuments(docs);
 
     // This doc will not be included, because it doesn't have nested docs
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "h", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "h");
     w.addDocument(document);
 
     docs.clear();
-    document = new Document();
-    document.add(new StringField("field2", "m", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
-    document.add(new StringField("filter_1", "T", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "m");
+    document.addAtom("filter_1", "T");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "n", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("n")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "n");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("field2", "o", Field.Store.NO));
-    document.add(new SortedDocValuesField("field2", new BytesRef("o")));
-    document.add(new StringField("filter_1", "F", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("field2", "o");
+    document.addAtom("filter_1", "F");
     docs.add(document);
-    document = new Document();
-    document.add(new StringField("__type", "parent", Field.Store.NO));
-    document.add(new StringField("field1", "i", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("__type", "parent");
+    document.addAtom("field1", "i");
     docs.add(document);
     w.addDocuments(docs);
     w.commit();
 
     // Some garbage docs, just to check if the NestedFieldComparator can deal with this.
-    document = new Document();
-    document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("fieldXXX", "x");
     w.addDocument(document);
-    document = new Document();
-    document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("fieldXXX", "x");
     w.addDocument(document);
-    document = new Document();
-    document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+    document = w.newDocument();
+    document.addAtom("fieldXXX", "x");
     w.addDocument(document);
 
     IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w.w, false));
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
index 596f3ab..b7a6837 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
@@ -22,7 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -65,7 +64,7 @@
     final IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter indexWriter = new IndexWriter(directory, config);
     for (int i = 0; i < AMOUNT_OF_SEGMENTS; i++) {
-      List<Document> segmentDocs = createDocsForSegment(i);
+      List<Document> segmentDocs = createDocsForSegment(indexWriter, i);
       indexWriter.addDocuments(segmentDocs);
       indexWriter.commit();
     }
@@ -150,10 +149,10 @@
     directory.close();
   }
 
-  private static List<Document> createDocsForSegment(int segmentNumber) {
+  private static List<Document> createDocsForSegment(IndexWriter w, int segmentNumber) {
     List<List<Document>> blocks = new ArrayList<>(AMOUNT_OF_PARENT_DOCS);
     for (int i = 0; i < AMOUNT_OF_PARENT_DOCS; i++) {
-      blocks.add(createParentDocWithChildren(segmentNumber, i));
+      blocks.add(createParentDocWithChildren(w, segmentNumber, i));
     }
     List<Document> result = new ArrayList<>(AMOUNT_OF_DOCS_IN_SEGMENT);
     for (List<Document> block : blocks) {
@@ -162,28 +161,28 @@
     return result;
   }
 
-  private static List<Document> createParentDocWithChildren(int segmentNumber, int parentNumber) {
+  private static List<Document> createParentDocWithChildren(IndexWriter w, int segmentNumber, int parentNumber) {
     List<Document> result = new ArrayList<>(AMOUNT_OF_CHILD_DOCS + 1);
     for (int i = 0; i < AMOUNT_OF_CHILD_DOCS; i++) {
-      result.add(createChildDoc(segmentNumber, parentNumber, i));
+      result.add(createChildDoc(w, segmentNumber, parentNumber, i));
     }
-    result.add(createParentDoc(segmentNumber, parentNumber));
+    result.add(createParentDoc(w, segmentNumber, parentNumber));
     return result;
   }
 
-  private static Document createParentDoc(int segmentNumber, int parentNumber) {
-    Document result = new Document();
-    result.add(newStringField("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber), Field.Store.YES));
-    result.add(newStringField("parent", createFieldValue(parentNumber), Field.Store.NO));
-    result.add(newStringField("common_field", "1", Field.Store.NO));
+  private static Document createParentDoc(IndexWriter w, int segmentNumber, int parentNumber) {
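+    // Documents are created through the writer now, hence the added IndexWriter parameter on these helpers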
+    Document result = w.newDocument();
+    result.addAtom("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber));
+    result.addAtom("parent", createFieldValue(parentNumber));
+    result.addAtom("common_field", "1");
     return result;
   }
 
-  private static Document createChildDoc(int segmentNumber, int parentNumber, int childNumber) {
-    Document result = new Document();
-    result.add(newStringField("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber, childNumber), Field.Store.YES));
-    result.add(newStringField("child", createFieldValue(childNumber), Field.Store.NO));
-    result.add(newStringField("common_field", "1", Field.Store.NO));
+  private static Document createChildDoc(IndexWriter w, int segmentNumber, int parentNumber, int childNumber) {
+    Document result = w.newDocument();
+    result.addAtom("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber, childNumber));
+    result.addAtom("child", createFieldValue(childNumber));
+    result.addAtom("common_field", "1");
     return result;
   }
 
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index 2397fa9..605c14f 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -33,10 +33,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
@@ -89,56 +86,46 @@
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     // 0
-    Document doc = new Document();
-    doc.add(new TextField("description", "random text", Field.Store.NO));
-    doc.add(new TextField("name", "name1", Field.Store.NO));
-    doc.add(new TextField(idField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
+    Document doc = w.newDocument();
+    doc.addLargeText("description", "random text");
+    doc.addLargeText("name", "name1");
+    doc.addAtom(idField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    doc.add(new TextField("price", "10.0", Field.Store.NO));
-    doc.add(new TextField(idField, "2", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
-    doc.add(new TextField(toField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
+    doc = w.newDocument();
+    doc.addLargeText("price", "10.0");
+    doc.addAtom(idField, new BytesRef("2"));
+    doc.addAtom(toField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
-    doc.add(new TextField("price", "20.0", Field.Store.NO));
-    doc.add(new TextField(idField, "3", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
-    doc.add(new TextField(toField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
+    doc = w.newDocument();
+    doc.addLargeText("price", "20.0");
+    doc.addAtom(idField, new BytesRef("3"));
+    doc.addAtom(toField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 3
-    doc = new Document();
-    doc.add(new TextField("description", "more random text", Field.Store.NO));
-    doc.add(new TextField("name", "name2", Field.Store.NO));
-    doc.add(new TextField(idField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("description", "more random text");
+    doc.addLargeText("name", "name2");
+    doc.addAtom(idField, new BytesRef("4"));
     w.addDocument(doc);
     w.commit();
 
     // 4
-    doc = new Document();
-    doc.add(new TextField("price", "10.0", Field.Store.NO));
-    doc.add(new TextField(idField, "5", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
-    doc.add(new TextField(toField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("price", "10.0");
+    doc.addAtom(idField, new BytesRef("5"));
+    doc.addAtom(toField, new BytesRef("4"));
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
-    doc.add(new TextField("price", "20.0", Field.Store.NO));
-    doc.add(new TextField(idField, "6", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
-    doc.add(new TextField(toField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("price", "20.0");
+    doc.addAtom(idField, new BytesRef("6"));
+    doc.addAtom(toField, new BytesRef("4"));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -187,24 +174,27 @@
         random(),
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
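+    // The field is declared multi-valued in the schema up front, before any document adds several values for it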
+    if (multipleValues) {
+      FieldTypes fieldTypes = w.getFieldTypes();
+      fieldTypes.setMultiValued(toField);
+    }
 
     // 0
-    Document doc = new Document();
-    doc.add(new TextField("description", "random text", Field.Store.NO));
-    doc.add(new TextField("name", "name1", Field.Store.NO));
-    doc.add(new TextField(idField, "0", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("0")));
+    Document doc = w.newDocument();
+    doc.addLargeText("description", "random text");
+    doc.addLargeText("name", "name1");
+    doc.addAtom(idField, new BytesRef("0"));
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new TextField("price", "10.0", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("price", "10.0");
 
     if (multipleValues) {
       for(int i=0;i<300;i++) {
-        doc.add(new SortedSetDocValuesField(toField, new BytesRef(""+i)));
+        doc.addAtom(toField, new BytesRef(""+i));
       }
     } else {
-      doc.add(new SortedDocValuesField(toField, new BytesRef("0")));
+      doc.addAtom(toField, new BytesRef("0"));
     }
     w.addDocument(doc);
 
@@ -237,51 +227,45 @@
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     // 0
-    Document doc = new Document();
-    doc.add(new TextField("description", "random text", Field.Store.NO));
-    doc.add(new TextField("name", "name1", Field.Store.NO));
-    doc.add(new TextField(idField, "7", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("7")));
+    Document doc = w.newDocument();
+    doc.addLargeText("description", "random text");
+    doc.addLargeText("name", "name1");
+    doc.addAtom(idField, new BytesRef("7"));
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    doc.add(new TextField("price", "10.0", Field.Store.NO));
-    doc.add(new TextField(idField, "2", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
-    doc.add(new TextField(toField, "7", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("price", "10.0");
+    doc.addAtom(idField, new BytesRef("2"));
+    doc.addLargeText(toField, "7");
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
-    doc.add(new TextField("price", "20.0", Field.Store.NO));
-    doc.add(new TextField(idField, "3", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
-    doc.add(new TextField(toField, "7", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("price", "20.0");
+    doc.addAtom(idField, new BytesRef("3"));
+    doc.addLargeText(toField, "7");
     w.addDocument(doc);
 
     // 3
-    doc = new Document();
-    doc.add(new TextField("description", "more random text", Field.Store.NO));
-    doc.add(new TextField("name", "name2", Field.Store.NO));
-    doc.add(new TextField(idField, "0", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("description", "more random text");
+    doc.addLargeText("name", "name2");
     w.addDocument(doc);
     w.commit();
 
     // 4
-    doc = new Document();
-    doc.add(new TextField("price", "10.0", Field.Store.NO));
-    doc.add(new TextField(idField, "5", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
-    doc.add(new TextField(toField, "0", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("price", "10.0");
+    doc.addAtom(idField, new BytesRef("5"));
+    doc.addLargeText(toField, "0");
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
-    doc.add(new TextField("price", "20.0", Field.Store.NO));
-    doc.add(new TextField(idField, "6", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
-    doc.add(new TextField(toField, "0", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("price", "20.0");
+    doc.addAtom(idField, new BytesRef("6"));
+    doc.addLargeText(toField, "0");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -327,56 +311,46 @@
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
     // 0
-    Document doc = new Document();
-    doc.add(new TextField("description", "A random movie", Field.Store.NO));
-    doc.add(new TextField("name", "Movie 1", Field.Store.NO));
-    doc.add(new TextField(idField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
+    Document doc = w.newDocument();
+    doc.addLargeText("description", "A random movie");
+    doc.addLargeText("name", "Movie 1");
+    doc.addAtom(idField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 1
-    doc = new Document();
-    doc.add(new TextField("subtitle", "The first subtitle of this movie", Field.Store.NO));
-    doc.add(new TextField(idField, "2", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
-    doc.add(new TextField(toField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
+    doc = w.newDocument();
+    doc.addLargeText("subtitle", "The first subtitle of this movie");
+    doc.addAtom(idField, new BytesRef("2"));
+    doc.addAtom(toField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 2
-    doc = new Document();
-    doc.add(new TextField("subtitle", "random subtitle; random event movie", Field.Store.NO));
-    doc.add(new TextField(idField, "3", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
-    doc.add(new TextField(toField, "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
+    doc = w.newDocument();
+    doc.addLargeText("subtitle", "random subtitle; random event movie");
+    doc.addAtom(idField, new BytesRef("3"));
+    doc.addAtom(toField, new BytesRef("1"));
     w.addDocument(doc);
 
     // 3
-    doc = new Document();
-    doc.add(new TextField("description", "A second random movie", Field.Store.NO));
-    doc.add(new TextField("name", "Movie 2", Field.Store.NO));
-    doc.add(new TextField(idField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("description", "A second random movie");
+    doc.addLargeText("name", "Movie 2");
+    doc.addAtom(idField, new BytesRef("4"));
     w.addDocument(doc);
     w.commit();
 
     // 4
-    doc = new Document();
-    doc.add(new TextField("subtitle", "a very random event happened during christmas night", Field.Store.NO));
-    doc.add(new TextField(idField, "5", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
-    doc.add(new TextField(toField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("subtitle", "a very random event happened during christmas night");
+    doc.addAtom(idField, new BytesRef("5"));
+    doc.addAtom(toField, new BytesRef("4"));
     w.addDocument(doc);
 
     // 5
-    doc = new Document();
-    doc.add(new TextField("subtitle", "movie end movie test 123 test 123 random", Field.Store.NO));
-    doc.add(new TextField(idField, "6", Field.Store.NO));
-    doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
-    doc.add(new TextField(toField, "4", Field.Store.NO));
-    doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
+    doc = w.newDocument();
+    doc.addLargeText("subtitle", "movie end movie test 123 test 123 random");
+    doc.addAtom(idField, new BytesRef("6"));
+    doc.addAtom(toField, new BytesRef("4"));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -564,11 +538,17 @@
       String id = Integer.toString(i);
       int randomI = random().nextInt(context.randomUniqueValues.length);
       String value = context.randomUniqueValues[randomI];
-      Document document = new Document();
-      document.add(newTextField(random(), "id", id, Field.Store.NO));
-      document.add(newTextField(random(), "value", value, Field.Store.NO));
-
       boolean from = context.randomFrom[randomI];
+      RandomIndexWriter writer = from ? fromWriter : toWriter;
+      if (multipleValuesPerDocument) {
+        FieldTypes fieldTypes = writer.getFieldTypes();
+        fieldTypes.setMultiValued("from");
+        fieldTypes.setMultiValued("to");
+      }
+      Document document = writer.newDocument();
+      document.addLargeText("id", id);
+      document.addLargeText("value", value);
+
       int numberOfLinkValues = multipleValuesPerDocument ? 2 + random().nextInt(10) : 1;
       docs[i] = new RandomDoc(id, numberOfLinkValues, value, from);
       for (int j = 0; j < numberOfLinkValues; j++) {
@@ -584,12 +564,7 @@
 
           context.fromDocuments.get(linkValue).add(docs[i]);
           context.randomValueFromDocs.get(value).add(docs[i]);
-          document.add(newTextField(random(), "from", linkValue, Field.Store.NO));
-          if (multipleValuesPerDocument) {
-            document.add(new SortedSetDocValuesField("from", new BytesRef(linkValue)));
-          } else {
-            document.add(new SortedDocValuesField("from", new BytesRef(linkValue)));
-          }
+          document.addAtom("from", new BytesRef(linkValue));
         } else {
           if (!context.toDocuments.containsKey(linkValue)) {
             context.toDocuments.put(linkValue, new ArrayList<RandomDoc>());
@@ -600,12 +575,7 @@
 
           context.toDocuments.get(linkValue).add(docs[i]);
           context.randomValueToDocs.get(value).add(docs[i]);
-          document.add(newTextField(random(), "to", linkValue, Field.Store.NO));
-          if (multipleValuesPerDocument) {
-            document.add(new SortedSetDocValuesField("to", new BytesRef(linkValue)));
-          } else {
-            document.add(new SortedDocValuesField("to", new BytesRef(linkValue)));
-          }
+          document.addAtom("to", new BytesRef(linkValue));
         }
       }
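
The churn in this hunk is the heart of the patch: per-field construction (TextField, SortedDocValuesField, SortedSetDocValuesField) collapses into a schema held by the writer. A minimal sketch of the new pattern, using only calls that appear in this patch (FieldTypes, newDocument, addAtom); the branch-specific API is assumed as shown above:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.setMultiValued("to");         // declare multi-valued before adding docs
    Document doc = w.newDocument();          // documents are created by the writer
    doc.addAtom("to", new BytesRef("1"));    // one addAtom call per value
    doc.addAtom("to", new BytesRef("2"));    // legal only because "to" is multi-valued
    w.addDocument(doc);
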
 
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index a578102..2695e38 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -32,6 +32,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.DocsAndPositionsEnum;
@@ -63,12 +64,12 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefArray;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
+import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.Counter;
-import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.IntBlockPool.SliceReader;
 import org.apache.lucene.util.IntBlockPool.SliceWriter;
+import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util.RecyclingIntBlockAllocator;
@@ -767,6 +768,13 @@
       return fields.get(fieldName);
     }
 
+    final FieldTypes fieldTypes = new FieldTypes(null);
+
+    @Override
+    public FieldTypes getFieldTypes() {
+      return fieldTypes;
+    }
+
     @Override
     public Bits getLiveDocs() {
       return null;
@@ -945,6 +953,7 @@
       public void seekExact(long ord) {
         assert ord < info.terms.size();
         termUpto = (int) ord;
+        info.terms.get(info.sortedTerms[termUpto], br);
       }
       
       @Override
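
The added line in seekExact(long) fixes a stale-term bug: an ordinal seek that only updates termUpto leaves the shared BytesRef pointing at the previous term, yet TermsEnum.term() must reflect the latest seek. A sketch of the contract the fix restores, using the standard TermsEnum API:

    TermsEnum te = terms.iterator(null);
    te.seekExact(0L);         // position on the first term by ordinal
    BytesRef t = te.term();   // now valid, loaded by the added info.terms.get(...) call
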
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index a507552..74dc4fc 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -37,11 +37,10 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
@@ -150,11 +149,9 @@
     IndexWriter writer = new IndexWriter(ramdir,
                                          new IndexWriterConfig(analyzer).setCodec(
                                              TestUtil.alwaysPostingsFormat(TestUtil.getDefaultPostingsFormat())));
-    Document doc = new Document();
-    Field field1 = newTextField("foo", fooField.toString(), Field.Store.NO);
-    Field field2 = newTextField("term", termField.toString(), Field.Store.NO);
-    doc.add(field1);
-    doc.add(field2);
+    Document doc = writer.newDocument();
+    doc.addLargeText("foo", fooField.toString());
+    doc.addLargeText("term", termField.toString());
     writer.addDocument(doc);
     writer.close();
     
@@ -431,7 +428,6 @@
   }
 
   public void testDuellMemIndex() throws IOException {
-    LineFileDocs lineFileDocs = new LineFileDocs(random());
     int numDocs = atLeast(10);
     MemoryIndex memory = randomMemoryIndex();
     for (int i = 0; i < numDocs; i++) {
@@ -439,21 +435,23 @@
       MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
       mockAnalyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
+      LineFileDocs lineFileDocs = new LineFileDocs(writer, random());
       Document nextDoc = lineFileDocs.nextDoc();
-      Document doc = new Document();
-      for (Field field : nextDoc.getFields()) {
-        if (field.fieldType().indexOptions() != IndexOptions.NONE) {
+      Document doc = writer.newDocument();
+      for (IndexableField field : nextDoc.getFields()) {
+        if (field.fieldType().indexOptions() != IndexOptions.NONE && field.stringValue() != null && field.fieldType().docValuesType() == DocValuesType.NONE) {
           doc.add(field);
           if (random().nextInt(3) == 0) {
             doc.add(field);  // randomly add the same field twice
           }
         }
       }
+      lineFileDocs.close();
       
       writer.addDocument(doc);
       writer.close();
-      for (IndexableField field : doc.indexableFields()) {
-        memory.addField(field.name(), ((Field)field).stringValue(), mockAnalyzer);  
+      for (IndexableField field : doc) {
+        memory.addField(field.name(), field.stringValue(), mockAnalyzer);  
       }
       DirectoryReader competitor = DirectoryReader.open(dir);
       LeafReader memIndexReader= (LeafReader) memory.createSearcher().getIndexReader();
@@ -462,7 +460,6 @@
       memory.reset();
       dir.close();
     }
-    lineFileDocs.close();
   }
   
   // LUCENE-4880
@@ -482,19 +479,19 @@
       mockAnalyzer.setOffsetGap(random().nextInt(100));
     }
     //index into a random directory
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPayloads(false);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-
-    Document doc = new Document();
-    doc.add(new Field(field_name, "la la", type));
-    doc.add(new Field(field_name, "foo bar foo bar foo", type));
-
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(field_name);
+    fieldTypes.enableTermVectorPositions(field_name);
+    fieldTypes.enableTermVectorOffsets(field_name);
+    fieldTypes.enableTermVectorPayloads(field_name);
+    fieldTypes.setMultiValued(field_name);
+
+    Document doc = writer.newDocument();
+    doc.addLargeText(field_name, "la la");
+    doc.addLargeText(field_name, "foo bar foo bar foo");
+
     writer.updateDocument(new Term("id", "1"), doc);
     writer.commit();
     writer.close();
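
For context, the duel test exercises MemoryIndex, whose public surface is untouched here; only the document construction feeding it changes. A minimal usage sketch, assuming an Analyzer named analyzer is in scope:

    MemoryIndex memory = new MemoryIndex();
    memory.addField("content", "quick brown fox", analyzer);  // analyzed, buffered in RAM
    IndexSearcher searcher = memory.createSearcher();          // searchable without a Directory
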
diff --git a/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java b/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java
index 784db2c..54a30e4 100644
--- a/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java
+++ b/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java
@@ -17,20 +17,18 @@
  */
 import java.io.IOException;
 import java.io.Reader;
-import java.util.List;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Map;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.util.BytesRef;
 
 /** Defers actually loading a field's value until you ask
@@ -43,7 +41,7 @@
   private final int docID;
 
   // null until first field is loaded
-  private StoredDocument doc;
+  private Document doc;
 
   private Map<Integer,List<LazyField>> fields = new HashMap<>();
   private Set<String> fieldNames = new HashSet<>();
@@ -64,11 +62,11 @@
    * </p>
    * <p>
-   * The lazy loading of field values from all instances of StorableField 
-   * objects returned by this method are all backed by a single StoredDocument 
+   * The lazy loading of field values from all instances of IndexableField
+   * objects returned by this method is backed by a single Document
    * per LazyDocument instance.
    * </p>
    */
-  public StorableField getField(FieldInfo fieldInfo) {  
+  public IndexableField getField(FieldInfo fieldInfo) {  
 
     fieldNames.add(fieldInfo.name);
     List<LazyField> values = fields.get(fieldInfo.number);
@@ -94,7 +92,7 @@
    * non-private for test only access
    * @lucene.internal 
    */
-  synchronized StoredDocument getDocument() {
+  synchronized Document getDocument() {
     if (doc == null) {
       try {
         doc = reader.document(docID, fieldNames);
@@ -107,18 +105,18 @@
 
-  // :TODO: synchronize to prevent redundent copying? (sync per field name?)
+  // :TODO: synchronize to prevent redundant copying? (sync per field name?)
   private void fetchRealValues(String name, int fieldNum) {
-    StoredDocument d = getDocument();
+    Document d = getDocument();
 
     List<LazyField> lazyValues = fields.get(fieldNum);
-    StorableField[] realValues = d.getFields(name);
+    List<IndexableField> realValues = d.getFields(name);
     
-    assert realValues.length <= lazyValues.size() 
+    assert realValues.size() <= lazyValues.size() 
-      : "More lazy values then real values for field: " + name;
+      : "More lazy values than real values for field: " + name;
     
     for (int i = 0; i < lazyValues.size(); i++) {
       LazyField f = lazyValues.get(i);
       if (null != f) {
-        f.realValue = realValues[i];
+        f.realValue = realValues.get(i);
       }
     }
   }
@@ -127,10 +125,10 @@
   /** 
    * @lucene.internal 
    */
-  public class LazyField implements StorableField {
+  public class LazyField implements IndexableField {
     private String name;
     private int fieldNum;
-    volatile StorableField realValue = null;
+    volatile IndexableField realValue = null;
 
     private LazyField(String name, int fieldNum) {
       this.name = name;
@@ -145,7 +143,7 @@
       return null != realValue;
     }
 
-    private StorableField getRealValue() {
+    private IndexableField getRealValue() {
       if (null == realValue) {
         fetchRealValues(name, fieldNum);
       }
@@ -162,22 +160,37 @@
     }
 
     @Override
+    public float boost() {
+      return 1.0f;
+    }
+
+    @Override
+    public TokenStream tokenStream(TokenStream reuse) {
+      return null;
+    }
+
+    @Override
     public BytesRef binaryValue() {
       return getRealValue().binaryValue();
     }
 
     @Override
+    public BytesRef binaryDocValue() {
+      return getRealValue().binaryDocValue();
+    }
+
+    @Override
     public String stringValue() {
       return getRealValue().stringValue();
     }
 
     @Override
-    public Reader readerValue() {
-      return getRealValue().readerValue();
+    public Number numericValue() {
+      return getRealValue().numericValue();
     }
 
     @Override
-    public Number numericValue() {
-      return getRealValue().numericValue();
+    public Number numericDocValue() {
+      return getRealValue().numericDocValue();
     }
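
The type migration preserves LazyDocument's contract: getField hands back a placeholder, and the first access to any lazy value loads the stored document once, shared by every LazyField of that instance. A sketch, assuming reader, docID and fieldInfo are in scope:

    LazyDocument lazyDoc = new LazyDocument(reader, docID);
    IndexableField f = lazyDoc.getField(fieldInfo);  // nothing read from the index yet
    String value = f.stringValue();                  // triggers the single reader.document(...) load
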
 
diff --git a/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java b/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
index c01c35b..a27a0c5 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
@@ -24,6 +24,7 @@
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 
 /** this is a hack to make SortingMP fast! */
@@ -85,6 +86,11 @@
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return in.getFieldTypes();
+  }
+
+  @Override
   public NumericDocValues getNumericDocValues(String field) throws IOException {
     ensureOpen();
     FieldInfo fi = getFieldInfos().fieldInfo(field);
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
index bf4fb65..dcaf598 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
@@ -20,15 +20,10 @@
 import java.io.IOException;
 import java.io.PrintStream;
 
-import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
@@ -87,92 +82,74 @@
   /** Expert: The cache used internally by sorting and range query classes. */
   public static FieldCache DEFAULT = new FieldCacheImpl();
 
-  /**
-   * A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
-   * via {@link IntField}/{@link NumericTokenStream}.
-   */
-  public static final Parser NUMERIC_UTILS_INT_PARSER = new Parser() {
+  /** A parser instance for int values encoded as fixed-width binary terms, e.g. when indexed via {@link Document#addInt}. */
+  public static final Parser DOCUMENT_INT_PARSER = new Parser() {
     @Override
     public long parseValue(BytesRef term) {
-      return NumericUtils.prefixCodedToInt(term);
+      return NumericUtils.bytesToInt(term);
     }
     
     @Override
     public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
+      return terms.iterator(null);
     }
     
     @Override
     public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER"; 
+      return FieldCache.class.getName()+".DOCUMENT_INT_PARSER"; 
     }
   };
 
-  /**
-   * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
-   * via {@link FloatField}/{@link NumericTokenStream}.
-   */
-  public static final Parser NUMERIC_UTILS_FLOAT_PARSER = new Parser() {
+  /** A parser instance for float values encoded as fixed-width binary terms. */
+  public static final Parser DOCUMENT_FLOAT_PARSER = new Parser() {
     @Override
     public long parseValue(BytesRef term) {
-      int val = NumericUtils.prefixCodedToInt(term);
-      if (val<0) val ^= 0x7fffffff;
-      return val;
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER"; 
+      return NumericUtils.floatToInt(NumericUtils.bytesToFloat(term));
     }
     
     @Override
     public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedInts(terms.iterator(null));
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT_FLOAT_PARSER"; 
     }
   };
 
-  /**
-   * A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
-   * via {@link LongField}/{@link NumericTokenStream}.
-   */
-  public static final Parser NUMERIC_UTILS_LONG_PARSER = new Parser() {
+  /** A parser instance for long values encoded as fixed-width binary terms, e.g. when indexed via {@link Document#addLong}. */
+  public static final Parser DOCUMENT_LONG_PARSER = new Parser() {
     @Override
     public long parseValue(BytesRef term) {
-      return NumericUtils.prefixCodedToLong(term);
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER"; 
+      return NumericUtils.bytesToLong(term);
     }
     
     @Override
     public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT_LONG_PARSER"; 
     }
   };
 
-  /**
-   * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
-   * via {@link DoubleField}/{@link NumericTokenStream}.
-   */
-  public static final Parser NUMERIC_UTILS_DOUBLE_PARSER = new Parser() {
+  /** A parser instance for double values encoded as fixed-width binary terms. */
+  public static final Parser DOCUMENT_DOUBLE_PARSER = new Parser() {
     @Override
     public long parseValue(BytesRef term) {
-      long val = NumericUtils.prefixCodedToLong(term);
-      if (val<0) val ^= 0x7fffffffffffffffL;
-      return val;
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER"; 
+      return NumericUtils.doubleToLong(NumericUtils.bytesToDouble(term));
     }
     
     @Override
     public TermsEnum termsEnum(Terms terms) throws IOException {
-      return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT_DOUBLE_PARSER"; 
     }
   };
-  
+
   /** Checks the internal cache for an appropriate entry, and if none is found,
    *  reads the terms in <code>field</code> and returns a bit set at the size of
    *  <code>reader.maxDoc()</code>, with turned on bits for each docid that 
@@ -246,11 +223,6 @@
    *  subsequent calls will share the same cache entry. */
   public SortedDocValues getTermsIndex(LeafReader reader, String field, float acceptableOverheadRatio) throws IOException;
 
-  /** Can be passed to {@link #getDocTermOrds} to filter for 32-bit numeric terms */
-  public static final BytesRef INT32_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_INT });
-  /** Can be passed to {@link #getDocTermOrds} to filter for 64-bit numeric terms */
-  public static final BytesRef INT64_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_LONG });
-  
   /**
    * Checks the internal cache for an appropriate entry, and if none is found, reads the term values
    * in <code>field</code> and returns a {@link DocTermOrds} instance, providing a method to retrieve
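
Because each value is now encoded as a single fixed-width term rather than a set of prefix-coded trie terms, every termsEnum implementation above degenerates to the unfiltered iterator. A hedged sketch of how the renamed parsers are consumed (the same call UninvertingReader makes below):

    NumericDocValues values = FieldCache.DEFAULT.getNumerics(
        leafReader, "price", FieldCache.DOCUMENT_LONG_PARSER, true);
    long price = values.get(docID);  // decoded via NumericUtils.bytesToLong
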
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
index 6b18fbf..b90f07d 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
@@ -884,7 +884,7 @@
   // should share it...
   public SortedSetDocValues getDocTermOrds(LeafReader reader, String field, BytesRef prefix) throws IOException {
     // not a general purpose filtering mechanism...
-    assert prefix == null || prefix == INT32_TERM_PREFIX || prefix == INT64_TERM_PREFIX;
+    assert prefix == null;
     
     SortedSetDocValues dv = reader.getSortedSetDocValues(field);
     if (dv != null) {
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
index 4d20ed4..6fa3ad2 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
@@ -21,15 +21,6 @@
 import java.util.ArrayList;
 import java.util.Map;
 
-import org.apache.lucene.document.BinaryDocValuesField; // javadocs
-import org.apache.lucene.document.DoubleField; // javadocs
-import org.apache.lucene.document.FloatField; // javadocs
-import org.apache.lucene.document.IntField; // javadocs
-import org.apache.lucene.document.LongField; // javadocs
-import org.apache.lucene.document.NumericDocValuesField; // javadocs
-import org.apache.lucene.document.SortedDocValuesField; // javadocs
-import org.apache.lucene.document.SortedSetDocValuesField; // javadocs
-import org.apache.lucene.document.StringField; // javadocs
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValuesType;
@@ -63,7 +54,7 @@
    */
   public static enum Type {
     /** 
-     * Single-valued Integer, (e.g. indexed with {@link IntField})
+     * Single-valued Integer, (e.g. indexed with {@link Document#addInt})
      * <p>
-     * Fields with this type act as if they were indexed with
-     * {@link NumericDocValuesField}.
+     * Fields with this type act as if they were indexed with
+     * numeric doc values.
@@ -91,31 +82,31 @@
      */
     DOUBLE,
     /** 
-     * Single-valued Binary, (e.g. indexed with {@link StringField}) 
+     * Single-valued Binary, (e.g. indexed with {@link Document#addAtom}) 
      * <p>
      * Fields with this type act as if they were indexed with
-     * {@link BinaryDocValuesField}.
+     * an unsorted {@link Document#addBinary} field.
      */
     BINARY,
     /** 
-     * Single-valued Binary, (e.g. indexed with {@link StringField}) 
+     * Single-valued Binary, (e.g. indexed with {@link Document#addAtom})
+     * without doc values.
      * <p>
-     * Fields with this type act as if they were indexed with
-     * {@link SortedDocValuesField}.
+     * Fields with this type act as if they were indexed with sorted doc values.
      */
     SORTED,
     /** 
-     * Multi-valued Binary, (e.g. indexed with {@link StringField}) 
+     * Multi-valued Binary, (e.g. indexed with {@link Document#addAtom}) 
      * <p>
      * Fields with this type act as if they were indexed with
-     * {@link SortedSetDocValuesField}.
+     * multi-valued {@link Document#addAtom}.
      */
     SORTED_SET_BINARY,
     /** 
-     * Multi-valued Integer, (e.g. indexed with {@link IntField}) 
+     * Multi-valued Integer, (e.g. indexed with {@link Document#addInt}) 
      * <p>
      * Fields with this type act as if they were indexed with
-     * {@link SortedSetDocValuesField}.
+     * multi-valued {@link Document#addInt}.
      */
     SORTED_SET_INTEGER,
     /** 
@@ -230,10 +221,14 @@
     Type v = getType(field);
     if (v != null) {
       switch (v) {
-        case INTEGER: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_INT_PARSER, true);
-        case FLOAT: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
-        case LONG: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
-        case DOUBLE: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+        case INTEGER:
+          return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.DOCUMENT_INT_PARSER, true);
+        case FLOAT:
+          return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.DOCUMENT_FLOAT_PARSER, true);
+        case LONG:
+          return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.DOCUMENT_LONG_PARSER, true);
+        case DOUBLE:
+          return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.DOCUMENT_DOUBLE_PARSER, true);
       }
     }
     return super.getNumericDocValues(field);
@@ -264,14 +259,14 @@
     Type v = getType(field);
     if (v != null) {
       switch (v) {
-        case SORTED_SET_INTEGER:
-        case SORTED_SET_FLOAT: 
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT32_TERM_PREFIX);
-        case SORTED_SET_LONG:
-        case SORTED_SET_DOUBLE:
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT64_TERM_PREFIX);
-        case SORTED_SET_BINARY:
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
+      case SORTED_SET_INTEGER:
+      case SORTED_SET_FLOAT:
+      case SORTED_SET_LONG:
+      case SORTED_SET_DOUBLE:
+      case SORTED_SET_BINARY:
+        return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
       }
     }
     return in.getSortedSetDocValues(field);
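
The Type mapping itself is unchanged: a field indexed without doc values can still be uninverted at open time. A minimal sketch using the existing wrap API:

    Map<String,UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("id", UninvertingReader.Type.SORTED);   // uninvert "id" into sorted doc values
    DirectoryReader wrapped = UninvertingReader.wrap(reader, mapping);
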
diff --git a/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java b/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java
index 7008f3c..9f80591 100644
--- a/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java
+++ b/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java
@@ -16,19 +16,20 @@
  */
 package org.apache.lucene.document;
 
-import java.util.Arrays;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.HashMap;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.store.*;
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
-
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.LuceneTestCase;
 import org.junit.After;
 import org.junit.Before;
 
@@ -57,17 +58,21 @@
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter writer = new IndexWriter
       (dir, newIndexWriterConfig(analyzer));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String f : FIELDS) {
+      fieldTypes.setMultiValued(f);
+    }
     try {
       for (int docid = 0; docid < NUM_DOCS; docid++) {
-        Document d = new Document();
-        d.add(newStringField("docid", ""+docid, Field.Store.YES));
-        d.add(newStringField("never_load", "fail", Field.Store.YES));
+        Document d = writer.newDocument();
+        d.addAtom("docid", ""+docid);
+        d.addAtom("never_load", "fail");
         for (String f : FIELDS) {
           for (int val = 0; val < NUM_VALUES; val++) {
-            d.add(newStringField(f, docid+"_"+f+"_"+val, Field.Store.YES));
+            d.addAtom(f, docid+"_"+f+"_"+val);
           }
         }
-        d.add(newStringField("load_later", "yes", Field.Store.YES));
+        d.addAtom("load_later", "yes");
         writer.addDocument(d);
       }
     } finally {
@@ -84,34 +89,34 @@
       ScoreDoc[] hits = searcher.search(q, 100).scoreDocs;
       assertEquals("Too many docs", 1, hits.length);
       LazyTestingStoredFieldVisitor visitor 
-        = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc),
+        = new LazyTestingStoredFieldVisitor(reader.getFieldTypes(), new LazyDocument(reader, hits[0].doc),
                                             FIELDS);
       reader.document(hits[0].doc, visitor);
-      StoredDocument d = visitor.doc;
+      List<FieldValue> d = visitor.doc;
 
       int numFieldValues = 0;
       Map<String,Integer> fieldValueCounts = new HashMap<>();
 
       // at this point, all FIELDS should be Lazy and unrealized
-      for (StorableField f : d) {
+      for (FieldValue f : d) {
         numFieldValues++;   
-        if (f.name().equals("never_load")) {
+        if (f.name.equals("never_load")) {
           fail("never_load was loaded");
         }
-        if (f.name().equals("load_later")) {
+        if (f.name.equals("load_later")) {
           fail("load_later was loaded on first pass");
         }
-        if (f.name().equals("docid")) {
-          assertFalse(f.name(), f instanceof LazyDocument.LazyField);
+        if (f.name.equals("docid")) {
+          assertFalse(f.name, f.value instanceof LazyDocument.LazyField);
         } else {
-          int count = fieldValueCounts.containsKey(f.name()) ?
-            fieldValueCounts.get(f.name()) : 0;
+          int count = fieldValueCounts.containsKey(f.name) ?
+            fieldValueCounts.get(f.name) : 0;
           count++;
-          fieldValueCounts.put(f.name(), count);
-          assertTrue(f.name() + " is " + f.getClass(),
-                     f instanceof LazyDocument.LazyField);
-          LazyDocument.LazyField lf = (LazyDocument.LazyField) f;
-          assertFalse(f.name() + " is loaded", lf.hasBeenLoaded());
+          fieldValueCounts.put(f.name, count);
+          assertTrue(f.name + " is " + f.value.getClass(),
+                     f.value instanceof LazyDocument.LazyField);
+          LazyDocument.LazyField lf = (LazyDocument.LazyField) f.value;
+          assertFalse(f.name + " is loaded", lf.hasBeenLoaded());
         }
       }
       if (VERBOSE) System.out.println("numFieldValues == " + numFieldValues);
@@ -125,34 +130,39 @@
 
       // pick a single field name to load a single value
       final String fieldName = FIELDS[random().nextInt(FIELDS.length)];
-      final StorableField[] fieldValues = d.getFields(fieldName);
+      List<FieldValue> fieldValues = new ArrayList<>();
+      for(FieldValue f : d) {
+        if (f.name.equals(fieldName)) {
+          fieldValues.add(f);
+        }
+      }
       assertEquals("#vals in field: " + fieldName, 
-                   NUM_VALUES, fieldValues.length);
-      final int valNum = random().nextInt(fieldValues.length);
+                   NUM_VALUES, fieldValues.size());
+      final int valNum = random().nextInt(fieldValues.size());
       assertEquals(id + "_" + fieldName + "_" + valNum,
-                   fieldValues[valNum].stringValue());
+                   ((LazyDocument.LazyField) fieldValues.get(valNum).value).stringValue());
       
       // now every value of fieldName should be loaded
-      for (StorableField f : d) {
-        if (f.name().equals("never_load")) {
+      for (FieldValue f : d) {
+        if (f.name.equals("never_load")) {
           fail("never_load was loaded");
         }
-        if (f.name().equals("load_later")) {
+        if (f.name.equals("load_later")) {
           fail("load_later was loaded too soon");
         }
-        if (f.name().equals("docid")) {
-          assertFalse(f.name(), f instanceof LazyDocument.LazyField);
+        if (f.name.equals("docid")) {
+          assertFalse(f.name, f.value instanceof LazyDocument.LazyField);
         } else {
-          assertTrue(f.name() + " is " + f.getClass(),
-                     f instanceof LazyDocument.LazyField);
-          LazyDocument.LazyField lf = (LazyDocument.LazyField) f;
-          assertEquals(f.name() + " is loaded?", 
+          assertTrue(f.name + " is " + f.value.getClass(),
+                     f.value instanceof LazyDocument.LazyField);
+          LazyDocument.LazyField lf = (LazyDocument.LazyField) f.value;
+          assertEquals(f.name + " is loaded?", 
                        lf.name().equals(fieldName), lf.hasBeenLoaded());
         }
       }
 
       // use the same LazyDoc to ask for one more lazy field
-      visitor = new LazyTestingStoredFieldVisitor(new LazyDocument(reader, hits[0].doc),
+      visitor = new LazyTestingStoredFieldVisitor(reader.getFieldTypes(), new LazyDocument(reader, hits[0].doc),
                                                   "load_later");
       reader.document(hits[0].doc, visitor);
       d = visitor.doc;
@@ -160,17 +170,17 @@
       // ensure we have all the values we expect now, and that
       // adding one more lazy field didn't "unload" the existing LazyField's
       // we already loaded.
-      for (StorableField f : d) {
-        if (f.name().equals("never_load")) {
+      for (FieldValue f : d) {
+        if (f.name.equals("never_load")) {
           fail("never_load was loaded");
         }
-        if (f.name().equals("docid")) {
-          assertFalse(f.name(), f instanceof LazyDocument.LazyField);
+        if (f.name.equals("docid")) {
+          assertFalse(f.name, f.value instanceof LazyDocument.LazyField);
         } else {
-          assertTrue(f.name() + " is " + f.getClass(),
-                     f instanceof LazyDocument.LazyField);
-          LazyDocument.LazyField lf = (LazyDocument.LazyField) f;
-          assertEquals(f.name() + " is loaded?", 
+          assertTrue(f.name + " is " + f.value.getClass(),
+                     f.value instanceof LazyDocument.LazyField);
+          LazyDocument.LazyField lf = (LazyDocument.LazyField) f.value;
+          assertEquals(f.name + " is loaded?", 
                        lf.name().equals(fieldName), lf.hasBeenLoaded());
         }
       }
@@ -184,12 +194,21 @@
     }
   }
 
+  private static class FieldValue {
+    String name;
+    Object value;
+    public FieldValue(String name, Object value) {
+      this.name = name;
+      this.value = value;
+    }
+  }
+
   private static class LazyTestingStoredFieldVisitor extends StoredFieldVisitor {
-    public final StoredDocument doc = new StoredDocument();
+    public final List<FieldValue> doc = new ArrayList<>();
     public final LazyDocument lazyDoc;
     public final Set<String> lazyFieldNames;
 
-    LazyTestingStoredFieldVisitor(LazyDocument l, String... fields) {
+    LazyTestingStoredFieldVisitor(FieldTypes fieldTypes, LazyDocument l, String... fields) {
       lazyDoc = l;
       lazyFieldNames = new HashSet<>(Arrays.asList(fields));
     }
@@ -202,7 +221,7 @@
         return Status.NO;
       } else {
         if (lazyFieldNames.contains(fieldInfo.name)) {
-          doc.add(lazyDoc.getField(fieldInfo));
+          doc.add(new FieldValue(fieldInfo.name, lazyDoc.getField(fieldInfo)));
         }
       }
       return Status.NO;
@@ -210,11 +229,7 @@
 
     @Override
     public void stringField(FieldInfo fieldInfo, String value) throws IOException {
-      final FieldType ft = new FieldType(TextField.TYPE_STORED);
-      ft.setStoreTermVectors(fieldInfo.hasVectors());
-      ft.setOmitNorms(fieldInfo.omitsNorms());
-      ft.setIndexOptions(fieldInfo.getIndexOptions());
-      doc.add(new Field(fieldInfo.name, value, ft));
+      doc.add(new FieldValue(fieldInfo.name, value));
     }
 
   }
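
All of the rewritten assertions flow through the standard StoredFieldVisitor hook used above: needsField gates which fields are materialized, and stringField receives the accepted values. A minimal sketch of that contract:

    reader.document(docID, new StoredFieldVisitor() {
      @Override
      public Status needsField(FieldInfo fieldInfo) {
        return "id".equals(fieldInfo.name) ? Status.YES : Status.NO;
      }
      @Override
      public void stringField(FieldInfo fieldInfo, String value) throws IOException {
        // invoked only for fields accepted by needsField
      }
    });
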
diff --git a/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java b/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
index ac95fc6..3041d58 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
@@ -49,7 +49,7 @@
     List<Integer> values = new ArrayList<>();
     for (int i = 0; i < unsortedReader.maxDoc(); i++) {
       if (liveDocs == null || liveDocs.get(i)) {
-        values.add(Integer.valueOf(unsortedReader.document(i).get(ID_FIELD)));
+        values.add(Integer.valueOf(unsortedReader.document(i).getString(ID_FIELD)));
       }
     }
     int idx = random().nextInt(SORT.length);
diff --git a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
index 30b0be7..b3cfdfa 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
@@ -29,23 +29,13 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -55,12 +45,12 @@
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
+import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.TermStatistics;
@@ -153,41 +143,33 @@
   protected static final String SORTED_DV_FIELD = "sorted";
   protected static final String SORTED_SET_DV_FIELD = "sorted_set";
   protected static final String TERM_VECTORS_FIELD = "term_vectors";
-
-  private static final FieldType TERM_VECTORS_TYPE = new FieldType(TextField.TYPE_NOT_STORED);
-  static {
-    TERM_VECTORS_TYPE.setStoreTermVectors(true);
-    TERM_VECTORS_TYPE.freeze();
-  }
-  
-  private static final FieldType POSITIONS_TYPE = new FieldType(TextField.TYPE_NOT_STORED);
-  static {
-    POSITIONS_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    POSITIONS_TYPE.freeze();
-  }
   
   protected static Directory dir;
   protected static LeafReader unsortedReader;
   protected static LeafReader sortedReader;
   protected static Integer[] sortedValues;
 
-  private static Document doc(final int id, PositionsTokenStream positions) {
-    final Document doc = new Document();
-    doc.add(new StringField(ID_FIELD, Integer.toString(id), Store.YES));
-    doc.add(new StringField(DOCS_ENUM_FIELD, DOCS_ENUM_TERM, Store.NO));
+  private static Document doc(RandomIndexWriter w, final int id, PositionsTokenStream positions) {
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting(BINARY_DV_FIELD);
+    fieldTypes.setMultiValued(SORTED_SET_DV_FIELD);
+    fieldTypes.setMultiValued(SORTED_NUMERIC_DV_FIELD);
+    fieldTypes.enableTermVectors(TERM_VECTORS_FIELD);
+
+    final Document doc = w.newDocument();
+    doc.addAtom(ID_FIELD, Integer.toString(id));
+    doc.addAtom(DOCS_ENUM_FIELD, DOCS_ENUM_TERM);
     positions.setId(id);
-    doc.add(new Field(DOC_POSITIONS_FIELD, positions, POSITIONS_TYPE));
-    doc.add(new NumericDocValuesField(NUMERIC_DV_FIELD, id));
-    TextField norms = new TextField(NORMS_FIELD, Integer.toString(id), Store.NO);
-    norms.setBoost(Float.intBitsToFloat(id));
-    doc.add(norms);
-    doc.add(new BinaryDocValuesField(BINARY_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedDocValuesField(SORTED_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedSetDocValuesField(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedSetDocValuesField(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id + 1))));
-    doc.add(new SortedNumericDocValuesField(SORTED_NUMERIC_DV_FIELD, id));
-    doc.add(new SortedNumericDocValuesField(SORTED_NUMERIC_DV_FIELD, id + 1));
-    doc.add(new Field(TERM_VECTORS_FIELD, Integer.toString(id), TERM_VECTORS_TYPE));
+    doc.addLargeText(DOC_POSITIONS_FIELD, positions);
+    doc.addInt(NUMERIC_DV_FIELD, id);
+    doc.addLargeText(NORMS_FIELD, Integer.toString(id), Float.intBitsToFloat(id));
+    doc.addBinary(BINARY_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id + 1)));
+    doc.addInt(SORTED_NUMERIC_DV_FIELD, id);
+    doc.addInt(SORTED_NUMERIC_DV_FIELD, id + 1);
+    doc.addLargeText(TERM_VECTORS_FIELD, Integer.toString(id));
     return doc;
   }
 
@@ -210,7 +192,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, dir, conf);
     writer.setDoRandomForceMerge(false);
     for (int id : ids) {
-      writer.addDocument(doc(id, positions));
+      writer.addDocument(doc(writer, id, positions));
     }
     // delete some documents
     writer.commit();
@@ -321,7 +303,7 @@
     int prev = -1;
     while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
       assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
-      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).get(ID_FIELD)));
+      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).getString(ID_FIELD)));
       while (++prev < doc) {
         assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
       }
@@ -339,7 +321,7 @@
     prev = -1;
     while ((doc = docs.advance(doc + 1)) != DocIdSetIterator.NO_MORE_DOCS) {
       assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
-      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).get(ID_FIELD)));
+      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).getString(ID_FIELD)));
       while (++prev < doc) {
         assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
       }
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java b/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java
index caae319..4991eef 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java
@@ -23,12 +23,9 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -70,20 +67,17 @@
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), cfg);
-    final Document parentDoc = new Document();
-    final NumericDocValuesField parentVal = new NumericDocValuesField("parent_val", 0L);
-    parentDoc.add(parentVal);
-    final StringField parent = new StringField("parent", "true", Store.YES);
-    parentDoc.add(parent);
     for (int i = 0; i < numParents; ++i) {
       List<Document> documents = new ArrayList<>();
       final int numChildren = random().nextInt(10);
       for (int j = 0; j < numChildren; ++j) {
-        final Document childDoc = new Document();
-        childDoc.add(new NumericDocValuesField("child_val", random().nextInt(5)));
+        final Document childDoc = writer.newDocument();
+        childDoc.addInt("child_val", random().nextInt(5));
         documents.add(childDoc);
       }
-      parentVal.setLongValue(random().nextInt(50));
+      final Document parentDoc = writer.newDocument();
+      parentDoc.addLong("parent_val", random().nextInt(50));
+      parentDoc.addAtom("parent", "true");
       documents.add(parentDoc);
       writer.addDocuments(documents);
     }
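
Creating parentDoc inside the loop keeps the block-join invariant intact: children first, parent last, all in one addDocuments call so the block stays contiguous in the segment:

    List<Document> block = new ArrayList<>();
    block.add(childDoc);        // children precede the parent
    block.add(parentDoc);       // parent must be the last document of the block
    writer.addDocuments(block); // a single call keeps the block contiguous
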
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index bf58e53..85988d7 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -19,7 +19,6 @@
 import java.nio.file.Path;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -46,18 +45,15 @@
             setMergePolicy(mergePolicy)
     );
     for (int x=0; x < 100; x++) {
-      Document doc = DocHelper.createDocument(x, "index", 5);
-      iw.addDocument(doc);
+      iw.addDocument(DocHelper.createDocument(iw, x, "index", 5));
     }
     iw.commit();
     for (int x=100; x < 150; x++) {
-      Document doc = DocHelper.createDocument(x, "index2", 5);
-      iw.addDocument(doc);
+      iw.addDocument(DocHelper.createDocument(iw, x, "index2", 5));
     }
     iw.commit();
     for (int x=150; x < 200; x++) {
-      Document doc = DocHelper.createDocument(x, "index3", 5);
-      iw.addDocument(doc);
+      iw.addDocument(DocHelper.createDocument(iw, x, "index3", 5));
     }
     iw.commit();
     DirectoryReader iwReader = iw.getReader();
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index fec4254..e9aecac 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -19,7 +19,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -35,11 +34,10 @@
     super.setUp();
     dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));
-    Document doc;
     for (int i = 0; i < NUM_DOCS; i++) {
-      doc = new Document();
-      doc.add(newStringField("id", i + "", Field.Store.YES));
-      doc.add(newTextField("f", i + " " + i, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addAtom("id", i + "");
+      doc.addLargeText("f", i + " " + i);
       w.addDocument(doc);
       if (i%3==0) w.commit();
     }
@@ -70,7 +68,7 @@
     IndexReader ir;
     ir = DirectoryReader.open(dirs[0]);
     assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
-    StoredDocument doc = ir.document(0);
+    Document doc = ir.document(0);
-    assertEquals("0", doc.get("id"));
+    assertEquals("0", doc.getString("id"));
     TermsEnum te = MultiFields.getTerms(ir, "id").iterator(null);
     assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
@@ -115,8 +113,8 @@
     IndexReader ir;
     ir = DirectoryReader.open(dirs[0]);
     assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
-    StoredDocument doc = ir.document(0);
-    assertEquals("0", doc.get("id"));
+    Document doc = ir.document(0);
+    assertEquals("0", doc.getString("id"));
     int start = ir.numDocs();
     ir.close();
     ir = DirectoryReader.open(dirs[1]);
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
index b615764..c1fcebd 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
@@ -39,12 +38,12 @@
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
         .setOpenMode(OpenMode.CREATE).setMergePolicy(NoMergePolicy.INSTANCE));
     for (int x = 0; x < 11; x++) {
-      Document doc = createDocument(x, "1", 3, format);
+      Document doc = createDocument(w, x, "1", 3, format);
       w.addDocument(doc);
       if (x%3==0) w.commit();
     }
     for (int x = 11; x < 20; x++) {
-      Document doc = createDocument(x, "2", 3, format);
+      Document doc = createDocument(w, x, "2", 3, format);
       w.addDocument(doc);
       if (x%3==0) w.commit();
     }
@@ -98,20 +97,20 @@
     }
   }
   
-  private Document createDocument(int n, String indexName, 
+  private Document createDocument(IndexWriter w, int n, String indexName,
       int numFields, NumberFormat format) {
     StringBuilder sb = new StringBuilder();
-    Document doc = new Document();
+    Document doc = w.newDocument();
     String id = format.format(n);
-    doc.add(newStringField("id", id, Field.Store.YES));
-    doc.add(newStringField("indexname", indexName, Field.Store.YES));
+    doc.addAtom("id", id);
+    doc.addAtom("indexname", indexName);
     sb.append("a");
     sb.append(n);
-    doc.add(newTextField("field1", sb.toString(), Field.Store.YES));
+    doc.addLargeText("field1", sb.toString());
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(newTextField("field" + (i + 1), sb.toString(), Field.Store.YES));
+      doc.addLargeText("field" + (i + 1), sb.toString());
     }
     return doc;
   }
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java b/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java
index d34215d..680e69e 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java
@@ -26,13 +26,14 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LogMergePolicy;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.NumericDocValues;
@@ -45,7 +46,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 public class TestSortingMergePolicy extends LuceneTestCase {
@@ -63,11 +63,34 @@
     createRandomIndexes();
   }
 
-  private Document randomDocument() {
-    final Document doc = new Document();
-    doc.add(new NumericDocValuesField("ndv", random().nextLong()));
-    doc.add(new StringField("s", RandomPicks.randomFrom(random(), terms), Store.YES));
-    return doc;
+  private void addRandomDocument(RandomIndexWriter w1, RandomIndexWriter w2) throws IOException {
+    long num = random().nextLong();
+    String term = RandomPicks.randomFrom(random(), terms);
+
+    Document doc = w1.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w1.addDocument(doc);
+
+    doc = w2.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w2.addDocument(doc);
+  }
+
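+  // Identical body to the RandomIndexWriter overload above: RandomIndexWriter wraps,
+  // rather than extends, IndexWriter (see the iw1.w/iw2.w call below), so both
+  // overloads are needed.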
+  private void addRandomDocument(IndexWriter w1, IndexWriter w2) throws IOException {
+    long num = random().nextLong();
+    String term = RandomPicks.randomFrom(random(), terms);
+
+    Document doc = w1.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w1.addDocument(doc);
+
+    doc = w2.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w2.addDocument(doc);
   }
 
   public static SortingMergePolicy newSortingMergePolicy(Sort sort) {
@@ -107,16 +130,22 @@
     final IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(new Random(seed)));
     iwc2.setMergePolicy(newSortingMergePolicy(sort));
     final RandomIndexWriter iw1 = new RandomIndexWriter(new Random(seed), dir1, iwc1);
+    FieldTypes fieldTypes = iw1.getFieldTypes();
+    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);
+    fieldTypes.disableStored("ndv");
+    fieldTypes.setDocValuesType("s", DocValuesType.NONE);
     final RandomIndexWriter iw2 = new RandomIndexWriter(new Random(seed), dir2, iwc2);
+    fieldTypes = iw2.getFieldTypes();
+    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);
+    fieldTypes.disableStored("ndv");
+    fieldTypes.setDocValuesType("s", DocValuesType.NONE);
     for (int i = 0; i < numDocs; ++i) {
       if (random().nextInt(5) == 0 && i != numDocs - 1) {
         final String term = RandomPicks.randomFrom(random(), terms);
         iw1.deleteDocuments(new Term("s", term));
         iw2.deleteDocuments(new Term("s", term));
       }
-      final Document doc = randomDocument();
-      iw1.addDocument(doc);
-      iw2.addDocument(doc);
+      addRandomDocument(iw1, iw2);
       if (random().nextInt(8) == 0) {
         iw1.commit();
         iw2.commit();
@@ -125,15 +154,13 @@
     // Make sure we have something to merge
     iw1.commit();
     iw2.commit();
-    final Document doc = randomDocument();
     // NOTE: don't use RIW.addDocument directly, since it sometimes commits
     // which may trigger a merge, in which case forceMerge may not do anything.
     // With field updates this is a problem, since the updates can go into the
     // single segment in the index, and therefore the index won't be sorted.
     // This hurts the assumption of the test later on, that the index is sorted
     // by SortingMP.
-    iw1.w.addDocument(doc);
-    iw2.w.addDocument(doc);
+    addRandomDocument(iw1.w, iw2.w);
 
     // update NDV of docs belonging to one term (covers many documents)
     final long value = random().nextLong();
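The FieldTypes calls added in this hunk show how options that trunk expressed through Field subclasses (NumericDocValuesField, StringField) are now declared once per field on the writer. A hedged sketch of that configuration step, using only the calls visible above (w is hypothetical):

    // Sketch: per-field schema on the branch, set before any documents are added.
    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);  // "ndv" carries doc values only, no postings
    fieldTypes.disableStored("ndv");
    fieldTypes.setDocValuesType("s", DocValuesType.NONE);  // "s" stays an indexed atom without doc values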
diff --git a/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java b/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
index fcc9fe7..ee74231 100644
--- a/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
+++ b/lucene/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -174,58 +174,59 @@
   /********************Testing Utils**********************************/
     
   private static void indexDocs(IndexWriter writer) throws Exception {
-    Random rnd = random();
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+
     /**
      * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*n (n squared).
      */
     for (int i = 1; i <= 10; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       String content = getContent(i);
     
-      doc.add(newTextField(rnd, "FIELD_1", content, Field.Store.YES));
+      doc.addLargeText("FIELD_1", content);
       //add a different field
-      doc.add(newTextField(rnd, "different_field", "diff", Field.Store.YES));
+      doc.addLargeText("different_field", "diff");
       writer.addDocument(doc);
     }
     
     //add 10 more docs with the term "diff" this will make it have the highest docFreq if we don't ask for the
     //highest freq terms for a specific field.
     for (int i = 1; i <= 10; i++) {
-      Document doc = new Document();
-      doc.add(newTextField(rnd, "different_field", "diff", Field.Store.YES));
+      Document doc = writer.newDocument();
+      doc.addLargeText("different_field", "diff");
       writer.addDocument(doc);
     }
     // add some docs where tf < df so we can see if sorting works
     // highTF low df
     int highTF = 200;
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     String content = "";
     for (int i = 0; i < highTF; i++) {
       content += "highTF ";
     }
-    doc.add(newTextField(rnd, "FIELD_1", content, Field.Store.YES));
+    doc.addLargeText("FIELD_1", content);
     writer.addDocument(doc);
     // highTF medium df =5
     int medium_df = 5;
     for (int i = 0; i < medium_df; i++) {
       int tf = 25;
-      Document newdoc = new Document();
+      Document newdoc = writer.newDocument();
       String newcontent = "";
       for (int j = 0; j < tf; j++) {
         newcontent += "highTFmedDF ";
       }
-      newdoc.add(newTextField(rnd, "FIELD_1", newcontent, Field.Store.YES));
+      newdoc.addLargeText("FIELD_1", newcontent);
       writer.addDocument(newdoc);
     }
     // add a doc with high tf in field different_field
     int targetTF =150;
-    doc = new Document();
+    doc = writer.newDocument();
     content = "";
     for (int i = 0; i < targetTF; i++) {
       content += "TF150 ";
     }
-    doc.add(newTextField(rnd, "different_field", content, Field.Store.YES));
+    doc.addLargeText("different_field", content);
     writer.addDocument(doc);
     writer.close();
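One branch-specific call in this hunk deserves a note: fieldTypes.disableExistsFilters() at the top of indexDocs. Since the test asserts exact docFreq and totalTermFreq counts, the plausible intent (an assumption, not documented in the patch) is to keep synthetic field-exists terms out of the index so they cannot skew the high-frequency statistics:

    // Assumed intent, not confirmed by the patch:
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableExistsFilters();    // no extra bookkeeping terms to disturb term-statistics assertions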
     
diff --git a/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java b/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
index c558ceb..804f760 100644
--- a/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
+++ b/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
@@ -26,9 +26,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -68,11 +65,11 @@
     sort = new Sort(new SortField("ndv1", SortField.Type.LONG));
   }
 
-  private Document randomDocument() {
-    final Document doc = new Document();
-    doc.add(new NumericDocValuesField("ndv1", random().nextInt(10)));
-    doc.add(new NumericDocValuesField("ndv2", random().nextInt(10)));
-    doc.add(new StringField("s", RandomPicks.randomFrom(random(), terms), Store.YES));
+  private Document randomDocument(RandomIndexWriter iw) {
+    final Document doc = iw.newDocument();
+    doc.addInt("ndv1", random().nextInt(10));
+    doc.addInt("ndv2", random().nextInt(10));
+    doc.addAtom("s", RandomPicks.randomFrom(random(), terms));
     return doc;
   }
 
@@ -93,7 +90,7 @@
     iw = new RandomIndexWriter(new Random(seed), dir, iwc);
     iw.setDoRandomForceMerge(false); // don't do this, it may happen anyway with MockRandomMP
     for (int i = 0; i < numDocs; ++i) {
-      final Document doc = randomDocument();
+      final Document doc = randomDocument(iw);
       iw.addDocument(doc);
       if (i == numDocs / 2 || (i != numDocs - 1 && random().nextInt(8) == 0)) {
         iw.commit();
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
index 74bec62..364bf00 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
@@ -18,27 +18,24 @@
  */
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -46,8 +43,8 @@
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -66,16 +63,16 @@
   public void testSimple() throws Exception {
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    Field field = newTextField("field", "", Field.Store.NO);
-    doc.add(field);
-    field.setStringValue("a b c");
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "a b c");
     w.addDocument(doc);
 
-    field.setStringValue("d e f");
+    doc = w.newDocument();
+    doc.addLargeText("field", "d e f");
     w.addDocument(doc);
 
-    field.setStringValue("a f");
+    doc = w.newDocument();
+    doc.addLargeText("field", "a f");
     w.addDocument(doc);
     
     final IndexReader r = w.getReader();
@@ -133,14 +130,17 @@
     }
     
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("id");
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
 
     final int[][] idToOrds = new int[NUM_DOCS][];
     final Set<Integer> ordsForDocSet = new HashSet<>();
 
     for(int id=0;id<NUM_DOCS;id++) {
-      Document doc = new Document();
-
-      doc.add(new IntField("id", id, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addInt("id", id);
       
       final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -153,11 +153,10 @@
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
-        doc.add(field);
+        doc.addAtom("field", termsArray[ord].utf8ToString());
       }
       ordsForDocSet.clear();
       Arrays.sort(ordsForDoc);
@@ -230,14 +229,18 @@
     }
     
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("id");
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
 
     final int[][] idToOrds = new int[NUM_DOCS][];
     final Set<Integer> ordsForDocSet = new HashSet<>();
 
     for(int id=0;id<NUM_DOCS;id++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
 
-      doc.add(new IntField("id", id, Field.Store.YES));
+      doc.addInt("id", id);
       
       final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -250,11 +253,10 @@
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
-        doc.add(field);
+        doc.addAtom("field", termsArray[ord].utf8ToString());
       }
       ordsForDocSet.clear();
       Arrays.sort(ordsForDoc);
@@ -321,7 +323,7 @@
                                             TestUtil.nextInt(random(), 2, 10));
                                             
 
-    final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.DOCUMENT_INT_PARSER, false);
     /*
       for(int docID=0;docID<subR.maxDoc();docID++) {
       System.out.println("  docID=" + docID + " id=" + docIDToID[docID]);
@@ -395,15 +397,17 @@
   public void testBackToTheFuture() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    
-    Document doc = new Document();
-    doc.add(newStringField("foo", "bar", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("foo");
+
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(newStringField("foo", "baz", Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addAtom("foo", "baz");
     // we need a second value for a doc, or we don't actually test DocTermOrds!
-    doc.add(newStringField("foo", "car", Field.Store.NO));
+    doc.addAtom("foo", "car");
     iw.addDocument(doc);
     
     DirectoryReader r1 = DirectoryReader.open(iw, true);
@@ -427,14 +431,17 @@
   public void testNumericEncoded32() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    
-    Document doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
+
+    Document doc = iw.newDocument();
+    doc.addInt("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
-    doc.add(new IntField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addInt("foo", 5);
+    doc.addInt("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -443,7 +450,7 @@
     DirectoryReader ir = DirectoryReader.open(dir);
     LeafReader ar = getOnlySegmentReader(ir);
     
-    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT32_TERM_PREFIX);
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
     assertEquals(2, v.getValueCount());
     
     v.setDocument(0);
@@ -456,10 +463,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToInt(value));
+    assertEquals(-3, NumericUtils.bytesToInt(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToInt(value));
+    assertEquals(5, NumericUtils.bytesToInt(value));
     
     ir.close();
     dir.close();
@@ -468,14 +475,17 @@
   public void testNumericEncoded64() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
     
-    Document doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLong("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
-    doc.add(new LongField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addLong("foo", 5);
+    doc.addLong("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -484,7 +494,7 @@
     DirectoryReader ir = DirectoryReader.open(dir);
     LeafReader ar = getOnlySegmentReader(ir);
     
-    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT64_TERM_PREFIX);
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
     assertEquals(2, v.getValueCount());
     
     v.setDocument(0);
@@ -497,10 +507,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToLong(value));
+    assertEquals(-3, NumericUtils.bytesToLong(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToLong(value));
+    assertEquals(5, NumericUtils.bytesToLong(value));
     
     ir.close();
     dir.close();
@@ -512,19 +522,21 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new StringField("field", "hello", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("field", "hello");
     iwriter.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new StringField("field", "world", Field.Store.NO));
+    doc = iwriter.newDocument();
+    doc.addAtom("field", "world");
     // we need a second value for a doc, or we don't actually test DocTermOrds!
-    doc.add(new StringField("field", "hello", Field.Store.NO));
+    doc.addAtom("field", "hello");
     iwriter.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new StringField("field", "beer", Field.Store.NO));
+    doc = iwriter.newDocument();
+    doc.addAtom("field", "beer");
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     
@@ -595,21 +607,24 @@
     IndexWriterConfig iwconfig =  newIndexWriterConfig(null);
     iwconfig.setMergePolicy(newLogMergePolicy());
     IndexWriter iw = new IndexWriter(dir, iwconfig);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("foo");
+    fieldTypes.disableSorting("foo");
     
-    Document doc = new Document();
-    doc.add(new StringField("foo", "bar", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addAtom("foo", "bar");
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new StringField("foo", "baz", Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addAtom("foo", "baz");
     iw.addDocument(doc);
     
-    doc = new Document();
+    doc = iw.newDocument();
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new StringField("foo", "baz", Field.Store.NO));
-    doc.add(new StringField("foo", "baz", Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addAtom("foo", "baz");
+    doc.addAtom("foo", "baz");
     iw.addDocument(doc);
     
     iw.forceMerge(1);
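Two recurring changes in the TestDocTermOrds hunks above: a field must be declared multi-valued before any document adds it twice, and numeric terms read back through doc-term ords are decoded with NumericUtils.bytesToInt/bytesToLong rather than the old prefix-coded helpers. A combined sketch using only calls from these hunks (iw is hypothetical):

    // Sketch: a multi-valued numeric field on this branch.
    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.setMultiValued("foo");     // required before adding "foo" twice in one document
    fieldTypes.disableSorting("foo");     // as in the hunks above

    Document doc = iw.newDocument();
    doc.addInt("foo", 5);
    doc.addInt("foo", -3);                // second value on the same field
    iw.addDocument(doc);

    // Decoding a term obtained from SortedSetDocValues.lookupOrd:
    // int v = NumericUtils.bytesToInt(bytesRefValue);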
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
index 29392c1..49645db 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
@@ -28,24 +28,14 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -77,6 +67,19 @@
     NUM_ORDS = atLeast(2);
     directory = newDirectory();
     RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String field : new String[] {"theDouble",
+                                     "theLong",
+                                     "theInt",
+                                     "theFloat",
+                                     "sparse",
+                                     "numInt",
+                                     "theRandomUnicodeString",
+                                     "theRandomUnicodeMultiValuedField"}) {
+      fieldTypes.disableSorting(field);
+    }
+    fieldTypes.setMultiValued("theRandomUnicodeMultiValuedField");
+
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
     int theInt = Integer.MAX_VALUE;
@@ -87,23 +90,23 @@
       System.out.println("TEST: setUp");
     }
     for (int i = 0; i < NUM_DOCS; i++){
-      Document doc = new Document();
-      doc.add(new LongField("theLong", theLong--, Field.Store.NO));
-      doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
-      doc.add(new IntField("theInt", theInt--, Field.Store.NO));
-      doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLong("theLong", theLong--);
+      doc.addDouble("theDouble", theDouble--);
+      doc.addInt("theInt", theInt--);
+      doc.addFloat("theFloat", theFloat--);
       if (i%2 == 0) {
-        doc.add(new IntField("sparse", i, Field.Store.NO));
+        doc.addInt("sparse", i);
       }
 
       if (i%2 == 0) {
-        doc.add(new IntField("numInt", i, Field.Store.NO));
+        doc.addInt("numInt", i);
       }
 
       // sometimes skip the field:
       if (random().nextInt(40) != 17) {
         unicodeStrings[i] = generateString(i);
-        doc.add(newStringField("theRandomUnicodeString", unicodeStrings[i], Field.Store.YES));
+        doc.addAtom("theRandomUnicodeString", unicodeStrings[i]);
       }
 
       // sometimes skip the field:
@@ -111,7 +114,7 @@
         for (int j = 0; j < NUM_ORDS; j++) {
           String newValue = generateString(i);
           multiValued[i][j] = new BytesRef(newValue);
-          doc.add(newStringField("theRandomUnicodeMultiValuedField", newValue, Field.Store.YES));
+          doc.addAtom("theRandomUnicodeMultiValuedField", newValue);
         }
         Arrays.sort(multiValued[i]);
       }
@@ -137,17 +140,15 @@
       FieldCache cache = FieldCache.DEFAULT;
       ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
       cache.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8));
-      cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+      cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, false);
       cache.getNumerics(reader, "theDouble", new FieldCache.Parser() {
         @Override
         public TermsEnum termsEnum(Terms terms) throws IOException {
-          return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+          return terms.iterator(null);
         }
         @Override
         public long parseValue(BytesRef term) {
-          int val = (int) NumericUtils.prefixCodedToLong(term);
-          if (val<0) val ^= 0x7fffffff;
-          return val;
+          return NumericUtils.doubleToLong(NumericUtils.bytesToDouble(term));
         }
       }, false);
       assertTrue(bos.toString(IOUtils.UTF_8).indexOf("WARNING") != -1);
@@ -159,26 +160,26 @@
 
   public void test() throws IOException {
     FieldCache cache = FieldCache.DEFAULT;
-    NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
+    NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
-      assertEquals(Double.doubleToLongBits(Double.MAX_VALUE - i), doubles.get(i));
+      assertEquals(NumericUtils.doubleToLong(Double.MAX_VALUE - i), doubles.get(i));
     }
     
-    NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
+    NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.DOCUMENT_LONG_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.DOCUMENT_LONG_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Long.MAX_VALUE - i, longs.get(i));
     }
 
-    NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
+    NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.DOCUMENT_INT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.DOCUMENT_INT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Integer.MAX_VALUE - i, ints.get(i));
     }
     
-    NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
+    NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.DOCUMENT_FLOAT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.DOCUMENT_FLOAT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Float.floatToIntBits(Float.MAX_VALUE - i), floats.get(i));
     }
@@ -320,7 +321,7 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
-    cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, true);
 
     // The double[] takes one slot, and docsWithField should also
     // have been populated:
@@ -331,7 +332,7 @@
     assertEquals(2, cache.getCacheEntries().length);
     assertTrue(bits instanceof Bits.MatchAllBits);
 
-    NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.DOCUMENT_INT_PARSER, true);
     assertEquals(4, cache.getCacheEntries().length);
     Bits docsWithField = cache.getDocsWithField(reader, "sparse");
     assertEquals(4, cache.getCacheEntries().length);
@@ -344,7 +345,7 @@
       }
     }
 
-    NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
+    NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.DOCUMENT_INT_PARSER, random().nextBoolean());
     docsWithField = cache.getDocsWithField(reader, "numInt");
     for (int i = 0; i < docsWithField.length(); i++) {
       if (i%2 == 0) {
@@ -394,7 +395,7 @@
                     assertEquals(i%2 == 0, docsWithField.get(i));
                   }
                 } else {
-                  NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+                  NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.DOCUMENT_INT_PARSER, true);
                   Bits docsWithField = cache.getDocsWithField(reader, "sparse");
                   for (int i = 0; i < docsWithField.length(); i++) {
                     if (i%2 == 0) {
@@ -426,12 +427,15 @@
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(null);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("binary", new BytesRef("binary value")));
-    doc.add(new SortedDocValuesField("sorted", new BytesRef("sorted value")));
-    doc.add(new NumericDocValuesField("numeric", 42));
-    doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value1")));
-    doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value2")));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("sortedset");
+    fieldTypes.disableSorting("binary");
+    Document doc = iw.newDocument();
+    doc.addBinary("binary", new BytesRef("binary value"));
+    doc.addBinary("sorted", new BytesRef("sorted value"));
+    doc.addInt("numeric", -42);
+    doc.addAtom("sortedset", new BytesRef("sortedset value1"));
+    doc.addAtom("sortedset", new BytesRef("sortedset value2"));
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     iw.close();
@@ -439,7 +443,7 @@
     
     // Binary type: can be retrieved via getTerms()
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.DOCUMENT_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -467,7 +471,7 @@
     
     // Sorted type: can be retrieved via getTerms(), getTermsIndex(), getDocTermOrds()
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.DOCUMENT_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -496,8 +500,8 @@
     assertTrue(bits.get(0));
     
     // Numeric type: can be retrieved via getInts() and so on
-    NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
-    assertEquals(42, numeric.get(0));
+    NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.DOCUMENT_INT_PARSER, false);
+    assertEquals(-42, numeric.get(0));
     
     try {
       FieldCache.DEFAULT.getTerms(ar, "numeric", true);
@@ -524,7 +528,7 @@
     
     // SortedSet type: can be retrieved via getDocTermOrds() 
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.DOCUMENT_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -560,7 +564,7 @@
   public void testNonexistantFields() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    Document doc = iw.newDocument();
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     iw.close();
@@ -571,16 +575,16 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.DOCUMENT_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.DOCUMENT_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.DOCUMENT_FLOAT_PARSER, true);
     assertEquals(0, floats.get(0));
     
-    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.DOCUMENT_DOUBLE_PARSER, true);
     assertEquals(0, doubles.get(0));
     
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -608,17 +612,17 @@
   public void testNonIndexedFields() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new StoredField("bogusbytes", "bogus"));
-    doc.add(new StoredField("bogusshorts", "bogus"));
-    doc.add(new StoredField("bogusints", "bogus"));
-    doc.add(new StoredField("boguslongs", "bogus"));
-    doc.add(new StoredField("bogusfloats", "bogus"));
-    doc.add(new StoredField("bogusdoubles", "bogus"));
-    doc.add(new StoredField("bogusterms", "bogus"));
-    doc.add(new StoredField("bogustermsindex", "bogus"));
-    doc.add(new StoredField("bogusmultivalued", "bogus"));
-    doc.add(new StoredField("bogusbits", "bogus"));
+    Document doc = iw.newDocument();
+    doc.addStoredString("bogusbytes", "bogus");
+    doc.addStoredString("bogusshorts", "bogus");
+    doc.addStoredString("bogusints", "bogus");
+    doc.addStoredString("boguslongs", "bogus");
+    doc.addStoredString("bogusfloats", "bogus");
+    doc.addStoredString("bogusdoubles", "bogus");
+    doc.addStoredString("bogusterms", "bogus");
+    doc.addStoredString("bogustermsindex", "bogus");
+    doc.addStoredString("bogusmultivalued", "bogus");
+    doc.addStoredString("bogusbits", "bogus");
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     iw.close();
@@ -629,16 +633,16 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.DOCUMENT_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.DOCUMENT_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.DOCUMENT_FLOAT_PARSER, true);
     assertEquals(0, floats.get(0));
     
-    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.DOCUMENT_DOUBLE_PARSER, true);
     assertEquals(0, doubles.get(0));
     
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -669,9 +673,9 @@
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
-    Document doc = new Document();
-    LongField field = new LongField("f", 0L, Store.YES);
-    doc.add(field);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("f");
+
     final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
     for (int i = 0; i < values.length; ++i) {
       final long v;
@@ -690,17 +694,17 @@
           break;
       }
       values[i] = v;
+      Document doc = iw.newDocument();
       if (v == 0 && random().nextBoolean()) {
         // missing
-        iw.addDocument(new Document());
       } else {
-        field.setLongValue(v);
-        iw.addDocument(doc);
+        doc.addLong("f", v);
       }
+      iw.addDocument(doc);
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
+    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.DOCUMENT_LONG_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], longs.get(i));
     }
@@ -715,9 +719,9 @@
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
-    Document doc = new Document();
-    IntField field = new IntField("f", 0, Store.YES);
-    doc.add(field);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("f");
+
     final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
     for (int i = 0; i < values.length; ++i) {
       final int v;
@@ -736,17 +740,17 @@
           break;
       }
       values[i] = v;
+      Document doc = iw.newDocument();
       if (v == 0 && random().nextBoolean()) {
         // missing
-        iw.addDocument(new Document());
       } else {
-        field.setIntValue(v);
-        iw.addDocument(doc);
+        doc.addInt("f", v);
       }
+      iw.addDocument(doc);
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.DOCUMENT_INT_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], ints.get(i));
     }
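Throughout the uninverting tests, every FieldCache.NUMERIC_UTILS_*_PARSER becomes a FieldCache.DOCUMENT_*_PARSER, matching the branch's plain byte encoding of numerics. A minimal read-side sketch (leafReader is hypothetical):

    // Sketch: uninverting a numeric field with the branch's parsers.
    NumericDocValues longs = FieldCache.DEFAULT.getNumerics(leafReader, "theLong", FieldCache.DOCUMENT_LONG_PARSER, false);
    long first = longs.get(0);
    // Doubles surface as sortable long bits, hence the assertion rewrite above:
    // assertEquals(NumericUtils.doubleToLong(expected), doubles.get(docID));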
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java
index b8ab53d..46bdd44 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheReopen.java
@@ -19,11 +19,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -42,15 +41,18 @@
         newIndexWriterConfig(new MockAnalyzer(random())).
             setMergePolicy(newLogMergePolicy(10))
     );
-    Document doc = new Document();
-    doc.add(new IntField("number", 17, Field.Store.NO));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("number");
+
+    Document doc = writer.newDocument();
+    doc.addInt("number", 17);
     writer.addDocument(doc);
     writer.commit();
   
     // Open reader1
     DirectoryReader r = DirectoryReader.open(dir);
     LeafReader r1 = getOnlySegmentReader(r);
-    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.DOCUMENT_INT_PARSER, false);
     assertEquals(17, ints.get(0));
   
     // Add new segment
@@ -62,7 +64,7 @@
     assertNotNull(r2);
     r.close();
     LeafReader sub0 = r2.leaves().get(0).reader();
-    final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.DOCUMENT_INT_PARSER, false);
     r2.close();
     assertTrue(ints == ints2);
   
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
index b8f67eb..7e86174 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSanityChecker.java
@@ -20,14 +20,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.store.Directory;
@@ -51,21 +47,37 @@
     dirB = newDirectory();
 
     IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = wA.getFieldTypes();
+    fieldTypes.disableSorting("theLong");
+    fieldTypes.disableSorting("theDouble");
+    fieldTypes.disableSorting("theInt");
+    fieldTypes.disableSorting("thedFloatLong");
+
     IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(new MockAnalyzer(random())));
+    fieldTypes = wB.getFieldTypes();
+    fieldTypes.disableSorting("theLong");
+    fieldTypes.disableSorting("theDouble");
+    fieldTypes.disableSorting("theInt");
+    fieldTypes.disableSorting("thedFloatLong");
 
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
     int theInt = Integer.MAX_VALUE;
     float theFloat = Float.MAX_VALUE;
     for (int i = 0; i < NUM_DOCS; i++){
-      Document doc = new Document();
-      doc.add(new LongField("theLong", theLong--, Field.Store.NO));
-      doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
-      doc.add(new IntField("theInt", theInt--, Field.Store.NO));
-      doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
       if (0 == i % 3) {
+        Document doc = wA.newDocument();
+        doc.addLong("theLong", theLong--);
+        doc.addDouble("theDouble", theDouble--);
+        doc.addInt("theInt", theInt--);
+        doc.addFloat("theFloat", theFloat--);
         wA.addDocument(doc);
       } else {
+        Document doc = wB.newDocument();
+        doc.addLong("theLong", theLong--);
+        doc.addDouble("theDouble", theDouble--);
+        doc.addInt("theInt", theInt--);
+        doc.addFloat("theFloat", theFloat--);
         wB.addDocument(doc);
       }
     }
@@ -94,11 +106,11 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
 
-    cache.getNumerics(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
-    cache.getNumerics(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
-    cache.getNumerics(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getNumerics(readerA, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, false);
+    cache.getNumerics(readerAclone, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, false);
+    cache.getNumerics(readerB, "theDouble", FieldCache.DOCUMENT_DOUBLE_PARSER, false);
 
-    cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    cache.getNumerics(readerX, "theInt", FieldCache.DOCUMENT_INT_PARSER, false);
 
     // // // 
 
@@ -117,7 +129,7 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
 
-    cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    cache.getNumerics(readerX, "theInt", FieldCache.DOCUMENT_INT_PARSER, false);
     cache.getTerms(readerX, "theInt", false);
 
     // // // 
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java
index 541e772..fe4692c 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSort.java
@@ -24,13 +24,9 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -68,11 +64,15 @@
   private void testString(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setDocValuesType("value", DocValuesType.NONE);
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -85,8 +85,8 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).getString("value"));
 
     TestUtil.checkReader(ir);
     ir.close();
@@ -105,13 +105,16 @@
   private void testStringMissing(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -124,9 +127,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -144,11 +147,13 @@
   private void testStringReverse(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -161,8 +166,8 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'foo' comes after 'bar' in reverse order
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -181,13 +186,16 @@
   private void testStringMissingSortedFirst(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -201,9 +209,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -222,13 +230,16 @@
   private void testStringMissingSortedFirstReverse(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -241,10 +252,10 @@
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).getString("value"));
     // null comes last
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -263,13 +274,16 @@
   private void testStringMissingSortedLast(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -283,10 +297,10 @@
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).getString("value"));
     // null comes last
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -305,13 +319,15 @@
   private void testStringMissingSortedLastReverse(SortField.Type sortType) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Type type = sortType == SortField.Type.STRING ? Type.SORTED : Type.BINARY;
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
@@ -326,9 +342,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null comes first
-    assertNull(searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("bar", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertNull(searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -338,11 +354,13 @@
   public void testFieldDoc() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -364,11 +382,14 @@
   public void testFieldDocReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -390,11 +411,11 @@
   public void testFieldScore() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("value", "foo bar bar bar bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("value", "foo foo foo foo foo");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -420,11 +441,11 @@
   public void testFieldScoreReverse() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("value", "foo bar bar bar bar", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("value", "foo bar bar bar bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("value", "foo foo foo foo foo", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("value", "foo foo foo foo foo");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
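
Two different field kinds appear in this migration: untokenized keyword fields become addAtom, while analyzed text fields (as in the two scoring tests above) become addLargeText. A minimal fragment showing the distinction, under the same assumptions as the sketch earlier:

    Document doc = writer.newDocument();
    doc.addAtom("id", "42");                          // one untokenized term; was newStringField(...)
    doc.addLargeText("body", "foo bar bar bar bar");  // analyzed text; was newTextField(...)
    writer.addDocument(doc);
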
@@ -449,14 +470,16 @@
   public void testInt() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new IntField("value", 300000, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addInt("value", 300000);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.INTEGER));
@@ -468,9 +491,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
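
All of the numeric sort tests that follow share one shape: index plain values, wrap the reader so the field is uninverted into doc values of the requested Type, then sort on it. A condensed fragment of that shape, with names taken from the hunks (illustrative only, not part of the patch):

    IndexReader ir = UninvertingReader.wrap(writer.getReader(),
                     Collections.singletonMap("value", Type.INTEGER));
    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
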
@@ -480,13 +503,15 @@
   public void testIntMissing() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.INTEGER));
@@ -498,9 +523,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null is treated as 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertNull(searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -510,13 +535,15 @@
   public void testIntMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.INTEGER));
@@ -530,9 +557,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null is treated as Integer.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -542,14 +569,16 @@
   public void testIntReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new IntField("value", 300000, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addInt("value", 300000);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new IntField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addInt("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.INTEGER));
@@ -561,9 +590,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // reverse numeric order
-    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -573,14 +602,16 @@
   public void testLong() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addLong("value", 3000000000L);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.LONG));
@@ -592,9 +623,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -604,13 +635,15 @@
   public void testLongMissing() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.LONG));
@@ -622,9 +655,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null is treated as 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertNull(searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -634,13 +667,15 @@
   public void testLongMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.LONG));
@@ -654,9 +689,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null is treated as Long.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertNull(searcher.doc(td.scoreDocs[2].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -666,14 +701,16 @@
   public void testLongReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addLong("value", 3000000000L);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", -1, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", -1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new LongField("value", 4, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLong("value", 4);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.LONG));
@@ -685,9 +722,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // reverse numeric order
-    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -697,14 +734,16 @@
   public void testFloat() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addFloat("value", 30.1f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", -1.3f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", 4.2f);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.FLOAT));
@@ -716,9 +755,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -728,13 +767,15 @@
   public void testFloatMissing() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", -1.3f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", 4.2f);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.FLOAT));
@@ -745,10 +786,9 @@
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
-    // null is treated as 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
     assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -758,13 +798,15 @@
   public void testFloatMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", -1.3f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", 4.2f);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.FLOAT));
@@ -778,8 +820,8 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // null is treated as Float.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
     assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
     TestUtil.checkReader(ir);
     ir.close();
@@ -790,14 +832,16 @@
   public void testFloatReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addFloat("value", 30.1f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", -1.3f);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addFloat("value", 4.2f);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.FLOAT));
@@ -809,9 +853,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits);
     // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -821,17 +865,19 @@
   public void testDouble() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addDouble("value", 30.1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -1.3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333333);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333332);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.DOUBLE));
@@ -843,10 +889,10 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // numeric order
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
+    assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -856,13 +902,15 @@
   public void testDoubleSignedZero() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleField("value", +0d, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addDouble("value", +0d);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -0d, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -0d);
     writer.addDocument(doc);
-    doc = new Document();
+    doc = writer.newDocument();
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.DOUBLE));
     writer.close();
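
The signed-zero test above exists because plain Java treats -0d and +0d as equal under ==, while the total order used for sorting distinguishes them. Plain-Java facts, shown only for context (not part of the patch):

    assert -0d == +0d;                            // primitive equality: equal
    assert Double.compare(-0d, +0d) < 0;          // sort order: -0.0 before +0.0
    assert Double.doubleToLongBits(-0d)
        != Double.doubleToLongBits(+0d);          // distinct bit patterns
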
@@ -889,16 +937,18 @@
   public void testDoubleMissing() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -1.3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333333);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333332);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.DOUBLE));
@@ -910,10 +960,10 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // null is treated as 0
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertNull(searcher.doc(td.scoreDocs[1].doc).getString("value"));
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -923,16 +973,18 @@
   public void testDoubleMissingLast() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -1.3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333333);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333332);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.DOUBLE));
@@ -946,9 +998,9 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // null is treated as Double.MAX_VALUE
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
     assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
     TestUtil.checkReader(ir);
     ir.close();
@@ -959,17 +1011,19 @@
   public void testDoubleReverse() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addDouble("value", 30.1);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", -1.3);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333333);
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addDouble("value", 4.2333333333332);
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(), 
                      Collections.singletonMap("value", Type.DOUBLE));
@@ -981,10 +1035,10 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(4, td.totalHits);
     // reverse numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
+    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value").toString());
+    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value").toString());
+    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value").toString());
+    assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value").toString());
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -993,13 +1047,16 @@
   public void testEmptyStringVsNullStringSort() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(newStringField("f", "", Field.Store.NO));
-    doc.add(newStringField("t", "1", Field.Store.NO));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("f");
+    fieldTypes.disableSorting("t");
+    Document doc = w.newDocument();
+    doc.addAtom("f", "");
+    doc.addAtom("t", "1");
     w.addDocument(doc);
     w.commit();
-    doc = new Document();
-    doc.add(newStringField("t", "1", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("t", "1");
     w.addDocument(doc);
 
     IndexReader r = UninvertingReader.wrap(DirectoryReader.open(w, true), 
@@ -1020,11 +1077,14 @@
   public void testMultiValuedField() throws IOException {
     Directory indexStore = newDirectory();
     IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("string");
+    fieldTypes.disableSorting("string");
     for(int i=0; i<5; i++) {
-        Document doc = new Document();
-        doc.add(new StringField("string", "a"+i, Field.Store.NO));
-        doc.add(new StringField("string", "b"+i, Field.Store.NO));
-        writer.addDocument(doc);
+      Document doc = writer.newDocument();
+      doc.addAtom("string", "a"+i);
+      doc.addAtom("string", "b"+i);
+      writer.addDocument(doc);
     }
     writer.forceMerge(1); // force a single segment so it has a higher unique term count in all cases
     writer.close();
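
For multi-valued fields the schema must be told up front that the same field name may be added more than once per document; only then do repeated addAtom calls accumulate values. Condensed from the hunk above (illustrative only):

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("string");
    fieldTypes.disableSorting("string");
    Document doc = writer.newDocument();
    doc.addAtom("string", "a0");   // first value
    doc.addAtom("string", "b0");   // second value in the same field
    writer.addDocument(doc);
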
@@ -1049,14 +1109,14 @@
     int id = 0;
     for(int seg=0;seg<2;seg++) {
       for(int docIDX=0;docIDX<10;docIDX++) {
-        Document doc = new Document();
-        doc.add(new IntField("id", docIDX, Field.Store.YES));
+        Document doc = w.newDocument();
+        doc.addInt("id", docIDX);
         StringBuilder sb = new StringBuilder();
         for(int i=0;i<id;i++) {
           sb.append(' ');
           sb.append("text");
         }
-        doc.add(newTextField("body", sb.toString(), Field.Store.NO));
+        doc.addLargeText("body", sb.toString());
         w.addDocument(doc);
         id++;
       }
@@ -1112,8 +1172,10 @@
   public void testSortOneDocument() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(),
                      Collections.singletonMap("value", Type.SORTED));
@@ -1124,7 +1186,7 @@
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(1, td.totalHits);
-    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -1134,8 +1196,10 @@
   public void testSortOneDocumentWithScores() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     IndexReader ir = UninvertingReader.wrap(writer.getReader(),
                      Collections.singletonMap("value", Type.SORTED));
@@ -1159,13 +1223,16 @@
   public void testSortTwoFields() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
-    doc.add(newStringField("value", "foo", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("value");
+    fieldTypes.disableSorting("tievalue");
+    Document doc = writer.newDocument();
+    doc.addAtom("tievalue", "tied");
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("tievalue", "tied", Field.Store.NO));
-    doc.add(newStringField("value", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addAtom("tievalue", "tied");
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
     Map<String,Type> mappings = new HashMap<>();
     mappings.put("tievalue", Type.SORTED);
@@ -1182,8 +1249,8 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // 'bar' comes before 'foo'
-    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).getString("value"));
+    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).getString("value"));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -1192,11 +1259,11 @@
   public void testScore() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "bar", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addAtom("value", "bar");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "foo", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addAtom("value", "foo");
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSortRandom.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSortRandom.java
index 0850700..3890f39 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSortRandom.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheSortRandom.java
@@ -29,12 +29,10 @@
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -50,9 +48,9 @@
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -74,6 +72,10 @@
     final int NUM_DOCS = atLeast(100);
     final Directory dir = newDirectory();
     final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("stringdv");
+    fieldTypes.disableSorting("id");
+
     final boolean allowDups = random.nextBoolean();
     final Set<String> seen = new HashSet<>();
     final int maxLength = TestUtil.nextInt(random, 5, 100);
@@ -85,7 +87,7 @@
     final List<BytesRef> docValues = new ArrayList<>();
     // TODO: deletions
     while (numDocs < NUM_DOCS) {
-      final Document doc = new Document();
+      final Document doc = writer.newDocument();
 
       // 10% of the time, the document is missing the value:
       final BytesRef br;
@@ -108,7 +110,7 @@
           System.out.println("  " + numDocs + ": s=" + s);
         }
 
-        doc.add(new StringField("stringdv", s, Field.Store.NO));
+        doc.addAtom("stringdv", s);
         docValues.add(new BytesRef(s));
 
       } else {
@@ -119,7 +121,7 @@
         docValues.add(null);
       }
 
-      doc.add(new IntField("id", numDocs, Field.Store.YES));
+      doc.addInt("id", numDocs);
       writer.addDocument(doc);
       numDocs++;
 
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java
index c7c2986..f756f48 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheVsDocValues.java
@@ -17,42 +17,40 @@
  * limitations under the License.
  */
 
-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
 public class TestFieldCacheVsDocValues extends LuceneTestCase {
   
   public void testByteMissingVsFieldCache() throws Exception {
@@ -131,6 +129,8 @@
       numDocs = TestUtil.nextInt(random(), 100, 200);
     }
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("field");
     List<byte[]> docBytes = new ArrayList<>();
     long totalBytes = 0;
     for(int docID=0;docID<numDocs;docID++) {
@@ -154,14 +154,16 @@
       byte[] bytes = new byte[numBytes];
       random().nextBytes(bytes);
       docBytes.add(bytes);
-      Document doc = new Document();      
+      Document doc = w.newDocument();
       BytesRef b = new BytesRef(bytes);
       b.length = bytes.length;
-      doc.add(new BinaryDocValuesField("field", b));
-      doc.add(new StringField("id", ""+docID, Field.Store.YES));
+      doc.addBinary("field", b);
+      doc.addAtom("id", ""+docID);
       try {
         w.addDocument(doc);
       } catch (IllegalArgumentException iae) {
+        System.out.println("got:");
+        iae.printStackTrace(System.out);
         if (iae.getMessage().indexOf("is too large") == -1) {
           throw iae;
         } else {
@@ -195,9 +197,9 @@
 
     BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
     for(int docID=0;docID<docBytes.size();docID++) {
-      StoredDocument doc = ar.document(docID);
+      Document doc = ar.document(docID);
       BytesRef bytes = s.get(docID);
-      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
+      byte[] expected = docBytes.get(Integer.parseInt(doc.getString("id")));
       assertEquals(expected.length, bytes.length);
       assertEquals(new BytesRef(expected), bytes);
     }
@@ -230,6 +232,9 @@
       numDocs = TestUtil.nextInt(random(), 100, 200);
     }
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("field");
+
     List<byte[]> docBytes = new ArrayList<>();
     long totalBytes = 0;
     for(int docID=0;docID<numDocs;docID++) {
@@ -253,11 +258,11 @@
       byte[] bytes = new byte[numBytes];
       random().nextBytes(bytes);
       docBytes.add(bytes);
-      Document doc = new Document();      
+      Document doc = w.newDocument();
       BytesRef b = new BytesRef(bytes);
       b.length = bytes.length;
-      doc.add(new BinaryDocValuesField("field", b));
-      doc.add(new StringField("id", ""+docID, Field.Store.YES));
+      doc.addBinary("field", b);
+      doc.addAtom("id", ""+docID);
       w.addDocument(doc);
     }
     
@@ -268,9 +273,9 @@
 
     BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
     for(int docID=0;docID<docBytes.size();docID++) {
-      StoredDocument doc = ar.document(docID);
+      Document doc = ar.document(docID);
       BytesRef bytes = s.get(docID);
-      byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
+      byte[] expected = docBytes.get(Integer.parseInt(doc.getString("id")));
       assertEquals(expected.length, bytes.length);
       assertEquals(new BytesRef(expected), bytes);
     }
@@ -283,18 +288,12 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field indexedField = new StringField("indexed", "", Field.Store.NO);
-    Field dvField = new SortedDocValuesField("dv", new BytesRef());
-    doc.add(idField);
-    doc.add(indexedField);
-    doc.add(dvField);
     
     // index some docs
     int numDocs = atLeast(300);
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       final int length;
       if (minLength == maxLength) {
         length = minLength; // fixed length
@@ -302,8 +301,8 @@
         length = TestUtil.nextInt(random(), minLength, maxLength);
       }
       String value = TestUtil.randomSimpleString(random(), length);
-      indexedField.setStringValue(value);
-      dvField.setBytesValue(new BytesRef(value));
+      doc.addAtom("indexed", value);
+      doc.addBinary("dv", new BytesRef(value));
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -324,7 +323,7 @@
       LeafReader r = context.reader();
       SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
       SortedDocValues actual = r.getSortedDocValues("dv");
-      assertEquals(r.maxDoc(), expected, actual);
+      assertEquals(r, r.maxDoc(), r.getLiveDocs(), expected, actual);
     }
     ir.close();
     dir.close();
@@ -334,13 +333,15 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    fieldTypes.setMultiValued("indexed");
+
     // index some docs
     int numDocs = atLeast(300);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
-      doc.add(idField);
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       final int length = TestUtil.nextInt(random(), minLength, maxLength);
       int numValues = random().nextInt(17);
       // create a random list of strings
@@ -348,19 +349,22 @@
       for (int v = 0; v < numValues; v++) {
         values.add(TestUtil.randomSimpleString(random(), minLength, length));
       }
+      if (VERBOSE) {
+        System.out.println("  doc id=" + i + " values=" + values);
+      }
       
       // add in any order to the indexed field
       ArrayList<String> unordered = new ArrayList<>(values);
       Collections.shuffle(unordered, random());
       for (String v : values) {
-        doc.add(newStringField("indexed", v, Field.Store.NO));
+        doc.addAtom("indexed", v);
       }
 
       // add in any order to the dv field
       ArrayList<String> unordered2 = new ArrayList<>(values);
       Collections.shuffle(unordered2, random());
       for (String v : unordered2) {
-        doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
+        doc.addAtom("dv", new BytesRef(v));
       }
 
       writer.addDocument(doc);
@@ -382,7 +386,7 @@
       LeafReader r = context.reader();
       SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed", null);
       SortedSetDocValues actual = r.getSortedSetDocValues("dv");
-      assertEquals(r.maxDoc(), expected, actual);
+      assertEquals(r, r.maxDoc(), r.getLiveDocs(), expected, actual);
     }
     ir.close();
     
@@ -393,7 +397,7 @@
     LeafReader ar = getOnlySegmentReader(ir);
     SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed", null);
     SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
-    assertEquals(ir.maxDoc(), expected, actual);
+    assertEquals(ar, ir.maxDoc(), ar.getLiveDocs(), expected, actual);
     ir.close();
     
     writer.close();
@@ -404,10 +408,6 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field indexedField = newStringField("indexed", "", Field.Store.NO);
-    Field dvField = new NumericDocValuesField("dv", 0);
-
     
     // index some docs
     int numDocs = atLeast(300);
@@ -415,16 +415,13 @@
     // for numbers of values <= 256, all storage layouts are tested
     assert numDocs > 256;
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
       long value = longs.next();
-      indexedField.setStringValue(Long.toString(value));
-      dvField.setLongValue(value);
-      Document doc = new Document();
-      doc.add(idField);
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       // 1/4 of the time we neglect to add the fields
       if (random().nextInt(4) > 0) {
-        doc.add(indexedField);
-        doc.add(dvField);
+        doc.addAtom("indexed", Long.toString(value));
+        doc.addLong("dv", value);
       }
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
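
The rewritten assertEquals in the next hunk no longer assumes FieldCache and doc values assign identical ordinals; with deletions present they can disagree. It therefore first records which ordinals are reachable from live documents and compares only those. A condensed sketch of that masking step, where dv stands for either side's SortedSetDocValues (illustration only; the real code follows in the hunk):

    FixedBitSet liveOrds = new FixedBitSet((int) dv.getValueCount());
    for (int docID = 0; docID < maxDoc; docID++) {
      if (liveDocs != null && liveDocs.get(docID) == false) {
        continue;                 // deleted docs contribute no live ords
      }
      dv.setDocument(docID);
      long ord;
      while ((ord = dv.nextOrd()) != NO_MORE_ORDS) {
        liveOrds.set((int) ord);  // only these ords are compared later
      }
    }
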
@@ -477,100 +474,153 @@
     }
   }
   
-  private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
-    assertEquals(maxDoc, DocValues.singleton(expected), DocValues.singleton(actual));
+  private void assertEquals(IndexReader r, int maxDoc, Bits liveDocs, SortedDocValues expected, SortedDocValues actual) throws Exception {
+    assertEquals(r, maxDoc, liveDocs, DocValues.singleton(expected), DocValues.singleton(actual));
   }
   
-  private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
+  private void assertEquals(IndexReader r, int maxDoc, Bits liveDocs, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
     // can be null for the segment if no docs actually had any SortedDocValues
     // in this case FC.getDocTermOrds returns EMPTY
     if (actual == null) {
       assertEquals(expected.getValueCount(), 0);
       return;
     }
-    assertEquals(expected.getValueCount(), actual.getValueCount());
-    // compare ord lists
+
+    FixedBitSet liveOrdsExpected = new FixedBitSet((int) expected.getValueCount());
+    FixedBitSet liveOrdsActual = new FixedBitSet((int) actual.getValueCount());
+
+    // compare values for all live docs:
     for (int i = 0; i < maxDoc; i++) {
+      if (VERBOSE) {
+        System.out.println("check doc=" + r.document(i).getString("id"));
+      }
+      if (liveDocs != null && liveDocs.get(i) == false) {
+        // Don't check deleted docs
+        continue;
+      }
       expected.setDocument(i);
       actual.setDocument(i);
       long expectedOrd;
       while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
-        assertEquals(expectedOrd, actual.nextOrd());
+        BytesRef expectedBytes = expected.lookupOrd(expectedOrd);
+        long actualOrd = actual.nextOrd();
+        assertTrue(actualOrd != NO_MORE_ORDS);
+        BytesRef actualBytes = actual.lookupOrd(actualOrd);
+        assertEquals(expectedBytes, actualBytes);
+        liveOrdsExpected.set((int) expectedOrd);
+        liveOrdsActual.set((int) actualOrd);
       }
+
       assertEquals(NO_MORE_ORDS, actual.nextOrd());
     }
+
+    // Make sure both have same number of non-deleted values:
+    assertEquals(liveOrdsExpected.cardinality(), liveOrdsActual.cardinality());
     
     // compare ord dictionary
-    for (long i = 0; i < expected.getValueCount(); i++) {
-      final BytesRef expectedBytes = BytesRef.deepCopyOf(expected.lookupOrd(i));
-      final BytesRef actualBytes = actual.lookupOrd(i);
+    int expectedOrd = 0;
+    int actualOrd = 0;
+    while (expectedOrd < expected.getValueCount()) {
+      expectedOrd = liveOrdsExpected.nextSetBit(expectedOrd);
+      if (expectedOrd == DocIdSetIterator.NO_MORE_DOCS) {
+        break;
+      }
+      if (VERBOSE) {
+        System.out.println("check expectedOrd=" + expectedOrd);
+      }
+      actualOrd = liveOrdsActual.nextSetBit(actualOrd);
+      BytesRef expectedBytes = expected.lookupOrd(expectedOrd);
+      BytesRef actualBytes = actual.lookupOrd(actualOrd);
       assertEquals(expectedBytes, actualBytes);
+      expectedOrd++;
+      actualOrd++;
     }
-    
+
+    assertTrue(actualOrd == actual.getValueCount() || liveOrdsActual.nextSetBit(actualOrd) == DocIdSetIterator.NO_MORE_DOCS);
+
     // compare termsenum
-    assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
+    assertEquals(expected.getValueCount(), expected.termsEnum(), liveOrdsExpected, actual.termsEnum(), liveOrdsActual);
   }
   
-  private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
+  /** Does termsEnum.next() but then skips over deleted ords. */
+  private static BytesRef next(TermsEnum termsEnum, Bits liveOrds) throws IOException {
+    while (termsEnum.next() != null) {
+      if (liveOrds.get((int) termsEnum.ord())) {
+        return termsEnum.term();
+      }
+    }
+    return null;
+  }
+
+  /** Does termsEnum.seekCeil() but then skips over deleted ords. */
+  private static SeekStatus seekCeil(TermsEnum termsEnum, BytesRef term, Bits liveOrds) throws IOException {
+    SeekStatus status = termsEnum.seekCeil(term);
+    if (status == SeekStatus.END) {
+      return status;
+    } else {
+      if (liveOrds.get((int) termsEnum.ord()) == false) {
+        while (termsEnum.next() != null) {
+          if (liveOrds.get((int) termsEnum.ord())) {
+            return SeekStatus.NOT_FOUND;
+          }
+        }
+        return SeekStatus.END;
+      } else {
+        return status;
+      }
+    }
+  }
+
+  private void assertEquals(long numOrds, TermsEnum expected, Bits liveOrdsExpected, TermsEnum actual, Bits liveOrdsActual) throws Exception {
     BytesRef ref;
     
     // sequential next() through all terms
-    while ((ref = expected.next()) != null) {
-      assertEquals(ref, actual.next());
-      assertEquals(expected.ord(), actual.ord());
+    while ((ref = next(expected, liveOrdsExpected)) != null) {
+      assertEquals(ref, next(actual, liveOrdsActual));
       assertEquals(expected.term(), actual.term());
     }
-    assertNull(actual.next());
-    
-    // sequential seekExact(ord) through all terms
-    for (long i = 0; i < numOrds; i++) {
-      expected.seekExact(i);
-      actual.seekExact(i);
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
+    assertNull(next(actual, liveOrdsActual));
     
     // sequential seekExact(BytesRef) through all terms
     for (long i = 0; i < numOrds; i++) {
+      if (liveOrdsExpected.get((int) i) == false) {
+        continue;
+      }
       expected.seekExact(i);
       assertTrue(actual.seekExact(expected.term()));
-      assertEquals(expected.ord(), actual.ord());
       assertEquals(expected.term(), actual.term());
     }
     
     // sequential seekCeil(BytesRef) through all terms
     for (long i = 0; i < numOrds; i++) {
+      if (liveOrdsExpected.get((int) i) == false) {
+        continue;
+      }
       expected.seekExact(i);
       assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
-      assertEquals(expected.ord(), actual.ord());
-      assertEquals(expected.term(), actual.term());
-    }
-    
-    // random seekExact(ord)
-    for (long i = 0; i < numOrds; i++) {
-      long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
-      expected.seekExact(randomOrd);
-      actual.seekExact(randomOrd);
-      assertEquals(expected.ord(), actual.ord());
       assertEquals(expected.term(), actual.term());
     }
     
     // random seekExact(BytesRef)
     for (long i = 0; i < numOrds; i++) {
       long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
+      if (liveOrdsExpected.get((int) randomOrd) == false) {
+        continue;
+      }
       expected.seekExact(randomOrd);
       actual.seekExact(expected.term());
-      assertEquals(expected.ord(), actual.ord());
       assertEquals(expected.term(), actual.term());
     }
     
     // random seekCeil(BytesRef)
     for (long i = 0; i < numOrds; i++) {
+      if (liveOrdsExpected.get((int) i) == false) {
+        continue;
+      }
       BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
-      SeekStatus expectedStatus = expected.seekCeil(target);
-      assertEquals(expectedStatus, actual.seekCeil(target));
+      SeekStatus expectedStatus = seekCeil(expected, target, liveOrdsExpected);
+      assertEquals(expectedStatus, seekCeil(actual, target, liveOrdsActual));
       if (expectedStatus != SeekStatus.END) {
-        assertEquals(expected.ord(), actual.ord());
         assertEquals(expected.term(), actual.term());
       }
     }
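
The rewritten comparison above stops asserting ord equality on purpose: deletes can leave different holes in the expected and actual ord spaces, so only the surviving terms have to line up. A minimal sketch of the lockstep walk this enables, reusing the next() helper defined in this hunk (assertSameLiveTerms and everything else here is illustrative, not part of the patch):

    // Walk both enums over live ords only; terms must agree even when raw ords differ.
    private static void assertSameLiveTerms(TermsEnum expected, Bits liveOrdsExpected,
                                            TermsEnum actual, Bits liveOrdsActual) throws IOException {
      BytesRef ref;
      while ((ref = next(expected, liveOrdsExpected)) != null) {
        BytesRef other = next(actual, liveOrdsActual);
        assertNotNull(other);      // actual must still have a live term
        assertEquals(ref, other);  // compare term bytes, never ord()
      }
      assertNull(next(actual, liveOrdsActual));  // and nothing live left over
    }
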
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
index eea0dbd..ecc21fe 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCacheWithThreads.java
@@ -26,15 +26,14 @@
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SortedDocValues;
@@ -48,20 +47,26 @@
   public void test() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("bytes");
+    fieldTypes.setDocValuesType("bytes", DocValuesType.NONE);
+    fieldTypes.disableSorting("number");
+    fieldTypes.setDocValuesType("number", DocValuesType.NONE);
+    fieldTypes.disableSorting("sorted");
+    fieldTypes.setDocValuesType("sorted", DocValuesType.NONE);
     final List<Long> numbers = new ArrayList<>();
     final List<BytesRef> binary = new ArrayList<>();
     final List<BytesRef> sorted = new ArrayList<>();
     final int numDocs = atLeast(100);
     for(int i=0;i<numDocs;i++) {
-      Document d = new Document();
+      Document d = w.newDocument();
       long number = random().nextLong();
-      d.add(new NumericDocValuesField("number", number));
+      d.addLong("number", number);
       BytesRef bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
-      d.add(new BinaryDocValuesField("bytes", bytes));
+      d.addAtom("bytes", bytes);
       binary.add(bytes);
       bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
-      d.add(new SortedDocValuesField("sorted", bytes));
+      d.addAtom("sorted", bytes);
       sorted.add(bytes);
       w.addDocument(d);
       numbers.add(number);
@@ -83,29 +88,13 @@
           @Override
           public void run() {
             try {
-              //NumericDocValues ndv = ar.getNumericDocValues("number");
-              NumericDocValues ndv = FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
-              //BinaryDocValues bdv = ar.getBinaryDocValues("bytes");
               BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes", false);
               SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
               startingGun.await();
               int iters = atLeast(1000);
               for(int iter=0;iter<iters;iter++) {
                 int docID = threadRandom.nextInt(numDocs);
-                switch(threadRandom.nextInt(4)) {
-                case 0:
-                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false).get(docID));
-                  break;
-                case 1:
-                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false).get(docID));
-                  break;
-                case 2:
-                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, false).get(docID));
-                  break;
-                case 3:
-                  assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false).get(docID));
-                  break;
-                }
+                assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.DOCUMENT_LONG_PARSER, false).get(docID));
                 BytesRef term = bdv.get(docID);
                 assertEquals(binary.get(docID), term);
                 term = sdv.get(docID);
@@ -164,9 +153,9 @@
         System.out.println("  " + numDocs + ": s=" + s);
       }
       
-      final Document doc = new Document();
-      doc.add(new SortedDocValuesField("stringdv", br));
-      doc.add(new NumericDocValuesField("id", numDocs));
+      final Document doc = writer.newDocument();
+      doc.addAtom("stringdv", br);
+      doc.addInt("id", numDocs);
       docValues.add(br);
       writer.addDocument(doc);
       numDocs++;
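
The migration pattern in this file recurs throughout the patch: per-field Field instances go away, the schema is configured once through FieldTypes on the writer, and documents come from writer.newDocument(). A minimal sketch of that flow under the branch API visible in the hunks above (indexOne and the field names are illustrative):

    void indexOne(IndexWriter w) throws IOException {
      FieldTypes fieldTypes = w.getFieldTypes();
      // Opt out of schema defaults before the first document is indexed:
      fieldTypes.disableSorting("bytes");
      fieldTypes.setDocValuesType("bytes", DocValuesType.NONE);

      Document d = w.newDocument();              // schema-aware; replaces new Document()
      d.addLong("number", 42L);                  // replaces NumericDocValuesField
      d.addAtom("bytes", new BytesRef("abc"));   // replaces BinaryDocValuesField
      w.addDocument(d);
    }
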
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java
index 99cf205..4eb24bb 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms32.java
@@ -22,12 +22,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -62,44 +61,19 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
-    
-    final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
-    storedInt.setStored(true);
-    storedInt.freeze();
-
-    final FieldType storedInt8 = new FieldType(storedInt);
-    storedInt8.setNumericPrecisionStep(8);
-
-    final FieldType storedInt4 = new FieldType(storedInt);
-    storedInt4.setNumericPrecisionStep(4);
-
-    final FieldType storedInt2 = new FieldType(storedInt);
-    storedInt2.setNumericPrecisionStep(2);
-
-    IntField
-      field8 = new IntField("field8", 0, storedInt8),
-      field4 = new IntField("field4", 0, storedInt4),
-      field2 = new IntField("field2", 0, storedInt2);
-    
-    Document doc = new Document();
-    // add fields, that have a distance to test general functionality
-    doc.add(field8); doc.add(field4); doc.add(field2);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("field");
     
     // Add a series of noDocs docs with increasing int values
     for (int l=0; l<noDocs; l++) {
+      Document doc = writer.newDocument();
       int val=distance*l+startOffset;
-      field8.setIntValue(val);
-      field4.setIntValue(val);
-      field2.setIntValue(val);
-
-      val=l-(noDocs/2);
+      doc.addInt("field", val);
       writer.addDocument(doc);
     }
   
     Map<String,Type> map = new HashMap<>();
-    map.put("field2", Type.INTEGER);
-    map.put("field4", Type.INTEGER);
-    map.put("field8", Type.INTEGER);
+    map.put("field", Type.INTEGER);
     reader = UninvertingReader.wrap(writer.getReader(), map);
     searcher=newSearcher(reader);
     writer.close();
@@ -114,9 +88,12 @@
     directory.close();
     directory = null;
   }
-  
-  private void testSorting(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
+
+  @Test
+  public void testSorting() throws Exception {
+    FieldTypes fieldTypes = reader.getFieldTypes();
+
+    String field="field";
     // 10 random tests, the index order is ascending,
     // so using a reverse sort field should return descending documents
     int num = TestUtil.nextInt(random(), 10, 20);
@@ -126,7 +103,7 @@
       if (lower>upper) {
         int a=lower; lower=upper; upper=a;
       }
-      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      Query tq = new ConstantScoreQuery(fieldTypes.newIntRangeFilter(field, lower, true, upper, true));
       TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
       if (topDocs.totalHits==0) continue;
       ScoreDoc[] sd = topDocs.scoreDocs;
@@ -139,19 +116,4 @@
       }
     }
   }
-
-  @Test
-  public void testSorting_8bit() throws Exception {
-    testSorting(8);
-  }
-  
-  @Test
-  public void testSorting_4bit() throws Exception {
-    testSorting(4);
-  }
-  
-  @Test
-  public void testSorting_2bit() throws Exception {
-    testSorting(2);
-  }  
 }
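
With precision steps gone there is nothing left to parameterize, which is why the testSorting_Nbit variants removed above collapse into a single testSorting. The query-side replacement follows the same shape everywhere in this patch; a sketch assuming the filter factory used above (searchIntRange, the searcher, and the field name are illustrative):

    TopDocs searchIntRange(IndexSearcher searcher, FieldTypes fieldTypes,
                           int lower, int upper) throws IOException {
      // Inclusive on both ends, standing in for
      // NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true):
      Query q = new ConstantScoreQuery(
          fieldTypes.newIntRangeFilter("field", lower, true, upper, true));
      return searcher.search(q, 10);
    }
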
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java
index 4d4bf9a..4fc5f44 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestNumericTerms64.java
@@ -22,12 +22,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -39,7 +38,6 @@
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Test;
 
 public class TestNumericTerms64 extends LuceneTestCase {
   // distance of entries
@@ -62,49 +60,18 @@
         newIndexWriterConfig(new MockAnalyzer(random()))
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("field");
 
-    final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
-    storedLong.setStored(true);
-    storedLong.freeze();
-
-    final FieldType storedLong8 = new FieldType(storedLong);
-    storedLong8.setNumericPrecisionStep(8);
-
-    final FieldType storedLong4 = new FieldType(storedLong);
-    storedLong4.setNumericPrecisionStep(4);
-
-    final FieldType storedLong6 = new FieldType(storedLong);
-    storedLong6.setNumericPrecisionStep(6);
-
-    final FieldType storedLong2 = new FieldType(storedLong);
-    storedLong2.setNumericPrecisionStep(2);
-
-    LongField
-      field8 = new LongField("field8", 0L, storedLong8),
-      field6 = new LongField("field6", 0L, storedLong6),
-      field4 = new LongField("field4", 0L, storedLong4),
-      field2 = new LongField("field2", 0L, storedLong2);
-
-    Document doc = new Document();
-    // add fields, that have a distance to test general functionality
-    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2);
-    
     // Add a series of noDocs docs with increasing long values
     for (int l=0; l<noDocs; l++) {
+      Document doc = writer.newDocument();
       long val=distance*l+startOffset;
-      field8.setLongValue(val);
-      field6.setLongValue(val);
-      field4.setLongValue(val);
-      field2.setLongValue(val);
-
-      val=l-(noDocs/2);
+      doc.addLong("field", val);
       writer.addDocument(doc);
     }
     Map<String,Type> map = new HashMap<>();
-    map.put("field2", Type.LONG);
-    map.put("field4", Type.LONG);
-    map.put("field6", Type.LONG);
-    map.put("field8", Type.LONG);
+    map.put("field", Type.LONG);
     reader = UninvertingReader.wrap(writer.getReader(), map);
     searcher=newSearcher(reader);
     writer.close();
@@ -120,8 +87,9 @@
     directory = null;
   }
   
-  private void testSorting(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
+  public void testSorting() throws Exception {
+    FieldTypes fieldTypes = reader.getFieldTypes();
+    String field="field";
     // 10 random tests, the index order is ascending,
     // so using a reverse sort field should return descending documents
     int num = TestUtil.nextInt(random(), 10, 20);
@@ -131,7 +99,7 @@
       if (lower>upper) {
         long a=lower; lower=upper; upper=a;
       }
-      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      Query tq = new ConstantScoreQuery(fieldTypes.newLongRangeFilter(field, lower, true, upper, true));
       TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
       if (topDocs.totalHits==0) continue;
       ScoreDoc[] sd = topDocs.scoreDocs;
@@ -144,24 +112,4 @@
       }
     }
   }
-
-  @Test
-  public void testSorting_8bit() throws Exception {
-    testSorting(8);
-  }
-  
-  @Test
-  public void testSorting_6bit() throws Exception {
-    testSorting(6);
-  }
-  
-  @Test
-  public void testSorting_4bit() throws Exception {
-    testSorting(4);
-  }
-  
-  @Test
-  public void testSorting_2bit() throws Exception {
-    testSorting(2);
-  }
 }
diff --git a/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java b/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java
index 69ee86d..4c994b8 100644
--- a/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java
+++ b/lucene/misc/src/test/org/apache/lucene/uninverting/TestUninvertingReader.java
@@ -21,12 +21,10 @@
 import java.util.Collections;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.uninverting.UninvertingReader.Type;
@@ -36,18 +34,22 @@
 import org.apache.lucene.util.TestUtil;
 
 public class TestUninvertingReader extends LuceneTestCase {
-  
+
+  // nocommit also make back-compat variants of these
   public void testSortedSetInteger() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    
-    Document doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
+
+    Document doc = iw.newDocument();
+    doc.addInt("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
-    doc.add(new IntField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addInt("foo", 5);
+    doc.addInt("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -69,10 +71,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToInt(value));
+    assertEquals(-3, NumericUtils.bytesToInt(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToInt(value));
+    assertEquals(5, NumericUtils.bytesToInt(value));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -81,14 +83,17 @@
   public void testSortedSetFloat() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
     
-    Document doc = new Document();
-    doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addFloat("foo", 5f);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
-    doc.add(new IntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addFloat("foo", 5f);
+    doc.addFloat("foo", -3f);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -111,10 +116,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(Float.floatToRawIntBits(-3f), NumericUtils.prefixCodedToInt(value));
+    assertEquals(-3f, NumericUtils.bytesToFloat(value), 0.0f);
     
     value = v.lookupOrd(1);
-    assertEquals(Float.floatToRawIntBits(5f), NumericUtils.prefixCodedToInt(value));
+    assertEquals(5f, NumericUtils.bytesToFloat(value), 0.0f);
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -123,14 +128,17 @@
   public void testSortedSetLong() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
     
-    Document doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLong("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
-    doc.add(new LongField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addLong("foo", 5);
+    doc.addLong("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -152,10 +160,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToLong(value));
+    assertEquals(-3, NumericUtils.bytesToLong(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToLong(value));
+    assertEquals(5, NumericUtils.bytesToLong(value));
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
@@ -164,14 +172,17 @@
   public void testSortedSetDouble() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
     
-    Document doc = new Document();
-    doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addDouble("foo", 5d);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
-    doc.add(new LongField("foo", Double.doubleToRawLongBits(-3d), Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addDouble("foo", 5d);
+    doc.addDouble("foo", -3d);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -193,10 +204,10 @@
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(Double.doubleToRawLongBits(-3d), NumericUtils.prefixCodedToLong(value));
+    assertEquals(-3d, NumericUtils.bytesToDouble(value), 0.0);
     
     value = v.lookupOrd(1);
-    assertEquals(Double.doubleToRawLongBits(5d), NumericUtils.prefixCodedToLong(value));
+    assertEquals(5d, NumericUtils.bytesToDouble(value), 0.0);
     TestUtil.checkReader(ir);
     ir.close();
     dir.close();
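
The assertion changes in this file reflect the branch's switch from prefix-coded terms to a plain sortable byte encoding: tests now decode the ord's bytes back into the number instead of comparing raw bit patterns. A decode-only sketch using just the NumericUtils methods visible above (decodeSmallest is illustrative):

    long decodeSmallest(SortedSetDocValues v) {
      // The sortable encoding means ord 0 holds the numerically smallest value:
      BytesRef value = v.lookupOrd(0);
      return NumericUtils.bytesToLong(value);  // bytesToInt/bytesToFloat/bytesToDouble are the siblings
    }
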
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
index c04a202..0660106 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DoubleFieldSource.java
@@ -20,13 +20,14 @@
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDouble;
 
@@ -52,7 +53,7 @@
     return new DoubleDocValues(this) {
       @Override
       public double doubleVal(int doc) {
-        return Double.longBitsToDouble(arr.get(doc));
+        return NumericUtils.longToDouble(arr.get(doc));
       }
 
       @Override
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
index 8f16661..6835763 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/FloatFieldSource.java
@@ -20,12 +20,13 @@
 import java.io.IOException;
 import java.util.Map;
 
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;
 
@@ -52,7 +53,7 @@
     return new FloatDocValues(this) {
       @Override
       public float floatVal(int doc) {
-        return Float.intBitsToFloat((int)arr.get(doc));
+        return NumericUtils.intToFloat((int)arr.get(doc));
       }
 
       @Override
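
Both value sources swap raw IEEE bit reinterpretation for NumericUtils, which suggests the doc-values bits on this branch are stored in sortable form; decoding them with Double.longBitsToDouble or Float.intBitsToFloat would then come back wrong for negative values. A one-method sketch under that assumption (decodeDouble is illustrative):

    double decodeDouble(long storedBits) {
      // Must match the storage encoding; NumericUtils.intToFloat is the float analogue.
      return NumericUtils.longToDouble(storedBits);
    }
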
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
index 4f07b22..62a4369 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
@@ -16,14 +16,23 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -37,14 +46,6 @@
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.PriorityQueue;
 
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
 
 /**
  * Generate "more like this" similarity queries.
@@ -737,9 +738,8 @@
 
       // field does not store term vector info
       if (vector == null) {
-        StoredDocument d = ir.document(docNum);
-        StorableField[] fields = d.getFields(fieldName);
-        for (StorableField field : fields) {
+        Document d = ir.document(docNum);
+        for (IndexableField field : d.getFields(fieldName)) {
           final String stringValue = field.stringValue();
           if (stringValue != null) {
             addTermFrequencies(new StringReader(stringValue), termFreqMap, fieldName);
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java b/lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java
index ac414dd..49449eb 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -36,8 +35,8 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeFilter;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -69,11 +68,11 @@
   }
 
   private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("accessRights", accessRights, Field.Store.YES));
-    doc.add(newTextField("price", price, Field.Store.YES));
-    doc.add(newTextField("date", date, Field.Store.YES));
-    doc.add(newTextField("inStock", inStock, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("accessRights", accessRights);
+    doc.addLargeText("price", price);
+    doc.addLargeText("date", date);
+    doc.addLargeText("inStock", inStock);
     writer.addDocument(doc);
   }
 
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
index fa3d639..1d413d0 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
@@ -27,11 +27,10 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
@@ -66,9 +65,9 @@
         "this is the end of the universe as we know it",
         "there is the famous restaurant at the end of the universe",};
     for (int i = 0; i < docs.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + i, Field.Store.YES));
-      doc.add(newTextField("field", docs[i], Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", "" + i);
+      doc.addLargeText("field", docs[i]);
       w.addDocument(doc);
     }
     
@@ -196,9 +195,9 @@
         "this is the end of the universe as we know it",
         "there is the famous restaurant at the end of the universe",};
     for (int i = 0; i < docs.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + i, Field.Store.YES));
-      doc.add(newTextField("field", docs[i], Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", "" + i);
+      doc.addLargeText("field", docs[i]);
       w.addDocument(doc);
     }
     
@@ -355,9 +354,9 @@
         "this is the end of the universe as we know it",
         "there is the famous restaurant at the end of the universe",};
     for (int i = 0; i < docs.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + i, Field.Store.YES));
-      doc.add(newTextField("field", docs[i], Field.Store.NO));
+      Document doc = w.newDocument();
+      doc.addAtom("id", "" + i);
+      doc.addLargeText("field", docs[i]);
       w.addDocument(doc);
     }
 
@@ -532,7 +531,7 @@
       long seed) throws IOException {
     Random random = new Random(seed);
     // primary source for our data is from linefiledocs, it's realistic.
-    LineFileDocs lineFileDocs = new LineFileDocs(random);
+    LineFileDocs lineFileDocs = new LineFileDocs(writer.w, random);
     
     // TODO: we should add other fields that use things like docs&freqs but omit
     // positions,
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
index c275233..0360677 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
@@ -27,15 +27,14 @@
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocIdSet;
@@ -72,9 +71,9 @@
     Directory rd = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), rd);
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       int term = i * 10; //terms are units of 10;
-      doc.add(newStringField(fieldName, "" + term, Field.Store.YES));
+      doc.addAtom(fieldName, "" + term);
       w.addDocument(doc);
     }
     IndexReader reader = SlowCompositeReaderWrapper.wrap(w.getReader());
@@ -107,8 +106,8 @@
     String fieldName = "field1";
     Directory rd1 = newDirectory();
     RandomIndexWriter w1 = new RandomIndexWriter(random(), rd1);
-    Document doc = new Document();
-    doc.add(newStringField(fieldName, "content1", Field.Store.YES));
+    Document doc = w1.newDocument();
+    doc.addAtom(fieldName, "content1");
     w1.addDocument(doc);
     IndexReader reader1 = w1.getReader();
     w1.close();
@@ -116,8 +115,8 @@
     fieldName = "field2";
     Directory rd2 = newDirectory();
     RandomIndexWriter w2 = new RandomIndexWriter(random(), rd2);
-    doc = new Document();
-    doc.add(newStringField(fieldName, "content2", Field.Store.YES));
+    doc = w2.newDocument();
+    doc.addAtom(fieldName, "content2");
     w2.addDocument(doc);
     IndexReader reader2 = w2.getReader();
     w2.close();
@@ -148,11 +147,11 @@
     List<Term> terms = new ArrayList<>();
     for (int i = 0; i < num; i++) {
       terms.add(new Term("field" + i, "content1"));
-      Document doc = new Document();
+      Document doc = w.newDocument();
       if (skip == i) {
         continue;
       }
-      doc.add(newStringField("field" + i, "content1", Field.Store.YES));
+      doc.addAtom("field" + i, "content1");
       w.addDocument(doc);  
     }
     
@@ -180,8 +179,8 @@
     for (int i = 0; i < num; i++) {
       String field = "field" + random().nextInt(100);
       terms.add(new Term(field, "content1"));
-      Document doc = new Document();
-      doc.add(newStringField(field, "content1", Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addAtom(field, "content1");
       w.addDocument(doc);
     }
     int randomFields = random().nextInt(10);
@@ -218,8 +217,8 @@
       String field = "field" + (singleField ? "1" : random().nextInt(100));
       String string = TestUtil.randomRealisticUnicodeString(random());
       terms.add(new Term(field, string));
-      Document doc = new Document();
-      doc.add(newStringField(field, string, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addAtom(field, string);
       w.addDocument(doc);
     }
     IndexReader reader = w.getReader();
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
index 1709a01..8040f22 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
@@ -305,7 +305,7 @@
 
       log("doc = "+doc);
 
-      float fieldScore = expectedFieldScore(s.getIndexReader().document(doc).get(ID_FIELD));
+      float fieldScore = expectedFieldScore(s.getIndexReader().document(doc).getString(ID_FIELD));
       log("fieldScore = " + fieldScore);
       assertTrue("fieldScore should not be 0", fieldScore > 0);
 
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
index 4153317..dd2b207 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
@@ -3,20 +3,11 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -125,31 +116,13 @@
   }
 
   private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
-    Document d = new Document();
-    Field f;
+    Document d = iw.newDocument();
     int scoreAndID = i + 1;
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setTokenized(false);
-    customType.setOmitNorms(true);
-    
-    f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes
-    d.add(f);
-    d.add(new SortedDocValuesField(ID_FIELD, new BytesRef(id2String(scoreAndID))));
-
-    FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType2.setOmitNorms(true);
-    f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
-    d.add(f);
-
-    f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
-    d.add(f);
-    d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));
-
-    f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
-    d.add(f);
-    d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));
-
+    d.addAtom(ID_FIELD, id2String(scoreAndID)); // for debug purposes
+    d.addLargeText(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i));
+    d.addInt(INT_FIELD, scoreAndID); // for function scoring
+    d.addFloat(FLOAT_FIELD, scoreAndID); // for function scoring
     iw.addDocument(d);
     log("added: " + d);
   }
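
The condensed addDoc above shows the other recurring simplification: one add call per logical field, where the old code needed an indexed field plus a parallel doc-values field. A sketch assuming the branch semantics that each addX call covers both roles:

    Document d = iw.newDocument();
    d.addAtom(ID_FIELD, "ID1");   // one call; no separate SortedDocValuesField needed
    d.addInt(INT_FIELD, 1);       // one call; no separate NumericDocValuesField needed
    iw.addDocument(d);
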
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestBoostedQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestBoostedQuery.java
index a6db9f1..4ccfbc0 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestBoostedQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestBoostedQuery.java
@@ -3,8 +3,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -55,9 +53,8 @@
     IndexWriterConfig iwConfig = newIndexWriterConfig(new MockAnalyzer(random()));
     iwConfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConfig);
-    Document document = new Document();
-    Field idField = new SortedDocValuesField("id", new BytesRef());
-    document.add(idField);
+    Document document = iw.newDocument();
+    document.addAtom("id", new BytesRef());
     iw.addDocument(document);
     ir = iw.getReader();
     is = newSearcher(ir);
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java
index 2342457..d8bf96d 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestDocValuesFieldSources.java
@@ -20,15 +20,12 @@
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
@@ -38,7 +35,6 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.packed.PackedInts;
-
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 
 
@@ -48,42 +44,33 @@
     Directory d = newDirectory();
     IndexWriterConfig iwConfig = newIndexWriterConfig(new MockAnalyzer(random()));
     final int nDocs = atLeast(50);
-    final Field id = new NumericDocValuesField("id", 0);
-    final Field f;
-    switch (type) {
-      case BINARY:
-        f = new BinaryDocValuesField("dv", new BytesRef());
-        break;
-      case SORTED:
-        f = new SortedDocValuesField("dv", new BytesRef());
-        break;
-      case NUMERIC:
-        f = new NumericDocValuesField("dv", 0);
-        break;
-      default:
-        throw new AssertionError();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), d, iwConfig);
+    if (type == DocValuesType.BINARY) {
+      FieldTypes fieldTypes = iw.getFieldTypes();
+      fieldTypes.disableSorting("dv");
     }
-    Document document = new Document();
-    document.add(id);
-    document.add(f);
-
     final Object[] vals = new Object[nDocs];
 
-    RandomIndexWriter iw = new RandomIndexWriter(random(), d, iwConfig);
     for (int i = 0; i < nDocs; ++i) {
-      id.setLongValue(i);
+      Document document = iw.newDocument();
+      document.addInt("id", i);
       switch (type) {
-        case SORTED:
         case BINARY:
           do {
             vals[i] = TestUtil.randomSimpleString(random(), 20);
           } while (((String) vals[i]).isEmpty());
-          f.setBytesValue(new BytesRef((String) vals[i]));
+          document.addBinary("dv", new BytesRef((String) vals[i]));
+          break;
+        case SORTED:
+          do {
+            vals[i] = TestUtil.randomSimpleString(random(), 20);
+          } while (((String) vals[i]).isEmpty());
+          document.addAtom("dv", new BytesRef((String) vals[i]));
           break;
         case NUMERIC:
           final int bitsPerValue = RandomInts.randomIntBetween(random(), 1, 31); // keep it an int
           vals[i] = (long) random().nextInt((int) PackedInts.maxValue(bitsPerValue));
-          f.setLongValue((Long) vals[i]);
+          document.addLong("dv", (Long) vals[i]);
           break;
       }
       iw.addDocument(document);
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
index f350358..1d1a93e 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
@@ -68,7 +68,7 @@
     assertEquals("All docs should be matched!",N_DOCS,h.length);
     String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test
     for (int i=0; i<h.length; i++) {
-      String resID = s.doc(h[i].doc).get(ID_FIELD);
+      String resID = s.doc(h[i].doc).getString(ID_FIELD);
       log(i+".   score="+h[i].score+"  -  "+resID);
       log(s.explain(functionQuery,h[i].doc));
       assertTrue("res id "+resID+" should be < prev res id "+prevID, resID.compareTo(prevID)<0);
@@ -101,7 +101,7 @@
     for (ScoreDoc aSd : sd) {
       float score = aSd.score;
       log(s.explain(functionQuery, aSd.doc));
-      String id = s.getIndexReader().document(aSd.doc).get(ID_FIELD);
+      String id = s.getIndexReader().document(aSd.doc).getString(ID_FIELD);
       float expectedScore = expectedFieldScore(id); // "ID7" --> 7.0
       assertEquals("score of " + id + " should be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
     }
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
index 394d5bf..9a68a15 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
@@ -20,9 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -47,17 +44,11 @@
     iwc.setMergePolicy(newLogMergePolicy()); // depends on docid order
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
 
-    Document doc = new Document();
-    Field field = new IntField("value", 0, Field.Store.YES);
-    Field dvField = new NumericDocValuesField("value", 0);
-    doc.add(field);
-    doc.add(dvField);
-
     // Save docs unsorted (decreasing value n, n-1, ...)
     final int NUM_VALS = 5;
     for (int val = NUM_VALS; val > 0; val--) {
-      field.setIntValue(val);
-      dvField.setLongValue(val);
+      Document doc = writer.newDocument();
+      doc.addInt("value", val);
       writer.addDocument(doc);
     }
 
@@ -79,7 +70,7 @@
     // Verify that sorting works in general
     int i = 0;
     for (ScoreDoc hit : hits.scoreDocs) {
-      int valueFromDoc = Integer.parseInt(reader.document(hit.doc).get("value"));
+      int valueFromDoc = reader.document(hit.doc).getInt("value");
       assertEquals(++i, valueFromDoc);
     }
 
@@ -94,7 +85,7 @@
     // Verify that hits are actually "after"
     int afterValue = ((Double) afterHit.fields[0]).intValue();
     for (ScoreDoc hit : hits.scoreDocs) {
-      int val = Integer.parseInt(reader.document(hit.doc).get("value"));
+      int val = reader.document(hit.doc).getInt("value");
       assertTrue(afterValue <= val);
       assertFalse(hit.doc == afterHit.doc);
     }
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java
index cb91ff2..76a6869 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestLongNormValueSource.java
@@ -19,8 +19,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -56,12 +54,12 @@
     iwConfig.setSimilarity(sim);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConfig);
 
-    Document doc = new Document();
-    doc.add(new TextField("text", "this is a test test test", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLargeText("text", "this is a test test test");
     iw.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new TextField("text", "second test", Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addLargeText("text", "second test");
     iw.addDocument(doc);
 
     reader = iw.getReader();
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java
index c689198..5b3bb43e 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestSortedSetFieldSource.java
@@ -20,11 +20,10 @@
 import java.util.Collections;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -34,14 +33,16 @@
   public void testSimple() throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
-    doc.add(newStringField("id", "2", Field.Store.YES));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("value");
+    Document doc = writer.newDocument();
+    doc.addBinary("value", new BytesRef("baz"));
+    doc.addAtom("id", "2");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
-    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
-    doc.add(newStringField("id", "1", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addBinary("value", new BytesRef("foo"));
+    doc.addBinary("value", new BytesRef("bar"));
+    doc.addAtom("id", "1");
     writer.addDocument(doc);
     writer.forceMerge(1);
     writer.close();
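
Multi-valued fields are the one case where the schema must be told up front; after setMultiValued, the same add call can be repeated within a document. A sketch using only calls from the hunk above (addTwoValues is illustrative):

    void addTwoValues(IndexWriter writer) throws IOException {
      FieldTypes fieldTypes = writer.getFieldTypes();
      fieldTypes.setMultiValued("value");            // declare before the first document
      Document doc = writer.newDocument();
      doc.addBinary("value", new BytesRef("foo"));
      doc.addBinary("value", new BytesRef("bar"));   // second value is legal once multiValued
      doc.addAtom("id", "1");
      writer.addDocument(doc);
    }
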
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
index b66c1e2..3752463 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
@@ -17,27 +17,18 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.queries.function.valuesource.ConstValueSource;
@@ -76,8 +67,8 @@
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -112,49 +103,16 @@
     IndexWriterConfig iwConfig = newIndexWriterConfig(new MockAnalyzer(random()));
     iwConfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConfig);
-    Document document = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    document.add(idField);
-    Field idDVField = new SortedDocValuesField("id", new BytesRef());
-    document.add(idDVField);
-    Field doubleField = new DoubleField("double", 0d, Field.Store.NO);
-    document.add(doubleField);
-    Field doubleDVField = new NumericDocValuesField("double", 0);
-    document.add(doubleDVField);
-    Field floatField = new FloatField("float", 0f, Field.Store.NO);
-    document.add(floatField);
-    Field floatDVField = new NumericDocValuesField("float", 0);
-    document.add(floatDVField);
-    Field intField = new IntField("int", 0, Field.Store.NO);
-    document.add(intField);
-    Field intDVField = new NumericDocValuesField("int", 0);
-    document.add(intDVField);
-    Field longField = new LongField("long", 0L, Field.Store.NO);
-    document.add(longField);
-    Field longDVField = new NumericDocValuesField("long", 0);
-    document.add(longDVField);
-    Field stringField = new StringField("string", "", Field.Store.NO);
-    document.add(stringField);
-    Field stringDVField = new SortedDocValuesField("string", new BytesRef());
-    document.add(stringDVField);
-    Field textField = new TextField("text", "", Field.Store.NO);
-    document.add(textField);
-    
-    for (String [] doc : documents) {
-      idField.setStringValue(doc[0]);
-      idDVField.setBytesValue(new BytesRef(doc[0]));
-      doubleField.setDoubleValue(Double.valueOf(doc[1]));
-      doubleDVField.setLongValue(Double.doubleToRawLongBits(Double.valueOf(doc[1])));
-      floatField.setFloatValue(Float.valueOf(doc[2]));
-      floatDVField.setLongValue(Float.floatToRawIntBits(Float.valueOf(doc[2])));
-      intField.setIntValue(Integer.valueOf(doc[3]));
-      intDVField.setLongValue(Integer.valueOf(doc[3]));
-      longField.setLongValue(Long.valueOf(doc[4]));
-      longDVField.setLongValue(Long.valueOf(doc[4]));
-      stringField.setStringValue(doc[5]);
-      stringDVField.setBytesValue(new BytesRef(doc[5]));
-      textField.setStringValue(doc[6]);
-      iw.addDocument(document);
+    for (String [] data : documents) {
+      Document doc = iw.newDocument();
+      doc.addAtom("id", data[0]);
+      doc.addDouble("double", Double.valueOf(data[1]));
+      doc.addFloat("float", Float.valueOf(data[2]));
+      doc.addInt("int", Integer.valueOf(data[3]));
+      doc.addLong("long", Long.valueOf(data[4]));
+      doc.addAtom("string", data[5]);
+      doc.addLargeText("text", data[6]);
+      iw.addDocument(doc);
     }
     
     reader = iw.getReader();
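
The per-field boilerplate on the removed side (a separate indexed Field plus a doc-values Field for each logical field, with manual Double.doubleToRawLongBits/Float.floatToRawIntBits encoding) collapses into single add calls. A fragment making the mapping explicit, reusing the test's iw; the values are illustrative:

    Document doc = iw.newDocument();
    doc.addAtom("id", "7");            // was StringField + SortedDocValuesField
    doc.addDouble("double", 7.0);      // was DoubleField + NumericDocValuesField
                                       //   (no more doubleToRawLongBits by hand)
    doc.addFloat("float", 7.0f);       // was FloatField + floatToRawIntBits
    doc.addInt("int", 7);
    doc.addLong("long", 7L);
    doc.addAtom("string", "g");
    doc.addLargeText("text", "some analyzed text");
    iw.addDocument(doc);
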
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
index ee03da5..9995300 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -70,15 +70,17 @@
   }
   
   private void addDoc(RandomIndexWriter writer, String text) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("text", text, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("text", text);
     writer.addDocument(doc);
   }
 
   private void addDoc(RandomIndexWriter writer, String[] texts) throws IOException {
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("text");
+    Document doc = writer.newDocument();
     for (String text : texts) {
-      doc.add(newTextField("text", text, Field.Store.YES));
+      doc.addLargeText("text", text);
     }
     writer.addDocument(doc);
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/NumericRangeQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/NumericRangeQueryNodeBuilder.java
index 7452679..9aed322 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/NumericRangeQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/NumericRangeQueryNodeBuilder.java
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.FieldType.NumericType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
@@ -26,7 +26,9 @@
 import org.apache.lucene.queryparser.flexible.standard.config.NumericConfig;
 import org.apache.lucene.queryparser.flexible.standard.nodes.NumericQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.NumericRangeQueryNode;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
 
 /**
- * Builds {@link NumericRangeQuery}s out of {@link NumericRangeQueryNode}s.
+ * Builds range {@link Query}s out of {@link NumericRangeQueryNode}s.
@@ -44,7 +46,7 @@
   }
   
   @Override
-  public NumericRangeQuery<? extends Number> build(QueryNode queryNode)
+  public Query build(QueryNode queryNode)
       throws QueryNodeException {
     NumericRangeQueryNode numericRangeNode = (NumericRangeQueryNode) queryNode;
     
@@ -55,38 +57,46 @@
     Number upperNumber = upperNumericNode.getValue();
     
     NumericConfig numericConfig = numericRangeNode.getNumericConfig();
-    NumericType numberType = numericConfig.getType();
+    FieldTypes fieldTypes = numericConfig.getFieldTypes();
     String field = StringUtils.toString(numericRangeNode.getField());
     boolean minInclusive = numericRangeNode.isLowerInclusive();
     boolean maxInclusive = numericRangeNode.isUpperInclusive();
-    int precisionStep = numericConfig.getPrecisionStep();
-    
-    switch (numberType) {
-      
-      case LONG:
-        return NumericRangeQuery.newLongRange(field, precisionStep,
-            (Long) lowerNumber, (Long) upperNumber, minInclusive, maxInclusive);
-      
-      case INT:
-        return NumericRangeQuery.newIntRange(field, precisionStep,
-            (Integer) lowerNumber, (Integer) upperNumber, minInclusive,
-            maxInclusive);
-      
-      case FLOAT:
-        return NumericRangeQuery.newFloatRange(field, precisionStep,
-            (Float) lowerNumber, (Float) upperNumber, minInclusive,
-            maxInclusive);
-      
-      case DOUBLE:
-        return NumericRangeQuery.newDoubleRange(field, precisionStep,
-            (Double) lowerNumber, (Double) upperNumber, minInclusive,
-            maxInclusive);
-        
-        default :
-          throw new QueryNodeException(new MessageImpl(
-            QueryParserMessages.UNSUPPORTED_NUMERIC_DATA_TYPE, numberType));
-        
+
+    // TODO: we should check here that the incoming Number is of the correct type:
+    Filter filter;
+    switch (fieldTypes.getValueType(field)) {
+    case INT:
+      filter = fieldTypes.newIntRangeFilter(field,
+                                            lowerNumber == null ? null : Integer.valueOf(lowerNumber.intValue()),
+                                            minInclusive,
+                                            upperNumber == null ? null : Integer.valueOf(upperNumber.intValue()),
+                                            maxInclusive);
+      break;
+    case LONG:
+      filter = fieldTypes.newLongRangeFilter(field,
+                                             lowerNumber == null ? null : Long.valueOf(lowerNumber.longValue()),
+                                             minInclusive,
+                                             upperNumber == null ? null : Long.valueOf(upperNumber.longValue()),
+                                             maxInclusive);
+      break;
+    case FLOAT:
+      filter = fieldTypes.newFloatRangeFilter(field,
+                                              lowerNumber == null ? null : Float.valueOf(lowerNumber.floatValue()),
+                                              minInclusive,
+                                              upperNumber == null ? null : Float.valueOf(upperNumber.floatValue()),
+                                              maxInclusive);
+      break;
+    case DOUBLE:
+      filter = fieldTypes.newDoubleRangeFilter(field,
+                                               lowerNumber == null ? null : Double.valueOf(lowerNumber.doubleValue()),
+                                               minInclusive,
+                                               upperNumber == null ? null : Double.valueOf(upperNumber.doubleValue()),
+                                               maxInclusive);
+      break;
+    default:
+      throw new IllegalArgumentException("field \"" + field + "\": cannot create numeric query: unhandled valueType " + fieldTypes.getValueType(field));
     }
+
+    return new ConstantScoreQuery(filter);
   }
-  
 }
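
What build() now produces, sketched for a LONG-typed field; "price" is a hypothetical field name, and fieldTypes would come from the NumericConfig as above:

    Filter filter = fieldTypes.newLongRangeFilter("price",
                                                  Long.valueOf(10L), true,     // lower, inclusive
                                                  Long.valueOf(100L), false);  // upper, exclusive
    Query query = new ConstantScoreQuery(filter);    // constant score: filters do not score

    // Null bounds mean an open-ended range, which is why the builder maps a
    // missing bound to null rather than a sentinel value:
    Filter atMost = fieldTypes.newLongRangeFilter("price", null, true,
                                                  Long.valueOf(100L), false);
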
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumericConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumericConfig.java
index 75ea68e..d5f84dd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumericConfig.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/NumericConfig.java
@@ -19,8 +19,7 @@
 
 import java.text.NumberFormat;
 
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.document.FieldTypes;
 
 /**
  * This class holds the configuration used to parse numeric queries and create
@@ -31,56 +30,28 @@
  */
 public class NumericConfig {
   
-  private int precisionStep;
-  
   private NumberFormat format;
   
-  private NumericType type;
+  private final FieldTypes fieldTypes;
   
   /**
    * Constructs a {@link NumericConfig} object.
    * 
-   * @param precisionStep
-   *          the precision used to index the numeric values
    * @param format
    *          the {@link NumberFormat} used to parse a {@link String} to
    *          {@link Number}
-   * @param type
-   *          the numeric type used to index the numeric values
+   * @param fieldTypes
+   *          the {@link FieldTypes} describing how each field was indexed
    * 
-   * @see NumericConfig#setPrecisionStep(int)
    * @see NumericConfig#setNumberFormat(NumberFormat)
-   * @see #setType(org.apache.lucene.document.FieldType.NumericType)
    */
-  public NumericConfig(int precisionStep, NumberFormat format,
-      NumericType type) {
-    setPrecisionStep(precisionStep);
+  public NumericConfig(NumberFormat format, FieldTypes fieldTypes) {
     setNumberFormat(format);
-    setType(type);
-    
+    this.fieldTypes = fieldTypes;
   }
-  
-  /**
-   * Returns the precision used to index the numeric values
-   * 
-   * @return the precision used to index the numeric values
-   * 
-   * @see NumericRangeQuery#getPrecisionStep()
-   */
-  public int getPrecisionStep() {
-    return precisionStep;
-  }
-  
-  /**
-   * Sets the precision used to index the numeric values
-   * 
-   * @param precisionStep
-   *          the precision used to index the numeric values
-   * 
-   * @see NumericRangeQuery#getPrecisionStep()
-   */
-  public void setPrecisionStep(int precisionStep) {
-    this.precisionStep = precisionStep;
+
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
   }
   
   /**
@@ -93,31 +64,7 @@
   public NumberFormat getNumberFormat() {
     return format;
   }
-  
-  /**
-   * Returns the numeric type used to index the numeric values
-   * 
-   * @return the numeric type used to index the numeric values
-   */
-  public NumericType getType() {
-    return type;
-  }
-  
-  /**
-   * Sets the numeric type used to index the numeric values
-   * 
-   * @param type the numeric type used to index the numeric values
-   */
-  public void setType(NumericType type) {
-    
-    if (type == null) {
-      throw new IllegalArgumentException("type cannot be null!");
-    }
-    
-    this.type = type;
-    
-  }
-  
+
   /**
    * Sets the {@link NumberFormat} used to parse a {@link String} to
    * {@link Number}
@@ -144,9 +91,7 @@
     if (obj instanceof NumericConfig) {
       NumericConfig other = (NumericConfig) obj;
       
-      if (this.precisionStep == other.precisionStep
-          && this.type == other.type
-          && (this.format == other.format || (this.format.equals(other.format)))) {
+      if (this.format == other.format || (this.format.equals(other.format))) {
         return true;
       }
       
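
Constructing the slimmed-down config is now just the following (a sketch; java.text.NumberFormat and java.util.Locale imports are assumed, and fieldTypes would come from e.g. IndexWriter.getFieldTypes()):

    NumberFormat format = NumberFormat.getNumberInstance(Locale.ROOT);
    NumericConfig config = new NumericConfig(format, fieldTypes);
    FieldTypes types = config.getFieldTypes();  // consulted per field at query-build time
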
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/NumericRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/NumericRangeQueryNode.java
index ee154aa..2db2b49 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/NumericRangeQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/NumericRangeQueryNode.java
@@ -17,7 +17,6 @@
  * the License.
  */
 
-import org.apache.lucene.document.FieldType.NumericType;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
@@ -53,25 +52,6 @@
     setBounds(lower, upper, lowerInclusive, upperInclusive, numericConfig);
   }
   
-  private static NumericType getNumericDataType(Number number) throws QueryNodeException {
-    
-    if (number instanceof Long) {
-      return NumericType.LONG;
-    } else if (number instanceof Integer) {
-      return NumericType.INT;
-    } else if (number instanceof Double) {
-      return NumericType.DOUBLE;
-    } else if (number instanceof Float) {
-      return NumericType.FLOAT;
-    } else {
-      throw new QueryNodeException(
-          new MessageImpl(
-              QueryParserMessages.NUMBER_CLASS_NOT_SUPPORTED_BY_NUMERIC_RANGE_QUERY,
-              number.getClass()));
-    }
-    
-  }
-  
   /**
    * Sets the upper and lower bounds of this range query node and the
    * {@link NumericConfig} associated with these bounds.
@@ -90,34 +70,6 @@
       throw new IllegalArgumentException("numericConfig cannot be null!");
     }
     
-    NumericType lowerNumberType, upperNumberType;
-    
-    if (lower != null && lower.getValue() != null) {
-      lowerNumberType = getNumericDataType(lower.getValue());
-    } else {
-      lowerNumberType = null;
-    }
-    
-    if (upper != null && upper.getValue() != null) {
-      upperNumberType = getNumericDataType(upper.getValue());
-    } else {
-      upperNumberType = null;
-    }
-    
-    if (lowerNumberType != null
-        && !lowerNumberType.equals(numericConfig.getType())) {
-      throw new IllegalArgumentException(
-          "lower value's type should be the same as numericConfig type: "
-              + lowerNumberType + " != " + numericConfig.getType());
-    }
-    
-    if (upperNumberType != null
-        && !upperNumberType.equals(numericConfig.getType())) {
-      throw new IllegalArgumentException(
-          "upper value's type should be the same as numericConfig type: "
-              + upperNumberType + " != " + numericConfig.getType());
-    }
-    
     super.setBounds(lower, upper, lowerInclusive, upperInclusive);
     this.numericConfig = numericConfig;
     
@@ -137,9 +89,7 @@
     StringBuilder sb = new StringBuilder("<numericRange lowerInclusive='");
     
     sb.append(isLowerInclusive()).append("' upperInclusive='").append(
-        isUpperInclusive()).append(
-        "' precisionStep='" + numericConfig.getPrecisionStep()).append(
-        "' type='" + numericConfig.getType()).append("'>\n");
+        isUpperInclusive()).append("'>\n");
     
     sb.append(getLowerBound()).append('\n');
     sb.append(getUpperBound()).append('\n');
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericQueryNodeProcessor.java
index 7c18eb1..b073a07 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericQueryNodeProcessor.java
@@ -100,20 +100,6 @@
                         .getCanonicalName()), e);
               }
               
-              switch (numericConfig.getType()) {
-                case LONG:
-                  number = number.longValue();
-                  break;
-                case INT:
-                  number = number.intValue();
-                  break;
-                case DOUBLE:
-                  number = number.doubleValue();
-                  break;
-                case FLOAT:
-                  number = number.floatValue();
-              }
-              
             } else {
               throw new QueryNodeParseException(new MessageImpl(
                   QueryParserMessages.NUMERIC_CANNOT_BE_EMPTY, fieldNode.getFieldAsString()));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericRangeQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericRangeQueryNodeProcessor.java
index 7710fbc..4fd1f98 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericRangeQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericRangeQueryNodeProcessor.java
@@ -114,24 +114,6 @@
             
             }
             
-            switch (numericConfig.getType()) {
-              case LONG:
-                if (upperNumber != null) upperNumber = upperNumber.longValue();
-                if (lowerNumber != null) lowerNumber = lowerNumber.longValue();
-                break;
-              case INT:
-                if (upperNumber != null) upperNumber = upperNumber.intValue();
-                if (lowerNumber != null) lowerNumber = lowerNumber.intValue();
-                break;
-              case DOUBLE:
-                if (upperNumber != null) upperNumber = upperNumber.doubleValue();
-                if (lowerNumber != null) lowerNumber = lowerNumber.doubleValue();
-                break;
-              case FLOAT:
-                if (upperNumber != null) upperNumber = upperNumber.floatValue();
-                if (lowerNumber != null) lowerNumber = lowerNumber.floatValue();
-            }
-            
             NumericQueryNode lowerNode = new NumericQueryNode(
                 termRangeNode.getField(), lowerNumber, numberFormat);
             NumericQueryNode upperNode = new NumericQueryNode(
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
index 81c6b36..67b7cf7 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
@@ -1,16 +1,17 @@
 package org.apache.lucene.queryparser.xml;
 
+import java.io.InputStream;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.queryparser.xml.builders.*;
 import org.apache.lucene.search.Query;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import java.io.InputStream;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -37,6 +38,8 @@
   protected QueryParser parser;
   protected QueryBuilderFactory queryFactory;
   protected FilterBuilderFactory filterFactory;
+  protected final FieldTypes fieldTypes;
+
   //Controls the max size of the LRU cache used for QueryFilter objects parsed.
   public static int maxNumCachedFilters = 20;
 
@@ -47,8 +50,8 @@
    *
    * @param parser A QueryParser which will be synchronized on during parse calls.
    */
-  public CoreParser(Analyzer analyzer, QueryParser parser) {
-    this(null, analyzer, parser);
+  public CoreParser(FieldTypes fieldTypes, Analyzer analyzer, QueryParser parser) {
+    this(fieldTypes, null, analyzer, parser);
   }
 
   /**
@@ -56,13 +59,14 @@
    *
    * @param defaultField The default field name used by QueryParsers constructed for UserQuery tags
    */
-  public CoreParser(String defaultField, Analyzer analyzer) {
-    this(defaultField, analyzer, null);
+  public CoreParser(FieldTypes fieldTypes, String defaultField, Analyzer analyzer) {
+    this(fieldTypes, defaultField, analyzer, null);
   }
 
-  protected CoreParser(String defaultField, Analyzer analyzer, QueryParser parser) {
+  protected CoreParser(FieldTypes fieldTypes, String defaultField, Analyzer analyzer, QueryParser parser) {
     this.analyzer = analyzer;
     this.parser = parser;
+    this.fieldTypes = fieldTypes;
     filterFactory = new FilterBuilderFactory();
     filterFactory.addBuilder("RangeFilter", new RangeFilterBuilder());
     filterFactory.addBuilder("NumericRangeFilter", new NumericRangeFilterBuilder());
@@ -118,7 +122,7 @@
   }
 
   public Query parse(InputStream xmlStream) throws ParserException {
-    return getQuery(parseXML(xmlStream).getDocumentElement());
+    return getQuery(fieldTypes, parseXML(xmlStream).getDocumentElement());
   }
 
   public void addQueryBuilder(String nodeName, QueryBuilder builder) {
@@ -150,7 +154,7 @@
 
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
-    return queryFactory.getQuery(e);
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
+    return queryFactory.getQuery(fieldTypes, e);
   }
 }
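
Callers now supply FieldTypes up front, e.g. (a sketch; fieldTypes, analyzer and xmlStream are assumed to exist, and "contents" is a hypothetical default field name):

    CoreParser coreParser = new CoreParser(fieldTypes, "contents", analyzer);
    Query query = coreParser.parse(xmlStream);   // internally getQuery(fieldTypes, root)
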
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java
index 8d69d62..630f9bb 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java
@@ -1,6 +1,7 @@
 package org.apache.lucene.queryparser.xml;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.queryparser.xml.builders.*;
 
@@ -34,8 +35,8 @@
    *
    * @param parser A QueryParser which will be synchronized on during parse calls.
    */
-  public CorePlusExtensionsParser(Analyzer analyzer, QueryParser parser) {
-    this(null, analyzer, parser);
+  public CorePlusExtensionsParser(FieldTypes fieldTypes, Analyzer analyzer, QueryParser parser) {
+    this(fieldTypes, null, analyzer, parser);
   }
 
   /**
@@ -43,12 +44,12 @@
    *
    * @param defaultField The default field name used by QueryParsers constructed for UserQuery tags
    */
-  public CorePlusExtensionsParser(String defaultField, Analyzer analyzer) {
-    this(defaultField, analyzer, null);
+  public CorePlusExtensionsParser(FieldTypes fieldTypes, String defaultField, Analyzer analyzer) {
+    this(fieldTypes, defaultField, analyzer, null);
   }
 
-  private CorePlusExtensionsParser(String defaultField, Analyzer analyzer, QueryParser parser) {
-    super(defaultField, analyzer, parser);
+  private CorePlusExtensionsParser(FieldTypes fieldTypes, String defaultField, Analyzer analyzer, QueryParser parser) {
+    super(fieldTypes, defaultField, analyzer, parser);
     filterFactory.addBuilder("TermsFilter", new TermsFilterBuilder(analyzer));
     filterFactory.addBuilder("BooleanFilter", new BooleanFilterBuilder(filterFactory));
     filterFactory.addBuilder("DuplicateFilter", new DuplicateFilterBuilder());
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java
index 0e732f6..a83156a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java
@@ -3,6 +3,7 @@
  */
 package org.apache.lucene.queryparser.xml;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Filter;
 import org.w3c.dom.Element;
 /*
@@ -27,5 +28,5 @@
  */
 public interface FilterBuilder {
 
-   public Filter getFilter(Element e) throws ParserException;
+   public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException;
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
index 1ab1bb8..58f6481 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
@@ -3,10 +3,11 @@
  */
 package org.apache.lucene.queryparser.xml;
 
+import java.util.HashMap;
+
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Filter;
 import org.w3c.dom.Element;
-
-import java.util.HashMap;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,12 +33,12 @@
   HashMap<String, FilterBuilder> builders = new HashMap<>();
 
   @Override
-  public Filter getFilter(Element n) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element n) throws ParserException {
     FilterBuilder builder = builders.get(n.getNodeName());
     if (builder == null) {
       throw new ParserException("No FilterBuilder defined for node " + n.getNodeName());
     }
-    return builder.getFilter(n);
+    return builder.getFilter(fieldTypes, n);
   }
 
   public void addBuilder(String nodeName, FilterBuilder builder) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java
index eff70d3..8051bd9 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java
@@ -1,7 +1,9 @@
 package org.apache.lucene.queryparser.xml;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -18,11 +20,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 /**
  * Implemented by objects that produce Lucene Query objects from XML streams. Implementations are
  * expected to be thread-safe so that they can be used to simultaneously parse multiple XML documents.
  */
 public interface QueryBuilder {
 
-  public Query getQuery(Element e) throws ParserException;
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException;
 }
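
Every builder implementation picks up the extra parameter. A sketch of a custom builder against the widened interface; MyTermQueryBuilder and its "fieldName" attribute handling are illustrative only (TermQuery, Term and Element imports assumed):

    public class MyTermQueryBuilder implements QueryBuilder {
      @Override
      public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
        String field = e.getAttribute("fieldName");
        return new TermQuery(new Term(field, e.getTextContent()));
      }
    }
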
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
index 94ab8f8..8f34047 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
@@ -3,10 +3,11 @@
  */
 package org.apache.lucene.queryparser.xml;
 
+import java.util.HashMap;
+
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
-
-import java.util.HashMap;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,12 +33,12 @@
   HashMap<String, QueryBuilder> builders = new HashMap<>();
 
   @Override
-  public Query getQuery(Element n) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element n) throws ParserException {
     QueryBuilder builder = builders.get(n.getNodeName());
     if (builder == null) {
       throw new ParserException("No QueryObjectBuilder defined for node " + n.getNodeName());
     }
-    return builder.getQuery(n);
+    return builder.getQuery(fieldTypes, n);
   }
 
   public void addBuilder(String nodeName, QueryBuilder builder) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanFilterBuilder.java
index 953a584..44beeaa 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanFilterBuilder.java
@@ -3,13 +3,14 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.BooleanFilter;
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.queries.FilterClause;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilder;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
@@ -43,7 +44,7 @@
   }
 
   @Override
-  public Filter getFilter(Element e) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     BooleanFilter bf = new BooleanFilter();
     NodeList nl = e.getChildNodes();
 
@@ -54,7 +55,7 @@
         BooleanClause.Occur occurs = BooleanQueryBuilder.getOccursValue(clauseElem);
 
         Element clauseFilter = DOMUtils.getFirstChildOrFail(clauseElem);
-        Filter f = factory.getFilter(clauseFilter);
+        Filter f = factory.getFilter(fieldTypes, clauseFilter);
         bf.add(new FilterClause(f, occurs));
       }
     }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java
index d59508c..d257ee4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BooleanQueryBuilder.java
@@ -3,12 +3,13 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
@@ -45,7 +46,7 @@
     */
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     BooleanQuery bq = new BooleanQuery(DOMUtils.getAttribute(e, "disableCoord", false));
     bq.setMinimumNumberShouldMatch(DOMUtils.getAttribute(e, "minimumNumberShouldMatch", 0));
     bq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
@@ -58,7 +59,7 @@
         BooleanClause.Occur occurs = getOccursValue(clauseElem);
 
         Element clauseQuery = DOMUtils.getFirstChildOrFail(clauseElem);
-        Query q = factory.getQuery(clauseQuery);
+        Query q = factory.getQuery(fieldTypes, clauseQuery);
         bq.add(new BooleanClause(q, occurs));
       }
     }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingQueryBuilder.java
index 5b962dc..16dfa8f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingQueryBuilder.java
@@ -1,10 +1,11 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.BoostingQuery;
-import org.apache.lucene.search.Query;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -37,15 +38,15 @@
   }
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     Element mainQueryElem = DOMUtils.getChildByTagOrFail(e, "Query");
     mainQueryElem = DOMUtils.getFirstChildOrFail(mainQueryElem);
-    Query mainQuery = factory.getQuery(mainQueryElem);
+    Query mainQuery = factory.getQuery(fieldTypes, mainQueryElem);
 
     Element boostQueryElem = DOMUtils.getChildByTagOrFail(e, "BoostQuery");
     float boost = DOMUtils.getAttribute(boostQueryElem, "boost", DEFAULT_BOOST);
     boostQueryElem = DOMUtils.getFirstChildOrFail(boostQueryElem);
-    Query boostQuery = factory.getQuery(boostQueryElem);
+    Query boostQuery = factory.getQuery(fieldTypes, boostQueryElem);
 
     BoostingQuery bq = new BoostingQuery(mainQuery, boostQuery, boost);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java
index eb920b1..9023286 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/BoostingTermBuilder.java
@@ -1,11 +1,12 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.search.payloads.AveragePayloadFunction;
 import org.apache.lucene.search.payloads.PayloadTermQuery;
 import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.ParserException;
 import org.w3c.dom.Element;
 
 /*
@@ -31,7 +32,7 @@
 public class BoostingTermBuilder extends SpanBuilderBase {
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String value = DOMUtils.getNonBlankTextOrFail(e);
 
@@ -40,4 +41,4 @@
     return btq;
   }
 
-}
\ No newline at end of file
+}
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
index da01dc9..63a89fe 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
@@ -3,14 +3,15 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
+import java.util.Map;
+
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.*;
 import org.apache.lucene.search.CachingWrapperFilter;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.w3c.dom.Element;
-
-import java.util.Map;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -61,7 +62,7 @@
   }
 
   @Override
-  public synchronized Filter getFilter(Element e) throws ParserException {
+  public synchronized Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     Element childElement = DOMUtils.getFirstChildOrFail(e);
 
     if (filterCache == null) {
@@ -75,10 +76,10 @@
     Query q = null;
     Filter f = null;
     if (qb != null) {
-      q = qb.getQuery(childElement);
+      q = qb.getQuery(fieldTypes, childElement);
       cacheKey = q;
     } else {
-      f = filterFactory.getFilter(childElement);
+      f = filterFactory.getFilter(fieldTypes, childElement);
       cacheKey = f;
     }
     Filter cachedFilter = filterCache.get(cacheKey);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java
index 5e811ae..b27dc0e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/ConstantScoreQueryBuilder.java
@@ -1,11 +1,12 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilderFactory;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -36,10 +37,10 @@
   }
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     Element filterElem = DOMUtils.getFirstChildOrFail(e);
 
-    Query q = new ConstantScoreQuery(filterFactory.getFilter(filterElem));
+    Query q = new ConstantScoreQuery(filterFactory.getFilter(fieldTypes, filterElem));
     q.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
     return q;
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java
index 16861e3..453a40f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DisjunctionMaxQueryBuilder.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
@@ -41,7 +42,7 @@
     */
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     float tieBreaker = DOMUtils.getAttribute(e, "tieBreaker", 0.0f); 
     DisjunctionMaxQuery dq = new DisjunctionMaxQuery(tieBreaker);
     dq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
@@ -51,7 +52,7 @@
       Node node = nl.item(i);
       if (node instanceof Element) { // all elements are disjuncts.
         Element queryElem = (Element) node;
-        Query q = factory.getQuery(queryElem);
+        Query q = factory.getQuery(fieldTypes, queryElem);
         dq.add(q);
       }
     }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DuplicateFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DuplicateFilterBuilder.java
index b85703a..ab462c6 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DuplicateFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/DuplicateFilterBuilder.java
@@ -3,6 +3,7 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilder;
 import org.apache.lucene.queryparser.xml.ParserException;
@@ -33,7 +34,7 @@
 public class DuplicateFilterBuilder implements FilterBuilder {
 
   @Override
-  public Filter getFilter(Element e) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     DuplicateFilter df = new DuplicateFilter(fieldName);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FilteredQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FilteredQueryBuilder.java
index 1c2a6ec..f708b23 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FilteredQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FilteredQueryBuilder.java
@@ -3,13 +3,14 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.FilteredQuery;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilder;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 
 /*
@@ -47,14 +48,14 @@
     * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element)
     */
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     Element filterElement = DOMUtils.getChildByTagOrFail(e, "Filter");
     filterElement = DOMUtils.getFirstChildOrFail(filterElement);
-    Filter f = filterFactory.getFilter(filterElement);
+    Filter f = filterFactory.getFilter(fieldTypes, filterElement);
 
     Element queryElement = DOMUtils.getChildByTagOrFail(e, "Query");
     queryElement = DOMUtils.getFirstChildOrFail(queryElement);
-    Query q = queryFactory.getQuery(queryElement);
+    Query q = queryFactory.getQuery(fieldTypes, queryElement);
 
     FilteredQuery fq = new FilteredQuery(q, f);
     fq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java
index c333815..4f59c3c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/FuzzyLikeThisQueryBuilder.java
@@ -1,6 +1,7 @@
 package org.apache.lucene.queryparser.xml.builders;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
@@ -44,7 +45,7 @@
   }
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     NodeList nl = e.getElementsByTagName("Field");
     int maxNumTerms = DOMUtils.getAttribute(e, "maxNumTerms", DEFAULT_MAX_NUM_TERMS);
     FuzzyLikeThisQuery fbq = new FuzzyLikeThisQuery(maxNumTerms, analyzer);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
index fe784a55..bc4a97c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
@@ -10,11 +10,12 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.mlt.MoreLikeThisQuery;
-import org.apache.lucene.queryparser.xml.QueryBuilder;
-import org.apache.lucene.search.Query;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -54,7 +55,7 @@
     * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element)
     */
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldsList = e.getAttribute("fieldNames"); //a comma-delimited list of fields
     String fields[] = defaultFieldNames;
     if ((fieldsList != null) && (fieldsList.trim().length() > 0)) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java
index 36c071c..b92cc3c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/MatchAllDocsQueryBuilder.java
@@ -1,9 +1,10 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -28,7 +29,7 @@
 public class MatchAllDocsQueryBuilder implements QueryBuilder {
   
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     return new MatchAllDocsQuery();
   }
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java
index 8c70b9d..d18a3d9 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java
@@ -17,19 +17,19 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.NumericRangeFilter;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilder;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.NumericUtils;
 import org.w3c.dom.Element;
 
-import java.io.IOException;
-
 /**
- * Creates a {@link NumericRangeFilter}. The table below specifies the required
+ * Creates a numeric range {@link Filter}. The table below specifies the required
  * attributes and the defaults if optional attributes are omitted. For more
@@ -113,37 +113,26 @@
   }
 
   @Override
-  public Filter getFilter(Element e) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String lowerTerm = DOMUtils.getAttributeOrFail(e, "lowerTerm");
     String upperTerm = DOMUtils.getAttributeOrFail(e, "upperTerm");
     boolean lowerInclusive = DOMUtils.getAttribute(e, "includeLower", true);
     boolean upperInclusive = DOMUtils.getAttribute(e, "includeUpper", true);
-    int precisionStep = DOMUtils.getAttribute(e, "precisionStep", NumericUtils.PRECISION_STEP_DEFAULT);
 
     String type = DOMUtils.getAttribute(e, "type", "int");
     try {
-      Filter filter;
       if (type.equalsIgnoreCase("int")) {
-        filter = NumericRangeFilter.newIntRange(field, precisionStep, Integer
-            .valueOf(lowerTerm), Integer.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        return fieldTypes.newIntRangeFilter(field, Integer.valueOf(lowerTerm), lowerInclusive, Integer.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("long")) {
-        filter = NumericRangeFilter.newLongRange(field, precisionStep, Long
-            .valueOf(lowerTerm), Long.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        return fieldTypes.newLongRangeFilter(field, Long.valueOf(lowerTerm), lowerInclusive, Long.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("double")) {
-        filter = NumericRangeFilter.newDoubleRange(field, precisionStep, Double
-            .valueOf(lowerTerm), Double.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        return fieldTypes.newDoubleRangeFilter(field, Double.valueOf(lowerTerm), lowerInclusive, Double.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("float")) {
-        filter = NumericRangeFilter.newFloatRange(field, precisionStep, Float
-            .valueOf(lowerTerm), Float.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        return fieldTypes.newFloatRangeFilter(field, Float.valueOf(lowerTerm), lowerInclusive, Float.valueOf(upperTerm), upperInclusive);
       } else {
         throw new ParserException("type attribute must be one of: [long, int, double, float]");
       }
-      return filter;
     } catch (NumberFormatException nfe) {
       if (strictMode) {
         throw new ParserException("Could not parse lowerTerm or upperTerm into a number", nfe);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeQueryBuilder.java
index 877e4d9..42135fd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeQueryBuilder.java
@@ -17,12 +17,14 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.NumericUtils;
 import org.w3c.dom.Element;
 
 /**
@@ -88,37 +90,29 @@
 public class NumericRangeQueryBuilder implements QueryBuilder {
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String lowerTerm = DOMUtils.getAttributeOrFail(e, "lowerTerm");
     String upperTerm = DOMUtils.getAttributeOrFail(e, "upperTerm");
     boolean lowerInclusive = DOMUtils.getAttribute(e, "includeLower", true);
     boolean upperInclusive = DOMUtils.getAttribute(e, "includeUpper", true);
-    int precisionStep = DOMUtils.getAttribute(e, "precisionStep", NumericUtils.PRECISION_STEP_DEFAULT);
 
     String type = DOMUtils.getAttribute(e, "type", "int");
+
     try {
-      Query filter;
+      Filter filter;
       if (type.equalsIgnoreCase("int")) {
-        filter = NumericRangeQuery.newIntRange(field, precisionStep, Integer
-            .valueOf(lowerTerm), Integer.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        filter = fieldTypes.newIntRangeFilter(field, Integer.valueOf(lowerTerm), lowerInclusive, Integer.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("long")) {
-        filter = NumericRangeQuery.newLongRange(field, precisionStep, Long
-            .valueOf(lowerTerm), Long.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        filter = fieldTypes.newLongRangeFilter(field, Long.valueOf(lowerTerm), lowerInclusive, Long.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("double")) {
-        filter = NumericRangeQuery.newDoubleRange(field, precisionStep, Double
-            .valueOf(lowerTerm), Double.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        filter = fieldTypes.newDoubleRangeFilter(field, Double.valueOf(lowerTerm), lowerInclusive, Double.valueOf(upperTerm), upperInclusive);
       } else if (type.equalsIgnoreCase("float")) {
-        filter = NumericRangeQuery.newFloatRange(field, precisionStep, Float
-            .valueOf(lowerTerm), Float.valueOf(upperTerm), lowerInclusive,
-            upperInclusive);
+        filter = fieldTypes.newFloatRangeFilter(field, Float.valueOf(lowerTerm), lowerInclusive, Float.valueOf(upperTerm), upperInclusive);
       } else {
         throw new ParserException("type attribute must be one of: [long, int, double, float]");
       }
-      return filter;
+      return new ConstantScoreQuery(filter);
     } catch (NumberFormatException nfe) {
       throw new ParserException("Could not parse lowerTerm or upperTerm into a number", nfe);
     }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeFilterBuilder.java
index 337c62d..23337a3 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/RangeFilterBuilder.java
@@ -3,11 +3,12 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.FilterBuilder;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermRangeFilter;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -33,7 +34,7 @@
 public class RangeFilterBuilder implements FilterBuilder {
 
   @Override
-  public Filter getFilter(Element e) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritance(e, "fieldName");
 
     String lowerTerm = e.getAttribute("lowerTerm");
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java
index d1c4531..d9fb1eb 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanBuilderBase.java
@@ -1,8 +1,9 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.spans.SpanQuery; // javadocs
-import org.apache.lucene.queryparser.xml.ParserException;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -27,8 +28,8 @@
 public abstract class SpanBuilderBase implements SpanQueryBuilder {
   
   @Override
-  public Query getQuery(Element e) throws ParserException {
-    return getSpanQuery(e);
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
+    return getSpanQuery(fieldTypes, e);
   }
 
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java
index d2288b4..66830e4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanFirstBuilder.java
@@ -1,9 +1,10 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.spans.SpanFirstQuery;
-import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.spans.SpanFirstQuery;
+import org.apache.lucene.search.spans.SpanQuery;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -34,10 +35,10 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     int end = DOMUtils.getAttribute(e, "end", 1);
     Element child = DOMUtils.getFirstChildElement(e);
-    SpanQuery q = factory.getSpanQuery(child);
+    SpanQuery q = factory.getSpanQuery(fieldTypes, child);
 
     SpanFirstQuery sfq = new SpanFirstQuery(q, end);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
index e26735f..e9b766c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
@@ -1,14 +1,15 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.spans.SpanNearQuery;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-
 import java.util.ArrayList;
 import java.util.List;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -38,14 +39,14 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String slopString = DOMUtils.getAttributeOrFail(e, "slop");
     int slop = Integer.parseInt(slopString);
     boolean inOrder = DOMUtils.getAttribute(e, "inOrder", false);
     List<SpanQuery> spans = new ArrayList<>();
     for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) {
       if (kid.getNodeType() == Node.ELEMENT_NODE) {
-        spans.add(factory.getSpanQuery((Element) kid));
+        spans.add(factory.getSpanQuery(fieldTypes, (Element) kid));
       }
     }
     SpanQuery[] spanQueries = spans.toArray(new SpanQuery[spans.size()]);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java
index 0be6dc8..dbf7dad 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNotBuilder.java
@@ -1,9 +1,10 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.spans.SpanNotQuery;
-import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.spans.SpanNotQuery;
+import org.apache.lucene.search.spans.SpanQuery;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -34,15 +35,15 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     Element includeElem = DOMUtils.getChildByTagOrFail(e, "Include");
     includeElem = DOMUtils.getFirstChildOrFail(includeElem);
 
     Element excludeElem = DOMUtils.getChildByTagOrFail(e, "Exclude");
     excludeElem = DOMUtils.getFirstChildOrFail(excludeElem);
 
-    SpanQuery include = factory.getSpanQuery(includeElem);
-    SpanQuery exclude = factory.getSpanQuery(excludeElem);
+    SpanQuery include = factory.getSpanQuery(fieldTypes, includeElem);
+    SpanQuery exclude = factory.getSpanQuery(fieldTypes, excludeElem);
 
     SpanNotQuery snq = new SpanNotQuery(include, exclude);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
index ce48d00..7315918 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
@@ -1,14 +1,15 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.spans.SpanOrQuery;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-
 import java.util.ArrayList;
 import java.util.List;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -38,11 +39,11 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     List<SpanQuery> clausesList = new ArrayList<>();
     for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) {
       if (kid.getNodeType() == Node.ELEMENT_NODE) {
-        SpanQuery clause = factory.getSpanQuery((Element) kid);
+        SpanQuery clause = factory.getSpanQuery(fieldTypes, (Element) kid);
         clausesList.add(clause);
       }
     }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
index e7978d1..350a740 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
@@ -1,21 +1,22 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.search.spans.SpanOrQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.ParserException;
 import org.w3c.dom.Element;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -45,7 +46,7 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String value = DOMUtils.getNonBlankTextOrFail(e);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java
index 8bbaa1c..fb815c9 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java
@@ -15,9 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.spans.SpanQuery;
 import org.w3c.dom.Element;
 
 /**
@@ -25,5 +26,5 @@
  */
 public interface SpanQueryBuilder extends QueryBuilder {
 
-  public SpanQuery getSpanQuery(Element e) throws ParserException;
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException;
 }
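
Downstream SpanQueryBuilder implementations must adopt the widened signature. A minimal sketch of a conforming builder, modeled on SpanTermBuilder below (element layout illustrative; imports as in SpanTermBuilder):

    public class ExampleSpanBuilder extends SpanBuilderBase {
      @Override
      public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
        // fieldTypes is now threaded through for schema-aware construction.
        String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
        String value = DOMUtils.getNonBlankTextOrFail(e);
        return new SpanTermQuery(new Term(field, value));
      }
    }
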
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
index 69fd7ba..2705b98 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
@@ -1,12 +1,13 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Element;
-
 import java.util.HashMap;
 import java.util.Map;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,8 +33,8 @@
   private final Map<String, SpanQueryBuilder> builders = new HashMap<>();
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
-    return getSpanQuery(e);
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
+    return getSpanQuery(fieldTypes, e);
   }
 
   public void addBuilder(String nodeName, SpanQueryBuilder builder) {
@@ -41,12 +42,12 @@
   }
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     SpanQueryBuilder builder = builders.get(e.getNodeName());
     if (builder == null) {
       throw new ParserException("No SpanQueryObjectBuilder defined for node " + e.getNodeName());
     }
-    return builder.getSpanQuery(e);
+    return builder.getSpanQuery(fieldTypes, e);
   }
 
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java
index bc16975..fcc5dfd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanTermBuilder.java
@@ -1,10 +1,11 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -29,7 +30,7 @@
 public class SpanTermBuilder extends SpanBuilderBase {
 
   @Override
-  public SpanQuery getSpanQuery(Element e) throws ParserException {
+  public SpanQuery getSpanQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String value = DOMUtils.getNonBlankTextOrFail(e);
     SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, value));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java
index d949b3b..14cd821 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermQueryBuilder.java
@@ -1,11 +1,12 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
 import org.w3c.dom.Element;
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -30,7 +31,7 @@
 public class TermQueryBuilder implements QueryBuilder {
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String value = DOMUtils.getNonBlankTextOrFail(e);
     TermQuery tq = new TermQuery(new Term(field, value));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
index 59f424d..382cb45 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
@@ -1,20 +1,21 @@
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.queries.TermsFilter;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.FilterBuilder;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Element;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.FilterBuilder;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.w3c.dom.Element;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -49,7 +50,7 @@
     * @see org.apache.lucene.xmlparser.FilterBuilder#process(org.w3c.dom.Element)
     */
   @Override
-  public Filter getFilter(Element e) throws ParserException {
+  public Filter getFilter(FieldTypes fieldTypes, Element e) throws ParserException {
     List<BytesRef> terms = new ArrayList<>();
     String text = DOMUtils.getNonBlankTextOrFail(e);
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
index d85d02c..6353e9f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
@@ -1,21 +1,22 @@
 package org.apache.lucene.queryparser.xml.builders;
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.queryparser.xml.QueryBuilder;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.queryparser.xml.DOMUtils;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.apache.lucene.queryparser.xml.QueryBuilder;
 import org.w3c.dom.Element;
 
-import java.io.IOException;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -45,7 +46,7 @@
   }
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String text = DOMUtils.getNonBlankTextOrFail(e);
 
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java
index 21e9eeb..6943cdc 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java
@@ -1,13 +1,14 @@
 package org.apache.lucene.queryparser.xml.builders;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
 import org.w3c.dom.Element;
 
 /*
@@ -58,7 +59,7 @@
     */
 
   @Override
-  public Query getQuery(Element e) throws ParserException {
+  public Query getQuery(FieldTypes fieldTypes, Element e) throws ParserException {
     String text = DOMUtils.getText(e);
     try {
       Query q = null;
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
index 722e748..689e9e3 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
@@ -31,10 +31,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
@@ -271,13 +268,8 @@
   private boolean isAHit(Query q, String content, Analyzer analyzer) throws IOException{
     Directory ramDir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, analyzer);
-    Document doc = new Document();
-    FieldType fieldType = new FieldType();
-    fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    fieldType.setTokenized(true);
-    fieldType.setStored(true);
-    Field field = new Field(FIELD, content, fieldType);
-    doc.add(field);
+    Document doc = writer.newDocument();
+    doc.addLargeText(FIELD, content);
     writer.addDocument(doc);
     writer.close();
     DirectoryReader ir = DirectoryReader.open(ramDir);
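
This test migration shows the recurring pattern in the remaining hunks: documents come from the writer, and typed add methods replace manual Field/FieldType setup. A condensed before-and-after sketch, assuming an open writer:

    // Before: new Document() plus a hand-built FieldType per field.
    // After: the writer supplies a document bound to its FieldTypes schema.
    Document doc = writer.newDocument();
    doc.addLargeText("body", "some analyzed text"); // analyzed large-text field
    writer.addDocument(doc);
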
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
index 8a36599..1bcd89f 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
@@ -24,7 +24,6 @@
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -287,8 +286,8 @@
     Analyzer analyzer = new MockAnalyzer(random());
     Directory ramDir = newDirectory();
     IndexWriter iw =  new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
-    doc.add(newTextField("body", "blah the footest blah", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "blah the footest blah");
     iw.addDocument(doc);
     iw.close();
     
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
index 8d3cd69..47ab874 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
@@ -22,11 +22,9 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -116,8 +114,8 @@
     TopDocs td = searcher.search(q, 10);
     ScoreDoc[] sd = td.scoreDocs;
     for (int i = 0; i < sd.length; i++) {
-      StoredDocument doc = searcher.doc(sd[i].doc);
-      String id = doc.get("id");
+      Document doc = searcher.doc(sd[i].doc);
+      String id = doc.getString("id");
       assertTrue(qString + "matched doc#" + id + " not expected", expecteds
           .contains(id));
       expecteds.remove(id);
@@ -172,10 +170,10 @@
     rd = newDirectory();
     IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(analyzer));
     for (int i = 0; i < docsContent.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("name", docsContent[i].name, Field.Store.YES));
-      doc.add(newTextField("id", docsContent[i].id, Field.Store.YES));
-      doc.add(newTextField("role", docsContent[i].role, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addLargeText("name", docsContent[i].name);
+      doc.addLargeText("id", docsContent[i].id);
+      doc.addLargeText("role", docsContent[i].role);
       w.addDocument(doc);
     }
     w.close();
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
index e3bef9e..1af495a 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
@@ -24,7 +24,6 @@
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -321,8 +320,8 @@
     Analyzer analyzer = new MockAnalyzer(random());
     Directory ramDir = newDirectory();
     IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
-    doc.add(newTextField("body", "blah the footest blah", Field.Store.NO));
+    Document doc = iw.newDocument();
+    doc.addLargeText("body", "blah the footest blah");
     iw.addDocument(doc);
     iw.close();
 
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
index 5fd74e2..4ee9839 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
@@ -33,13 +33,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
@@ -58,6 +52,17 @@
 import org.junit.Test;
 
 public class TestNumericQueryParser extends LuceneTestCase {
+
+  private static enum NumericType {
+    /** 32-bit integer numeric type */
+    INT, 
+    /** 64-bit long numeric type */
+    LONG, 
+    /** 32-bit float numeric type */
+    FLOAT, 
+    /** 64-bit double numeric type */
+    DOUBLE
+  }
   
   private static enum NumberType {
     NEGATIVE, ZERO, POSITIVE;
@@ -66,7 +71,6 @@
   final private static int[] DATE_STYLES = {DateFormat.FULL, DateFormat.LONG,
       DateFormat.MEDIUM, DateFormat.SHORT};
   
-  final private static int PRECISION_STEP = 8;
   final private static String FIELD_NAME = "field";
   private static Locale LOCALE;
   private static TimeZone TIMEZONE;
@@ -193,62 +197,29 @@
             .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))
             .setMergePolicy(newLogMergePolicy()));
     
-    Document doc = new Document();
-    HashMap<String,NumericConfig> numericConfigMap = new HashMap<>();
-    HashMap<String,Field> numericFieldMap = new HashMap<>();
-    qp.setNumericConfigMap(numericConfigMap);
-    
-    for (NumericType type : NumericType.values()) {
-      numericConfigMap.put(type.name(), new NumericConfig(PRECISION_STEP,
-          NUMBER_FORMAT, type));
-
-      FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
-      ft.setNumericType(type);
-      ft.setStored(true);
-      ft.setNumericPrecisionStep(PRECISION_STEP);
-      ft.freeze();
-      final Field field;
-
-      switch(type) {
-      case INT:
-        field = new IntField(type.name(), 0, ft);
-        break;
-      case FLOAT:
-        field = new FloatField(type.name(), 0.0f, ft);
-        break;
-      case LONG:
-        field = new LongField(type.name(), 0l, ft);
-        break;
-      case DOUBLE:
-        field = new DoubleField(type.name(), 0.0, ft);
-        break;
-      default:
-        fail();
-        field = null;
-      }
-      numericFieldMap.put(type.name(), field);
-      doc.add(field);
-    }
-    
-    numericConfigMap.put(DATE_FIELD_NAME, new NumericConfig(PRECISION_STEP,
-        DATE_FORMAT, NumericType.LONG));
-    FieldType ft = new FieldType(LongField.TYPE_NOT_STORED);
-    ft.setStored(true);
-    ft.setNumericPrecisionStep(PRECISION_STEP);
-    LongField dateField = new LongField(DATE_FIELD_NAME, 0l, ft);
-    numericFieldMap.put(DATE_FIELD_NAME, dateField);
-    doc.add(dateField);
-    
     for (NumberType numberType : NumberType.values()) {
-      setFieldValues(numberType, numericFieldMap);
+      Document doc = writer.newDocument();
+      doc.addInt("INT", getNumberType(numberType, "INT").intValue());
+      doc.addLong("LONG", getNumberType(numberType, "LONG").longValue());
+      doc.addFloat("FLOAT", getNumberType(numberType, "FLOAT").floatValue());
+      doc.addDouble("DOUBLE", getNumberType(numberType, "DOUBLE").doubleValue());
+      doc.addLong(DATE_FIELD_NAME, getNumberType(numberType, DATE_FIELD_NAME).longValue());
       if (VERBOSE) System.out.println("Indexing document: " + doc);
       writer.addDocument(doc);
     }
     
     reader = writer.getReader();
     searcher = newSearcher(reader);
+
+    HashMap<String,NumericConfig> numericConfigMap = new HashMap<>();
+    qp.setNumericConfigMap(numericConfigMap);
+    FieldTypes fieldTypes = reader.getFieldTypes();
+    for (NumericType type : NumericType.values()) {
+      numericConfigMap.put(type.name(), new NumericConfig(NUMBER_FORMAT, fieldTypes));
+    }
+    numericConfigMap.put(DATE_FIELD_NAME, new NumericConfig(DATE_FORMAT, fieldTypes));
+
     writer.close();
-    
   }
   
   private static Number getNumberType(NumberType numberType, String fieldName) {
@@ -291,31 +262,7 @@
     }
     
   }
-  
-  private static void setFieldValues(NumberType numberType,
-      HashMap<String,Field> numericFieldMap) {
-    
-    Number number = getNumberType(numberType, NumericType.DOUBLE
-        .name());
-    numericFieldMap.get(NumericType.DOUBLE.name()).setDoubleValue(
-        number.doubleValue());
-    
-    number = getNumberType(numberType, NumericType.INT.name());
-    numericFieldMap.get(NumericType.INT.name()).setIntValue(
-        number.intValue());
-    
-    number = getNumberType(numberType, NumericType.LONG.name());
-    numericFieldMap.get(NumericType.LONG.name()).setLongValue(
-        number.longValue());
-    
-    number = getNumberType(numberType, NumericType.FLOAT.name());
-    numericFieldMap.get(NumericType.FLOAT.name()).setFloatValue(
-        number.floatValue());
-    
-    number = getNumberType(numberType, DATE_FIELD_NAME);
-    numericFieldMap.get(DATE_FIELD_NAME).setLongValue(number.longValue());
-  }
-  
+
   private static int randomDateStyle(Random random) {
     return DATE_STYLES[random.nextInt(DATE_STYLES.length)];
   }
@@ -435,7 +382,7 @@
     }
     
     if (upperDateNumber != null) {
-    upperDateStr = ESCAPER.escape(
+      upperDateStr = ESCAPER.escape(
           DATE_FORMAT.format(new Date(upperDateNumber.longValue())), LOCALE,
           EscapeQuerySyntax.Type.STRING).toString();
     
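
The rewritten setup indexes one document per NumberType with typed add calls, then builds the parser's NumericConfig entries from the reader's FieldTypes rather than a shared PRECISION_STEP. A condensed sketch of that flow, with the writer/reader wiring assumed as in the test:

    Document doc = writer.newDocument();
    doc.addInt("INT", 42);
    doc.addLong("LONG", 42L);
    doc.addFloat("FLOAT", 42f);
    doc.addDouble("DOUBLE", 42d);
    writer.addDocument(doc);

    // The numeric type per field is recovered from the index schema.
    FieldTypes fieldTypes = reader.getFieldTypes();
    numericConfigMap.put("INT", new NumericConfig(NUMBER_FORMAT, fieldTypes));
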
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index e8b7474..d3fc100 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -34,7 +34,6 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -1045,36 +1044,6 @@
     assertEquals(query1, query2);
   }
 
-// Todo: Convert from DateField to DateUtil
-//  public void testLocalDateFormat() throws IOException, QueryNodeException {
-//    Directory ramDir = newDirectory();
-//    IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
-//    addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
-//    addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
-//    iw.close();
-//    IndexSearcher is = new IndexSearcher(ramDir, true);
-//    assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
-//    assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
-//    assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
-//    assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
-//    assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
-//    assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
-//    is.close();
-//    ramDir.close();
-//  }
-//
-//  private void addDateDoc(String content, int year, int month, int day,
-//                          int hour, int minute, int second, IndexWriter iw) throws IOException {
-//    Document d = new Document();
-//    d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
-//    Calendar cal = Calendar.getInstance(Locale.ENGLISH);
-//    cal.set(year, month - 1, day, hour, minute, second);
-//    d.add(newField("date", DateField.dateToString(cal.getTime()),
-//        Field.Store.YES, Field.Index.NOT_ANALYZED));
-//    iw.addDocument(d);
-//  }
-
-
   public void testStarParsing() throws Exception {
     // final int[] type = new int[1];
     // StandardQueryParser qp = new StandardQueryParser("field", new
@@ -1310,8 +1279,8 @@
   public void testMultiPhraseQuery() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new CannedAnalyzer()));
-    Document doc = new Document();
-    doc.add(newTextField("field", "", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "");
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java
index 647f503..ac8390a 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java
@@ -19,15 +19,13 @@
 
 import java.util.Random;
 
-import org.apache.lucene.document.Field;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 
 public class SingleFieldTestDb {
   private Directory db;
@@ -41,8 +39,8 @@
       fieldName = fName;
       IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(new MockAnalyzer(random)));
       for (int j = 0; j < docs.length; j++) {
-        Document d = new Document();
-        d.add(new TextField(fieldName, docs[j], Field.Store.NO));
+        Document d = writer.newDocument();
+        d.addLargeText(fieldName, docs[j]);
         writer.addDocument(d);
       }
       writer.close();
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
index 1181942..a3583ab 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
@@ -31,7 +31,6 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -938,34 +937,6 @@
     assertEquals(query1, query2);
   }
 
-// Todo: convert this from DateField to DateUtil
-//  public void testLocalDateFormat() throws IOException, ParseException {
-//    Directory ramDir = newDirectory();
-//    IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
-//    addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
-//    addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
-//    iw.close();
-//    IndexSearcher is = new IndexSearcher(ramDir, true);
-//    assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
-//    assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
-//    assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
-//    assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
-//    assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
-//    assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
-//    is.close();
-//    ramDir.close();
-//  }
-//
-//  private void addDateDoc(String content, int year, int month,
-//                          int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
-//    Document d = new Document();
-//    d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
-//    Calendar cal = Calendar.getInstance(Locale.ENGLISH);
-//    cal.set(year, month - 1, day, hour, minute, second);
-//    d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
-//    iw.addDocument(d);
-//  }
-
   public abstract void testStarParsing() throws Exception;
 
   public void testEscapedWildcard() throws Exception {
@@ -1090,8 +1061,8 @@
     Directory dir = newDirectory();
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
-    Document doc = new Document();
-    doc.add(newTextField("field", "the wizard of ozzy", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "the wizard of ozzy");
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     w.close();
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
index e5c61c2..de1c963 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java
@@ -17,18 +17,22 @@
  * limitations under the License.
  */
 
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -39,13 +43,7 @@
 import org.junit.AfterClass;
 import org.junit.Assume;
 import org.junit.BeforeClass;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
+import org.junit.Ignore;
 
 
 public class TestParser extends LuceneTestCase {
@@ -59,8 +57,6 @@
   public static void beforeClass() throws Exception {
     // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT):
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
-    //initialize the parser
-    builder = new CorePlusExtensionsParser("contents", analyzer);
 
     BufferedReader d = new BufferedReader(new InputStreamReader(
         TestParser.class.getResourceAsStream("reuters21578.txt"), StandardCharsets.US_ASCII));
@@ -71,10 +67,10 @@
       int endOfDate = line.indexOf('\t');
       String date = line.substring(0, endOfDate).trim();
       String content = line.substring(endOfDate).trim();
-      Document doc = new Document();
-      doc.add(newTextField("date", date, Field.Store.YES));
-      doc.add(newTextField("contents", content, Field.Store.YES));
-      doc.add(new IntField("date2", Integer.valueOf(date), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("date", date);
+      doc.addLargeText("contents", content);
+      doc.addInt("date2", Integer.valueOf(date));
       writer.addDocument(doc);
       line = d.readLine();
     }
@@ -82,7 +78,8 @@
     writer.close();
     reader = DirectoryReader.open(dir);
     searcher = newSearcher(reader);
-
+    //initialize the parser
+    builder = new CorePlusExtensionsParser(reader.getFieldTypes(), "contents", analyzer);
   }
 
   @AfterClass
@@ -233,8 +230,8 @@
       System.out.println("=========" + qType + "============");
       ScoreDoc[] scoreDocs = hits.scoreDocs;
       for (int i = 0; i < Math.min(numDocs, hits.totalHits); i++) {
-        StoredDocument ldoc = searcher.doc(scoreDocs[i].doc);
-        System.out.println("[" + ldoc.get("date") + "]" + ldoc.get("contents"));
+        Document ldoc = searcher.doc(scoreDocs[i].doc);
+        System.out.println("[" + ldoc.getString("date") + "]" + ldoc.getString("contents"));
       }
       System.out.println();
     }
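
Stored-field access follows the same schema-aware Document: searcher.doc() now returns org.apache.lucene.document.Document, whose typed getters replace the untyped StoredDocument.get(). A minimal sketch, assuming hits from a prior search:

    for (ScoreDoc sd : hits.scoreDocs) {
      Document stored = searcher.doc(sd.doc);
      String date = stored.getString("date");        // was StoredDocument.get("date")
      String contents = stored.getString("contents");
    }
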
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
index 6b2d52c..2b7d64a 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
@@ -19,7 +19,6 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -95,7 +94,7 @@
       Document doc = qtm.getQueryAsDOM(queryFormProperties, queryFormProperties.getProperty("template"));
 
       //Parse the XML query using the XML parser
-      Query q = builder.getQuery(doc.getDocumentElement());
+      Query q = builder.getQuery(searcher.getFieldTypes(), doc.getDocumentElement());
 
       //Run the query
       int h = searcher.search(q, null, 1000).totalHits;
@@ -122,14 +121,14 @@
   }
 
   //Helper method to construct Lucene documents used in our tests
-  org.apache.lucene.document.Document getDocumentFromString(String nameValuePairs) {
-    org.apache.lucene.document.Document result = new org.apache.lucene.document.Document();
+  org.apache.lucene.document.Document getDocumentFromString(IndexWriter w, String nameValuePairs) {
+    org.apache.lucene.document.Document result = w.newDocument();
     StringTokenizer st = new StringTokenizer(nameValuePairs, "\t=");
     while (st.hasMoreTokens()) {
       String name = st.nextToken().trim();
       if (st.hasMoreTokens()) {
         String value = st.nextToken().trim();
-        result.add(newTextField(name, value, Field.Store.YES));
+        result.addLargeText(name, value);
       }
     }
     return result;
@@ -147,7 +146,7 @@
     dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
     for (String docFieldValue : docFieldValues) {
-      w.addDocument(getDocumentFromString(docFieldValue));
+      w.addDocument(getDocumentFromString(w, docFieldValue));
     }
     w.forceMerge(1);
     w.close();
@@ -155,7 +154,7 @@
     searcher = newSearcher(reader);
 
     //initialize the parser
-    builder = new CorePlusExtensionsParser("artist", analyzer);
+    builder = new CorePlusExtensionsParser(reader.getFieldTypes(), "artist", analyzer);
 
   }
 
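
Callers of the XML parser now thread FieldTypes through both construction and query building. A minimal usage sketch matching the updated entry points, assuming an open reader/searcher and a parsed DOM document `dom`:

    CorePlusExtensionsParser parser =
        new CorePlusExtensionsParser(reader.getFieldTypes(), "artist", analyzer);
    Query q = parser.getQuery(searcher.getFieldTypes(), dom.getDocumentElement());
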
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeFilterBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeFilterBuilder.java
index bb154ac..3984cb3 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeFilterBuilder.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeFilterBuilder.java
@@ -17,29 +17,50 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.NumericRangeFilter;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Document;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.charset.StandardCharsets;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.BeforeClass;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
 
 public class TestNumericRangeFilterBuilder extends LuceneTestCase {
 
+  private static FieldTypes fieldTypes;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    org.apache.lucene.document.Document doc = w.newDocument();
+    doc.addInt("AGE_INT", 14);
+    doc.addLong("AGE_LONG", 14L);
+    doc.addFloat("AGE_FLOAT", 14F);
+    doc.addDouble("AGE_DOUBLE", 14D);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    w.close();
+    r.close();
+    dir.close();
+  }
+
   public void testGetFilterHandleNumericParseErrorStrict() throws Exception {
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(true);
@@ -47,7 +68,7 @@
     String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
     Document doc = getDocumentFromString(xml);
     try {
-      filterBuilder.getFilter(doc.getDocumentElement());
+      filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
     } catch (ParserException e) {
       return;
     }
@@ -58,9 +79,9 @@
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(false);
 
-    String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
+    String xml = "<NumericRangeFilter fieldName='AGE_INT' type='int' lowerTerm='-1' upperTerm='NaN'/>";
     Document doc = getDocumentFromString(xml);
-    Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
+    Filter filter = filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
     Directory ramDir = newDirectory();
     IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(null));
     writer.commit();
@@ -85,29 +106,25 @@
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(true);
 
-    String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10'/>";
+    String xml = "<NumericRangeFilter fieldName='AGE_INT' type='int' lowerTerm='-1' upperTerm='10'/>";
     Document doc = getDocumentFromString(xml);
-    Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeFilter<?>);
+    Filter filter = filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof TermRangeFilter);
+    assertTrue(filter.toString(), filter.toString().contains("[-1 TO 10]"));
+    TermRangeFilter numRangeFilter = (TermRangeFilter) filter;
+    assertEquals("AGE_INT", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    NumericRangeFilter<Integer> numRangeFilter = (NumericRangeFilter<Integer>) filter;
-    assertEquals(Integer.valueOf(-1), numRangeFilter.getMin());
-    assertEquals(Integer.valueOf(10), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
-
-    String xml2 = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10' includeUpper='false'/>";
+    String xml2 = "<NumericRangeFilter fieldName='AGE_INT' type='int' lowerTerm='-1' upperTerm='10' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeFilter);
-
-    NumericRangeFilter<Integer> numRangeFilter2 = (NumericRangeFilter) filter2;
-    assertEquals(Integer.valueOf(-1), numRangeFilter2.getMin());
-    assertEquals(Integer.valueOf(10), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    Filter filter2 = filterBuilder.getFilter(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof TermRangeFilter);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-1 TO 10}"));
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) filter2;
+    assertEquals("AGE_INT", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
@@ -115,28 +132,27 @@
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(true);
 
-    String xml = "<NumericRangeFilter fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000'/>";
+    String xml = "<NumericRangeFilter fieldName='AGE_LONG' type='LoNg' lowerTerm='-2321' upperTerm='60000000'/>";
     Document doc = getDocumentFromString(xml);
-    Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeFilter<?>);
+    Filter filter = filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof TermRangeFilter);
+    assertTrue(filter.toString(), filter.toString().contains("[-2321 TO 60000000]"));
 
-    NumericRangeFilter<Long> numRangeFilter = (NumericRangeFilter) filter;
-    assertEquals(Long.valueOf(-2321L), numRangeFilter.getMin());
-    assertEquals(Long.valueOf(60000000L), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
+    TermRangeFilter numRangeFilter = (TermRangeFilter) filter;
+    assertEquals("AGE_LONG", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    String xml2 = "<NumericRangeFilter fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000' includeUpper='false'/>";
+    String xml2 = "<NumericRangeFilter fieldName='AGE_LONG' type='LoNg' lowerTerm='-2321' upperTerm='60000000' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeFilter<?>);
-    NumericRangeFilter<Long> numRangeFilter2 = (NumericRangeFilter) filter2;
-    assertEquals(Long.valueOf(-2321L), numRangeFilter2.getMin());
-    assertEquals(Long.valueOf(60000000L), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    Filter filter2 = filterBuilder.getFilter(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof TermRangeFilter);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-2321 TO 60000000}"));
+
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) filter2;
+    assertEquals("AGE_LONG", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
@@ -144,30 +160,26 @@
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(true);
 
-    String xml = "<NumericRangeFilter fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023'/>";
+    String xml = "<NumericRangeFilter fieldName='AGE_DOUBLE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023'/>";
     Document doc = getDocumentFromString(xml);
 
-    Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeFilter<?>);
+    Filter filter = filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof TermRangeFilter);
+    assertTrue(filter.toString(), filter.toString().contains("[-23.21 TO 60000.00023]"));
+    TermRangeFilter numRangeFilter = (TermRangeFilter) filter;
+    assertEquals("AGE_DOUBLE", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    NumericRangeFilter<Double> numRangeFilter = (NumericRangeFilter) filter;
-    assertEquals(Double.valueOf(-23.21d), numRangeFilter.getMin());
-    assertEquals(Double.valueOf(60000.00023d), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
-
-    String xml2 = "<NumericRangeFilter fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023' includeUpper='false'/>";
+    String xml2 = "<NumericRangeFilter fieldName='AGE_DOUBLE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeFilter<?>);
-
-    NumericRangeFilter<Double> numRangeFilter2 = (NumericRangeFilter) filter2;
-    assertEquals(Double.valueOf(-23.21d), numRangeFilter2.getMin());
-    assertEquals(Double.valueOf(60000.00023d), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    Filter filter2 = filterBuilder.getFilter(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof TermRangeFilter);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-23.21 TO 60000.00023}"));
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) filter2;
+    assertEquals("AGE_DOUBLE", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
@@ -175,31 +187,27 @@
     NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
     filterBuilder.setStrictMode(true);
 
-    String xml = "<NumericRangeFilter fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23'/>";
+    String xml = "<NumericRangeFilter fieldName='AGE_FLOAT' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23'/>";
     Document doc = getDocumentFromString(xml);
 
-    Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeFilter<?>);
+    Filter filter = filterBuilder.getFilter(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof TermRangeFilter);
+    assertTrue(filter.toString(), filter.toString().contains("[-2.321432 TO 32432.23]"));
+    TermRangeFilter numRangeFilter = (TermRangeFilter) filter;
+    assertEquals("AGE_FLOAT", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    NumericRangeFilter<Float> numRangeFilter = (NumericRangeFilter) filter;
-    assertEquals(Float.valueOf(-2.321432f), numRangeFilter.getMin());
-    assertEquals(Float.valueOf(32432.23f), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
-
-    String xml2 = "<NumericRangeFilter fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23' includeUpper='false' precisionStep='2' />";
+    String xml2 = "<NumericRangeFilter fieldName='AGE_FLOAT' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23' includeUpper='false' precisionStep='2' />";
     Document doc2 = getDocumentFromString(xml2);
 
-    Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeFilter<?>);
-    
-    NumericRangeFilter<Float> numRangeFilter2 = (NumericRangeFilter) filter2;
-    assertEquals(Float.valueOf(-2.321432f), numRangeFilter2.getMin());
-    assertEquals(Float.valueOf(32432.23f), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    Filter filter2 = filterBuilder.getFilter(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof TermRangeFilter);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-2.321432 TO 32432.23}"));
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) filter2;
+    assertEquals("AGE_FLOAT", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   private static Document getDocumentFromString(String str)
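
Note: the hunks above now pass a fieldTypes handle into NumericRangeFilterBuilder.getFilter, but its initialization falls outside the shown hunks. Presumably it mirrors the @BeforeClass added to TestNumericRangeQueryBuilder below; a minimal sketch under that assumption (field names taken from the assertions above):

    private static FieldTypes fieldTypes;

    @BeforeClass
    public static void beforeClass() throws Exception {
      Directory dir = newDirectory();
      IndexWriter w = newIndexWriter(dir);
      org.apache.lucene.document.Document doc = w.newDocument();
      doc.addInt("AGE_INT", 14);       // registers AGE_INT as an int field
      doc.addLong("AGE_LONG", 14L);
      doc.addFloat("AGE_FLOAT", 14F);
      doc.addDouble("AGE_DOUBLE", 14D);
      w.addDocument(doc);
      IndexReader r = DirectoryReader.open(w, true);
      fieldTypes = r.getFieldTypes();  // the schema the builder consults per field
      w.close();
      r.close();
      dir.close();
    }
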
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
index 53a7f09..5d52688 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
@@ -17,31 +17,57 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.queryparser.xml.ParserException;
-import org.w3c.dom.Document;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.charset.StandardCharsets;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.BeforeClass;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
 
 public class TestNumericRangeQueryBuilder extends LuceneTestCase {
 
+  private static FieldTypes fieldTypes;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    org.apache.lucene.document.Document doc = w.newDocument();
+    doc.addInt("AGE_INT", 14);
+    doc.addLong("AGE_LONG", 14L);
+    doc.addFloat("AGE_FLOAT", 14F);
+    doc.addDouble("AGE_DOUBLE", 14D);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    fieldTypes = r.getFieldTypes();
+    w.close();
+    r.close();
+    dir.close();
+  }
+
   public void testGetFilterHandleNumericParseErrorStrict() throws Exception {
     NumericRangeQueryBuilder filterBuilder = new NumericRangeQueryBuilder();
 
     String xml = "<NumericRangeQuery fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
     Document doc = getDocumentFromString(xml);
     try {
-      filterBuilder.getQuery(doc.getDocumentElement());
+      filterBuilder.getQuery(fieldTypes, doc.getDocumentElement());
     } catch (ParserException e) {
       return;
     }
@@ -52,118 +78,113 @@
   public void testGetFilterInt() throws Exception {
     NumericRangeQueryBuilder filterBuilder = new NumericRangeQueryBuilder();
 
-    String xml = "<NumericRangeQuery fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10'/>";
+    String xml = "<NumericRangeQuery fieldName='AGE_INT' type='int' lowerTerm='-1' upperTerm='10'/>";
     Document doc = getDocumentFromString(xml);
-    Query filter = filterBuilder.getQuery(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeQuery<?>);
+    Query filter = filterBuilder.getQuery(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof ConstantScoreQuery);
+    assertTrue(filter.toString(), filter.toString().contains("[-1 TO 10]"));
 
-    NumericRangeQuery<Integer> numRangeFilter = (NumericRangeQuery<Integer>) filter;
-    assertEquals(Integer.valueOf(-1), numRangeFilter.getMin());
-    assertEquals(Integer.valueOf(10), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
+    TermRangeFilter numRangeFilter = (TermRangeFilter) ((ConstantScoreQuery) filter).getFilter();
+    assertEquals("AGE_INT", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    String xml2 = "<NumericRangeQuery fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10' includeUpper='false'/>";
+    String xml2 = "<NumericRangeQuery fieldName='AGE_INT' type='int' lowerTerm='-1' upperTerm='10' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Query filter2 = filterBuilder.getQuery(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeQuery<?>);
+    Query filter2 = filterBuilder.getQuery(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof ConstantScoreQuery);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-1 TO 10}"));
 
-    NumericRangeQuery<Integer> numRangeFilter2 = (NumericRangeQuery) filter2;
-    assertEquals(Integer.valueOf(-1), numRangeFilter2.getMin());
-    assertEquals(Integer.valueOf(10), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) ((ConstantScoreQuery) filter2).getFilter();
+    assertEquals("AGE_INT", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
   public void testGetFilterLong() throws Exception {
     NumericRangeQueryBuilder filterBuilder = new NumericRangeQueryBuilder();
 
-    String xml = "<NumericRangeQuery fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000'/>";
+    String xml = "<NumericRangeQuery fieldName='AGE_LONG' type='LoNg' lowerTerm='-2321' upperTerm='60000000'/>";
     Document doc = getDocumentFromString(xml);
-    Query filter = filterBuilder.getQuery(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeQuery<?>);
-    NumericRangeQuery<Long> numRangeFilter = (NumericRangeQuery) filter;
-    assertEquals(Long.valueOf(-2321L), numRangeFilter.getMin());
-    assertEquals(Long.valueOf(60000000L), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
+    Query filter = filterBuilder.getQuery(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof ConstantScoreQuery);
+    assertTrue(filter.toString(), filter.toString().contains("[-2321 TO 60000000]"));
 
-    String xml2 = "<NumericRangeQuery fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000' includeUpper='false'/>";
+    TermRangeFilter numRangeFilter = (TermRangeFilter) ((ConstantScoreQuery) filter).getFilter();
+    assertEquals("AGE_LONG", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
+
+
+    String xml2 = "<NumericRangeQuery fieldName='AGE_LONG' type='LoNg' lowerTerm='-2321' upperTerm='60000000' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Query filter2 = filterBuilder.getQuery(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeQuery<?>);
+    Query filter2 = filterBuilder.getQuery(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof ConstantScoreQuery);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-2321 TO 60000000}"));
 
-    NumericRangeQuery<Long> numRangeFilter2 = (NumericRangeQuery) filter2;
-    assertEquals(Long.valueOf(-2321L), numRangeFilter2.getMin());
-    assertEquals(Long.valueOf(60000000L), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) ((ConstantScoreQuery) filter2).getFilter();
+    assertEquals("AGE_LONG", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
   public void testGetFilterDouble() throws Exception {
     NumericRangeQueryBuilder filterBuilder = new NumericRangeQueryBuilder();
 
-    String xml = "<NumericRangeQuery fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023'/>";
+    String xml = "<NumericRangeQuery fieldName='AGE_DOUBLE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023'/>";
     Document doc = getDocumentFromString(xml);
 
-    Query filter = filterBuilder.getQuery(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeQuery<?>);
+    Query filter = filterBuilder.getQuery(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof ConstantScoreQuery);
+    assertTrue(filter.toString(), filter.toString().contains("[-23.21 TO 60000.00023]"));
 
-    NumericRangeQuery<Double> numRangeFilter = (NumericRangeQuery) filter;
-    assertEquals(Double.valueOf(-23.21d), numRangeFilter.getMin());
-    assertEquals(Double.valueOf(60000.00023d), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
+    TermRangeFilter numRangeFilter = (TermRangeFilter) ((ConstantScoreQuery) filter).getFilter();
+    assertEquals("AGE_DOUBLE", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    String xml2 = "<NumericRangeQuery fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023' includeUpper='false'/>";
+
+    String xml2 = "<NumericRangeQuery fieldName='AGE_DOUBLE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023' includeUpper='false'/>";
     Document doc2 = getDocumentFromString(xml2);
-    Query filter2 = filterBuilder.getQuery(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeQuery<?>);
+    Query filter2 = filterBuilder.getQuery(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof ConstantScoreQuery);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-23.21 TO 60000.00023}"));
 
-    NumericRangeQuery<Double> numRangeFilter2 = (NumericRangeQuery) filter2;
-    assertEquals(Double.valueOf(-23.21d), numRangeFilter2.getMin());
-    assertEquals(Double.valueOf(60000.00023d), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) ((ConstantScoreQuery) filter2).getFilter();
+    assertEquals("AGE_DOUBLE", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
   public void testGetFilterFloat() throws Exception {
     NumericRangeQueryBuilder filterBuilder = new NumericRangeQueryBuilder();
 
-    String xml = "<NumericRangeQuery fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23'/>";
+    String xml = "<NumericRangeQuery fieldName='AGE_FLOAT' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23'/>";
     Document doc = getDocumentFromString(xml);
 
-    Query filter = filterBuilder.getQuery(doc.getDocumentElement());
-    assertTrue(filter instanceof NumericRangeQuery<?>);
+    Query filter = filterBuilder.getQuery(fieldTypes, doc.getDocumentElement());
+    assertTrue(filter instanceof ConstantScoreQuery);
+    assertTrue(filter.toString(), filter.toString().contains("[-2.321432 TO 32432.23]"));
 
-    NumericRangeQuery<Float> numRangeFilter = (NumericRangeQuery) filter;
-    assertEquals(Float.valueOf(-2.321432f), numRangeFilter.getMin());
-    assertEquals(Float.valueOf(32432.23f), numRangeFilter.getMax());
-    assertEquals("AGE", numRangeFilter.getField());
-    assertTrue(numRangeFilter.includesMin());
-    assertTrue(numRangeFilter.includesMax());
+    TermRangeFilter numRangeFilter = (TermRangeFilter) ((ConstantScoreQuery) filter).getFilter();
+    assertEquals("AGE_FLOAT", numRangeFilter.getField());
+    assertTrue(numRangeFilter.includesLower());
+    assertTrue(numRangeFilter.includesUpper());
 
-    String xml2 = "<NumericRangeQuery fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23' includeUpper='false' precisionStep='2' />";
+    String xml2 = "<NumericRangeQuery fieldName='AGE_FLOAT' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23' includeUpper='false' precisionStep='2' />";
     Document doc2 = getDocumentFromString(xml2);
 
-    Query filter2 = filterBuilder.getQuery(doc2.getDocumentElement());
-    assertTrue(filter2 instanceof NumericRangeQuery<?>);
+    Query filter2 = filterBuilder.getQuery(fieldTypes, doc2.getDocumentElement());
+    assertTrue(filter2 instanceof ConstantScoreQuery);
+    assertTrue(filter2.toString(), filter2.toString().contains("[-2.321432 TO 32432.23}"));
 
-    NumericRangeQuery<Float> numRangeFilter2 = (NumericRangeQuery) filter2;
-    assertEquals(Float.valueOf(-2.321432f), numRangeFilter2.getMin());
-    assertEquals(Float.valueOf(32432.23f), numRangeFilter2.getMax());
-    assertEquals("AGE", numRangeFilter2.getField());
-    assertTrue(numRangeFilter2.includesMin());
-    assertFalse(numRangeFilter2.includesMax());
+    TermRangeFilter numRangeFilter2 = (TermRangeFilter) ((ConstantScoreQuery) filter2).getFilter();
+    assertEquals("AGE_FLOAT", numRangeFilter2.getField());
+    assertTrue(numRangeFilter2.includesLower());
+    assertFalse(numRangeFilter2.includesUpper());
   }
 
   private static Document getDocumentFromString(String str)
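
For reference, the bracket convention all of these toString assertions rely on: an inclusive bound renders as '[' or ']', an exclusive bound as '{' or '}'. A minimal sketch, assuming the 4.x-era TermRangeFilter.newStringRange factory is still exposed on this branch:

    import org.apache.lucene.search.TermRangeFilter;

    public class BracketConventionSketch {
      public static void main(String[] args) {
        TermRangeFilter f = TermRangeFilter.newStringRange(
            "AGE_INT", "-1", "10", /*includeLower=*/true, /*includeUpper=*/false);
        // Prints something like "AGE_INT:[-1 TO 10}":
        // '[' = lower bound inclusive, '}' = upper bound exclusive.
        System.out.println(f);
      }
    }
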
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java
index 310399f..0ebc6d9 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java
@@ -183,7 +183,7 @@
   }
   
   private Document newDocument(TaxonomyWriter taxoWriter, int id) throws IOException {
-    Document doc = new Document();
+    Document doc = publishIndexWriter.newDocument();
     doc.add(new FacetField("A", Integer.toString(id, 16)));
     return config.build(taxoWriter, doc);
   }
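
The replicator-test changes in this and the following files are all the same mechanical move: documents are no longer constructed with new Document() but requested from the IndexWriter, so the writer's FieldTypes sees every field that gets indexed. A minimal sketch, assuming this branch's IndexWriter.newDocument and the typed addXxx methods used elsewhere in this patch:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class NewDocumentSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
        Document doc = w.newDocument(); // doc is bound to w's FieldTypes
        doc.addAtom("id", "0");         // "id" is now recorded as an atom field
        w.addDocument(doc);
        w.close();
        dir.close();
      }
    }
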
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
index 2c832da..7c150dc 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Map;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.facet.FacetField;
@@ -41,9 +41,9 @@
 
 public class IndexAndTaxonomyRevisionTest extends ReplicatorTestCase {
   
-  private Document newDocument(TaxonomyWriter taxoWriter) throws IOException {
+  private Document newDocument(IndexWriter w, TaxonomyWriter taxoWriter) throws IOException {
     FacetsConfig config = new FacetsConfig();
-    Document doc = new Document();
+    Document doc = w.newDocument();
     doc.add(new FacetField("A", "1"));
     return config.build(taxoWriter, doc);
   }
@@ -82,7 +82,7 @@
       ((MockDirectoryWrapper)indexDir).setEnableVirusScanner(false);
     }
     try {
-      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.addDocument(newDocument(indexWriter, taxoWriter));
       indexWriter.commit();
       taxoWriter.commit();
       Revision rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
@@ -92,7 +92,7 @@
       assertTrue(slowFileExists(taxoDir, IndexFileNames.SEGMENTS + "_1"));
       
       rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter); // create revision again, so the files are snapshotted
-      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.addDocument(newDocument(indexWriter, taxoWriter));
       indexWriter.commit();
       taxoWriter.commit();
       assertNotNull(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
@@ -118,7 +118,7 @@
     Directory taxoDir = newDirectory();
     SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
     try {
-      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.addDocument(newDocument(indexWriter, taxoWriter));
       indexWriter.commit();
       taxoWriter.commit();
       Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
@@ -144,7 +144,7 @@
     Directory taxoDir = newDirectory();
     SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
     try {
-      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.addDocument(newDocument(indexWriter, taxoWriter));
       indexWriter.commit();
       taxoWriter.commit();
       Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java
index 3f91013..2a46bbe 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java
@@ -23,7 +23,6 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -122,7 +121,7 @@
   }
   
   private Revision createRevision(final int id) throws IOException {
-    publishWriter.addDocument(new Document());
+    publishWriter.addDocument(publishWriter.newDocument());
     publishWriter.setCommitData(new HashMap<String, String>() {{
       put(VERSION_ID, Integer.toString(id, 16));
     }});
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
index 6ebdd15..8979897 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
@@ -21,7 +21,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -81,7 +80,7 @@
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter writer = new IndexWriter(dir, conf);
     try {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       Revision rev1 = new IndexRevision(writer);
       // releasing that revision should not delete the files
@@ -89,7 +88,7 @@
       assertTrue(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
       
       rev1 = new IndexRevision(writer); // create revision again, so the files are snapshotted
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       assertNotNull(new IndexRevision(writer));
       rev1.release(); // this release should trigger the delete of segments_1
@@ -106,7 +105,7 @@
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter writer = new IndexWriter(dir, conf);
     try {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       Revision rev = new IndexRevision(writer);
       @SuppressWarnings("unchecked")
@@ -128,7 +127,7 @@
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter writer = new IndexWriter(dir, conf);
     try {
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       Revision rev = new IndexRevision(writer);
       @SuppressWarnings("unchecked")
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
index 3c9a8d1..aaa161d 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
@@ -24,7 +24,6 @@
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexWriter;
@@ -66,7 +65,7 @@
   }
   
   private Revision createRevision(final int id) throws IOException {
-    sourceWriter.addDocument(new Document());
+    sourceWriter.addDocument(sourceWriter.newDocument());
     sourceWriter.setCommitData(new HashMap<String, String>() {{
       put(VERSION_ID, Integer.toString(id, 16));
     }});
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java
index 7416f1c..cb76fd4 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java
@@ -24,7 +24,6 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.SnapshotDeletionPolicy;
@@ -40,7 +39,7 @@
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     Revision rev = new IndexRevision(writer);
     
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
index cb9e466..4111999 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
@@ -44,7 +44,6 @@
 import org.junit.Test;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
-
 import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 
 public class HttpReplicatorTest extends ReplicatorTestCase {
@@ -101,7 +100,7 @@
   }
   
   private void publishRevision(int id) throws IOException {
-    Document doc = new Document();
+    Document doc = writer.newDocument();
     writer.addDocument(doc);
     writer.setCommitData(Collections.singletonMap("ID", Integer.toString(id, 16)));
     writer.commit();
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
index 9d25fd3..bed38d4 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
@@ -65,12 +65,7 @@
   public int setField(FieldInfo fieldInfo) {
     super.setField(fieldInfo);
     if (fieldInfo.getIndexOptions() != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) {
-      throw new IllegalArgumentException("field must be index using IndexOptions.DOCS_AND_FREQS_AND_POSITIONS");
-    }
-    // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted docs, and because our PF only indexes the
-    // non-deleted documents on flush, CheckIndex will see this as corruption:
-    if (fieldInfo.hasVectors()) {
-      throw new IllegalArgumentException("field cannot index term vectors: CheckIndex will report this as index corruption");
+      throw new IllegalArgumentException("field \"" + fieldInfo.name + "\" must be indexed using IndexOptions.DOCS_AND_FREQS_AND_POSITIONS (got: " + fieldInfo.getIndexOptions() + ")");
     }
     lastState = emptyState;
     return 0;
@@ -83,10 +78,6 @@
 
   @Override
   public void startDoc(int docID, int termDocFreq) throws IOException {
-    // TODO: LUCENE-5693: we don't need this check if we fix IW to not send deleted docs to us on flush:
-    if (liveDocs != null && liveDocs.get(docID) == false) {
-      return;
-    }
     if (lastDocID != -1) {
       throw new IllegalArgumentException("term appears in more than one document");
     }
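
With the LUCENE-5693 workaround removed (IndexWriter no longer feeds deleted docs to the postings format at flush), startDoc enforces the one-live-document-per-ID-term invariant unconditionally, and the term-vector restriction is gone entirely. The failure mode, echoing testMoreThanOneDocPerIDOneSegment further down (sketch only; makeIDTokenStream as defined in that test):

    Document doc = w.newDocument();
    doc.addLargeText("id", makeIDTokenStream("id", 17));
    w.addDocument(doc);
    doc = w.newDocument();
    doc.addLargeText("id", makeIDTokenStream("id", 17));
    w.addDocument(doc); // the same ID term now appears in two live docs
    w.commit();         // flush fails: "term appears in more than one document"
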
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
index e5ca33c..8d0a0c3 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
@@ -165,6 +165,8 @@
   private final List<FieldMetaData> fields = new ArrayList<>();
   // private final String segment;
 
+  // TODO: add auto-prefix support?
+
   /** Create a new writer.  The number of items (terms or
    *  sub-blocks) per block will aim to be between
    *  minItemsPerBlock and maxItemsPerBlock, though in some
@@ -734,8 +736,7 @@
     public void write(BytesRef text, TermsEnum termsEnum) throws IOException {
 
       BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
-      // TODO: LUCENE-5693: we don't need this check if we fix IW to not send deleted docs to us on flush:
-      if (state != null && ((IDVersionPostingsWriter) postingsWriter).lastDocID != -1) {
+      if (state != null) {
         assert state.docFreq != 0;
         assert fieldInfo.getIndexOptions() == IndexOptions.DOCS || state.totalTermFreq >= state.docFreq: "postingsWriter=" + postingsWriter;
         pushTerm(text);
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
index ecf4c55..eba26c1 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeFilter.java
@@ -20,7 +20,6 @@
 import java.text.Collator;
 
 import org.apache.lucene.search.MultiTermQueryWrapperFilter;
-import org.apache.lucene.search.NumericRangeFilter; // javadoc
 import org.apache.lucene.search.DocValuesRangeFilter; // javadoc
 
 /**
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeQuery.java
index 6d65bd0..bfef841 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowCollatedTermRangeQuery.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.MultiTermQuery; // javadoc
-import org.apache.lucene.search.NumericRangeQuery; // javadoc
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.ToStringUtils;
 
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/SingleTokenWithPayloadTokenStream.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/SingleTokenWithPayloadTokenStream.java
new file mode 100644
index 0000000..093a2d1
--- /dev/null
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/SingleTokenWithPayloadTokenStream.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.codecs.idversion;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.util.BytesRef;
+
+final class SingleTokenWithPayloadTokenStream extends TokenStream {
+
+  private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+  private final PayloadAttribute payloadAttribute = addAttribute(PayloadAttribute.class);
+  private boolean used = false;
+  private String value = null;
+  private BytesRef payload;
+    
+  /** Sets the string value. */
+  void setValue(String value, BytesRef payload) {
+    this.value = value;
+    this.payload = payload;
+  }
+
+  @Override
+  public boolean incrementToken() {
+    if (used) {
+      return false;
+    }
+    clearAttributes();
+    termAttribute.append(value);
+    payloadAttribute.setPayload(payload);
+    used = true;
+    return true;
+  }
+
+  @Override
+  public void reset() {
+    used = false;
+  }
+
+  @Override
+  public void close() {
+    value = null;
+    payload = null;
+  }
+}
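
This stream is the nested class previously buried in StringAndPayloadField (deleted below), promoted to a top-level test helper. Typical usage, following TestIDVersionPostingsFormat's makeIDTokenStream: encode the version into an 8-byte payload and index the stream as a large-text field:

    BytesRef payload = new BytesRef(8);
    payload.length = 8;
    IDVersionPostingsFormat.longToBytes(17L, payload); // version -> payload bytes

    SingleTokenWithPayloadTokenStream ts = new SingleTokenWithPayloadTokenStream();
    ts.setValue("id0", payload);

    Document doc = w.newDocument();
    doc.addLargeText("id", ts); // one token "id0", carrying the version payload
    w.addDocument(doc);
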
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/StringAndPayloadField.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/StringAndPayloadField.java
deleted file mode 100644
index 4514351..0000000
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/StringAndPayloadField.java
+++ /dev/null
@@ -1,103 +0,0 @@
-package org.apache.lucene.codecs.idversion;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.util.BytesRef;
-
-// TODO: can we take a BytesRef token instead?
-
-/** Produces a single String token from the provided value, with the provided payload. */
-class StringAndPayloadField extends Field {
-
-  public static final FieldType TYPE = new FieldType();
-
-  static {
-    TYPE.setOmitNorms(true);
-    TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    TYPE.setTokenized(true);
-    TYPE.freeze();
-  }
-
-  private final BytesRef payload;
-
-  public StringAndPayloadField(String name, String value, BytesRef payload) {
-    super(name, value, TYPE);
-    this.payload = payload;
-  }
-
-  @Override
-  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
-    SingleTokenWithPayloadTokenStream ts;
-    if (reuse instanceof SingleTokenWithPayloadTokenStream) {
-      ts = (SingleTokenWithPayloadTokenStream) reuse;
-    } else {
-      ts = new SingleTokenWithPayloadTokenStream();
-    }
-    ts.setValue((String) fieldsData, payload);
-    return ts;
-  }
-
-  static final class SingleTokenWithPayloadTokenStream extends TokenStream {
-
-    private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
-    private final PayloadAttribute payloadAttribute = addAttribute(PayloadAttribute.class);
-    private boolean used = false;
-    private String value = null;
-    private BytesRef payload;
-    
-    /** Sets the string value. */
-    void setValue(String value, BytesRef payload) {
-      this.value = value;
-      this.payload = payload;
-    }
-
-    @Override
-    public boolean incrementToken() {
-      if (used) {
-        return false;
-      }
-      clearAttributes();
-      termAttribute.append(value);
-      payloadAttribute.setPayload(payload);
-      used = true;
-      return true;
-    }
-
-    @Override
-    public void reset() {
-      used = false;
-    }
-
-    @Override
-    public void close() {
-      value = null;
-      payload = null;
-    }
-  }
-}
-
-
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
index b8cfe3d..02cc363 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -29,15 +29,14 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.codecs.idversion.StringAndPayloadField.SingleTokenWithPayloadTokenStream;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -70,11 +69,14 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id0", 100));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id0", 100));
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(makeIDField("id1", 110));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id1", 110));
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IDVersionSegmentTermsEnum termsEnum = (IDVersionSegmentTermsEnum) r.leaves().get(0).reader().fields().terms("id").iterator(null);
@@ -195,6 +197,14 @@
     int maxItemsInBlock = 2*(minItemsInBlock-1) + random().nextInt(50);
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat(minItemsInBlock, maxItemsInBlock)));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    if (random().nextBoolean()) {
+      fieldTypes.enableTermVectors("id");
+      fieldTypes.enableTermVectorPositions("id");
+      fieldTypes.enableTermVectorOffsets("id");
+    }
     //IndexWriter w = new IndexWriter(dir, iwc);
     int numDocs = atLeast(1000);
     Map<String,Long> idValues = new HashMap<String,Long>();
@@ -236,8 +246,8 @@
       if (VERBOSE) {
         System.out.println("  " + idValue + " -> " + version);
       }
-      Document doc = new Document();
-      doc.add(makeIDField(idValue, version));
+      Document doc = w.newDocument();
+      doc.addLargeText("id", makeIDTokenStream(idValue, version));
       w.addDocument(doc);
       idsList.add(idValue);
 
@@ -250,8 +260,8 @@
           } else {
             version = random().nextLong() & 0x3fffffffffffffffL;
           }
-          doc = new Document();
-          doc.add(makeIDField(idValue, version));
+          doc = w.newDocument();
+          doc.addLargeText("id", makeIDTokenStream(idValue, version));
           if (VERBOSE) {
             System.out.println("  update " + idValue + " -> " + version);
           }
@@ -350,11 +360,17 @@
     }
   }
 
-  private static Field makeIDField(String id, long version) {
+  private static TokenStream makeIDTokenStream(String idValue, long version) {
     BytesRef payload = new BytesRef(8);
     payload.length = 8;
     IDVersionPostingsFormat.longToBytes(version, payload);
-    return new StringAndPayloadField("id", id, payload);
+    return makeIDTokenStream(idValue, payload);
+  }
+
+  private static TokenStream makeIDTokenStream(String idValue, BytesRef payload) {
+    SingleTokenWithPayloadTokenStream ts = new SingleTokenWithPayloadTokenStream();
+    ts.setValue(idValue, payload);
+    return ts;
   }
 
   public void testMoreThanOneDocPerIDOneSegment() throws Exception {
@@ -362,11 +378,14 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(makeIDField("id", 17));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     try {
       w.addDocument(doc);
       w.commit();
@@ -393,12 +412,15 @@
         });
     }
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
     w.commit();
-    doc = new Document();
-    doc.add(makeIDField("id", 17));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     try {
       w.addDocument(doc);
       w.commit();
@@ -419,11 +441,14 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
-    doc = new Document();
-    doc.add(makeIDField("id", 17));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     // Replaces the doc we just indexed:
     w.updateDocument(new Term("id", "id"), doc);
     w.commit();
@@ -436,12 +461,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
     w.deleteDocuments(new Term("id", "id"));
-    doc = new Document();
-    doc.add(makeIDField("id", 17));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
     w.commit();
     w.close();
@@ -464,8 +492,11 @@
     IndexWriterConfig iwc = newIndexWriterConfig(a);
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newTextField("id", "id", Field.Store.NO));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", "id");
     try {
       w.addDocument(doc);
       w.commit();
@@ -483,8 +514,11 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("id", "id", Field.Store.NO));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addAtom("id", "id");
     try {
       w.addDocument(doc);
       w.commit();
@@ -502,8 +536,11 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(new StringAndPayloadField("id", "id", new BytesRef("foo")));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", new BytesRef("foo")));
     try {
       w.addDocument(doc);
       w.commit();
@@ -521,12 +558,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     w.addDocument(doc);
     w.commit();
-    doc = new Document();
-    doc.add(makeIDField("id", 17));
+    doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     // Replaces the doc we just indexed:
     w.updateDocument(new Term("id", "id"), doc);
     w.forceMerge(1);
@@ -534,44 +574,18 @@
     dir.close();
   }
 
-  // LUCENE-5693: because CheckIndex cross-checks term vectors with postings even for deleted docs, and because our PF only indexes the
-  // non-deleted documents on flush, CheckIndex will see this as corruption:
-  public void testCannotIndexTermVectors() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
-    iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-
-    FieldType ft = new FieldType(StringAndPayloadField.TYPE);
-    ft.setStoreTermVectors(true);
-    SingleTokenWithPayloadTokenStream ts = new SingleTokenWithPayloadTokenStream();
-    BytesRef payload = new BytesRef(8);
-    payload.length = 8;
-    IDVersionPostingsFormat.longToBytes(17, payload);
-    ts.setValue("foo", payload);
-    Field field = new Field("id", ts, ft);
-    doc.add(new Field("id", ts, ft));
-    try {
-      w.addDocument(doc);
-      w.commit();
-      fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
-      // expected
-      // iae.printStackTrace(System.out);
-    }
-    w.close();
-    dir.close();
-  }
-
   public void testMoreThanOnceInSingleDoc() throws IOException {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(makeIDField("id", 17));
-    doc.add(makeIDField("id", 17));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    fieldTypes.setMultiValued("id");
+    Document doc = w.newDocument();
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
+    doc.addLargeText("id", makeIDTokenStream("id", 17));
     try {
       w.addDocument(doc);
       w.commit();
@@ -588,9 +602,12 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
+    Document doc = w.newDocument();
     // -1
-    doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
+    doc.addLargeText("id", makeIDTokenStream("id", new BytesRef(new byte[] {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
     try {
       w.addDocument(doc);
       w.commit();
@@ -612,9 +629,9 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
+    Document doc = w.newDocument();
     // Long.MAX_VALUE:
-    doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0x7f, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
+    doc.addLargeText("id", makeIDTokenStream("id", new BytesRef(new byte[] {(byte)0x7f, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
     try {
       w.addDocument(doc);
       w.commit();
@@ -637,6 +654,9 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("id");
+    fieldTypes.disableExistsFilters();
 
     IDSource idsSource = getRandomIDs();
     int numIDs = atLeast(100);
@@ -786,8 +806,8 @@
                       System.out.println(Thread.currentThread() + ": now fail!");
                     }
                     assertTrue(passes);
-                    Document doc = new Document();
-                    doc.add(makeIDField(id, newVersion));
+                    Document doc = w.newDocument();
+                    doc.addLargeText("id", makeIDTokenStream(id, newVersion));
                     w.updateDocument(new Term("id", id), doc);
                     truth.put(id, newVersion);
                     versionValues.add(id, newVersion);
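
Note the version domain these tests pin down: the payload is 8 bytes, but -1 and Long.MAX_VALUE are both rejected above, and the random versions generated earlier in the file are masked to the low 62 bits. A sketch of producing a legal random version the way the test does:

    long version = random().nextLong() & 0x3fffffffffffffffL; // stay in the legal range
    BytesRef payload = new BytesRef(8);
    payload.length = 8;
    IDVersionPostingsFormat.longToBytes(version, payload);
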
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
index ffe8c34..714b6f0 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
@@ -22,7 +22,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
@@ -74,10 +73,10 @@
   }
 
   private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException {
-    Document doc = new Document();
-    doc.add(newStringField(KEY_FIELD, url, Field.Store.YES));
-    doc.add(newTextField("text", text, Field.Store.YES));
-    doc.add(newTextField("date", date, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addAtom(KEY_FIELD, url);
+    doc.addLargeText("text", text);
+    doc.addLargeText("date", date);
     writer.addDocument(doc);
   }
 
@@ -87,8 +86,8 @@
     ScoreDoc[] hits = searcher.search(tq, df, 1000).scoreDocs;
 
     for (ScoreDoc hit : hits) {
-      StoredDocument d = searcher.doc(hit.doc);
-      String url = d.get(KEY_FIELD);
+      Document d = searcher.doc(hit.doc);
+      String url = d.getString(KEY_FIELD);
       assertFalse("No duplicate urls should be returned", results.contains(url));
       results.add(url);
     }
@@ -101,8 +100,8 @@
     boolean dupsFound = false;
 
     for (ScoreDoc hit : hits) {
-      StoredDocument d = searcher.doc(hit.doc);
-      String url = d.get(KEY_FIELD);
+      Document d = searcher.doc(hit.doc);
+      String url = d.getString(KEY_FIELD);
       if (!dupsFound)
         dupsFound = results.contains(url);
       results.add(url);
@@ -118,8 +117,8 @@
     assertTrue("Filtered searching should have found some matches", hits.length > 0);
 
     for (ScoreDoc hit : hits) {
-      StoredDocument d = searcher.doc(hit.doc);
-      String url = d.get(KEY_FIELD);
+      Document d = searcher.doc(hit.doc);
+      String url = d.getString(KEY_FIELD);
       assertFalse("No duplicate urls should be returned", results.contains(url));
       results.add(url);
     }
@@ -132,8 +131,8 @@
     ScoreDoc[] hits = searcher.search(tq, df, 1000).scoreDocs;
     assertTrue("Filtered searching should have found some matches", hits.length > 0);
     for (ScoreDoc hit : hits) {
-      StoredDocument d = searcher.doc(hit.doc);
-      String url = d.get(KEY_FIELD);
+      Document d = searcher.doc(hit.doc);
+      String url = d.getString(KEY_FIELD);
       DocsEnum td = TestUtil.docs(random(), reader,
           KEY_FIELD,
           new BytesRef(url),
@@ -156,8 +155,8 @@
     ScoreDoc[] hits = searcher.search(tq, df, 1000).scoreDocs;
     assertTrue("Filtered searching should have found some matches", hits.length > 0);
     for (ScoreDoc hit : hits) {
-      StoredDocument d = searcher.doc(hit.doc);
-      String url = d.get(KEY_FIELD);
+      Document d = searcher.doc(hit.doc);
+      String url = d.getString(KEY_FIELD);
       DocsEnum td = TestUtil.docs(random(), reader,
           KEY_FIELD,
           new BytesRef(url),
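
On the read side, StoredDocument disappears: searcher.doc now hands back the unified Document class with typed accessors. A small sketch of the hit-collection loop these assertions use, with collectUrls as a hypothetical name for illustration:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.ScoreDoc;

    // Sketch: gather the stored key of each hit via the typed getString accessor.
    Set<String> collectUrls(IndexSearcher searcher, ScoreDoc[] hits, String keyField) throws IOException {
      Set<String> urls = new HashSet<>();
      for (ScoreDoc hit : hits) {
        Document d = searcher.doc(hit.doc); // Document, not StoredDocument, on this branch
        urls.add(d.getString(keyField));    // typed accessor replaces d.get(keyField)
      }
      return urls;
    }
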
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
index cd47a47..d8c43d2 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java
@@ -17,13 +17,14 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.HashSet;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -32,9 +33,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-import java.util.HashSet;
-
 public class FuzzyLikeThisQueryTest extends LuceneTestCase {
   private Directory directory;
   private IndexSearcher searcher;
@@ -69,9 +67,9 @@
   }
 
   private void addDoc(RandomIndexWriter writer, String name, String id) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("name", name, Field.Store.YES));
-    doc.add(newTextField("id", id, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("name", name);
+    doc.addLargeText("id", id);
     writer.addDocument(doc);
   }
 
@@ -89,8 +87,8 @@
     TopDocs topDocs = searcher.search(flt, 1);
     ScoreDoc[] sd = topDocs.scoreDocs;
     assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0));
-    StoredDocument doc = searcher.doc(sd[0].doc);
-    assertEquals("Should match most similar not most rare variant", "2", doc.get("id"));
+    Document doc = searcher.doc(sd[0].doc);
+    assertEquals("Should match most similar not most rare variant", "2", doc.getString("id"));
   }
 
   //Test multiple input words are having variants produced
@@ -105,8 +103,8 @@
     TopDocs topDocs = searcher.search(flt, 1);
     ScoreDoc[] sd = topDocs.scoreDocs;
     assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0));
-    StoredDocument doc = searcher.doc(sd[0].doc);
-    assertEquals("Should match most similar when using 2 words", "2", doc.get("id"));
+    Document doc = searcher.doc(sd[0].doc);
+    assertEquals("Should match most similar when using 2 words", "2", doc.getString("id"));
   }
   
   // LUCENE-4809
@@ -123,8 +121,8 @@
     TopDocs topDocs = searcher.search(flt, 1);
     ScoreDoc[] sd = topDocs.scoreDocs;
     assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0));
-    StoredDocument doc = searcher.doc(sd[0].doc);
-    assertEquals("Should match most similar when using 2 words", "2", doc.get("id"));
+    Document doc = searcher.doc(sd[0].doc);
+    assertEquals("Should match most similar when using 2 words", "2", doc.getString("id"));
   }
 
 
@@ -139,8 +137,8 @@
     TopDocs topDocs = searcher.search(flt, 1);
     ScoreDoc[] sd = topDocs.scoreDocs;
     assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0));
-    StoredDocument doc = searcher.doc(sd[0].doc);
-    assertEquals("Should match most similar when using 2 words", "2", doc.get("id"));
+    Document doc = searcher.doc(sd[0].doc);
+    assertEquals("Should match most similar when using 2 words", "2", doc.getString("id"));
   }
 
   public void testFuzzyLikeThisQueryEquals() {
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
index fe932ae..63f0dc7 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java
@@ -4,14 +4,11 @@
 import java.util.Locale;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -56,12 +53,9 @@
     dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       String value = TestUtil.randomUnicodeString(random());
-      Field field = newStringField("field", value, Field.Store.YES);
-      doc.add(field);
-      Field dvField = new SortedDocValuesField("field", new BytesRef(value));
-      doc.add(dvField);
+      doc.addAtom("field", value);
       iw.addDocument(doc);
     }
     splitDoc = TestUtil.randomUnicodeString(random());
@@ -84,7 +78,7 @@
   private void doCheckSorting(TopDocs docs) throws Exception {
     String prev = "";
     for (ScoreDoc doc : docs.scoreDocs) {
-      String value = reader.document(doc.doc).get("field");
+      String value = reader.document(doc.doc).getString("field");
       assertTrue(collator.compare(value, prev) >= 0);
       prev = value;
     }
@@ -115,7 +109,7 @@
     // positive test
     TopDocs docs = searcher.search(query, numDocs);
     for (ScoreDoc doc : docs.scoreDocs) {
-      String value = reader.document(doc.doc).get("field");
+      String value = reader.document(doc.doc).getString("field");
       assertTrue(collator.compare(value, startPoint) >= 0);
       assertTrue(collator.compare(value, endPoint) <= 0);
     }
@@ -126,7 +120,7 @@
     bq.add(query, Occur.MUST_NOT);
     docs = searcher.search(bq, numDocs);
     for (ScoreDoc doc : docs.scoreDocs) {
-      String value = reader.document(doc.doc).get("field");
+      String value = reader.document(doc.doc).getString("field");
       assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0);
     }
   }
@@ -165,10 +159,9 @@
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       String value = TestUtil.randomUnicodeString(random());
-      Field field = newStringField("field", value, Field.Store.YES);
-      doc.add(field);
+      doc.addAtom("field", value);
       iw.addDocument(doc);
     }
     IndexReader reader = iw.getReader();
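
Note the net simplification in TestSlowCollationMethods: the old code added two parallel fields (an indexed string field plus a SortedDocValuesField) so a value could be both searched and sorted; after the change a single addAtom call stands in for the pair. A sketch, assuming addAtom also records the doc values the deleted dvField used to supply, as the removal suggests:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.RandomIndexWriter;

    // Sketch: one atom field instead of a StringField/SortedDocValuesField pair.
    void addSortableValue(RandomIndexWriter iw, String value) throws IOException {
      Document doc = iw.newDocument();
      doc.addAtom("field", value); // indexed term plus, per this patch, sortable doc values
      iw.addDocument(doc);
    }
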
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
index 23b2d03..5a10c62 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
@@ -17,12 +17,11 @@
  * limitations under the License.
  */
 
-import java.util.List;
-import java.util.Arrays;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -89,7 +88,7 @@
     assertEquals("3 documents should match", 3, hits.length);
     List<String> order = Arrays.asList("bbbbb","abbbb","aabbb");
     for (int i = 0; i < hits.length; i++) {
-      final String term = searcher.doc(hits[i].doc).get("field");
+      final String term = searcher.doc(hits[i].doc).getString("field");
       //System.out.println(hits[i].score);
       assertEquals(order.get(i), term);
     }
@@ -101,7 +100,7 @@
     assertEquals("only 2 documents should match", 2, hits.length);
     order = Arrays.asList("bbbbb","abbbb");
     for (int i = 0; i < hits.length; i++) {
-      final String term = searcher.doc(hits[i].doc).get("field");
+      final String term = searcher.doc(hits[i].doc).getString("field");
       //System.out.println(hits[i].score);
       assertEquals(order.get(i), term);
     }
@@ -486,8 +485,8 @@
   }
 
   private void addDoc(String text, RandomIndexWriter writer) throws IOException {
-    Document doc = new Document();
-    doc.add(newTextField("field", text, Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", text);
     writer.addDocument(doc);
   }
 }
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery2.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery2.java
index d46e626..a9fe756 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery2.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery2.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -92,12 +91,9 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
 
-    Document doc = new Document();
-    Field field = newTextField("field", "", Field.Store.NO);
-    doc.add(field);
-    
     for (int i = 0; i < terms; i++) {
-      field.setStringValue(mapInt(codePointTable, i));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", mapInt(codePointTable, i));
       writer.addDocument(doc);
     }   
     
@@ -152,12 +148,9 @@
     IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(),
         IndexWriter.MaxFieldLength.UNLIMITED);
     
-    Document doc = new Document();
-    Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
-    doc.add(field);
-
     for (int i = 0; i < terms; i++) {
-      field.setValue(Integer.toBinaryString(i));
+      Document doc = writer.newDocument();
+      doc.addLargeText("field", Integer.toBinaryString(i));
       writer.addDocument(doc);
     }
     
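
TestSlowFuzzyQuery2 also retires the old reuse idiom, where one Field instance was created outside the loop and mutated with setStringValue for each document; with writer-created documents, every iteration simply builds a fresh one. A sketch of the new loop shape, reusing the binary-string values from the second hunk:

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.RandomIndexWriter;

    // Sketch: a fresh Document per iteration replaces mutable-Field reuse.
    void indexTerms(RandomIndexWriter writer, int terms) throws IOException {
      for (int i = 0; i < terms; i++) {
        Document doc = writer.newDocument();
        doc.addLargeText("field", Integer.toBinaryString(i)); // per-doc value, no shared Field
        writer.addDocument(doc);
      }
    }
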
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
index a2e517b..993cbf0 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java
@@ -17,20 +17,18 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
-import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.index.TermsEnum;
-
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -46,8 +44,8 @@
     super.setUp();
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    doc.add(newTextField(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText(FN, "the quick brown fox jumps over the lazy dog");
     writer.addDocument(doc);
     reader = writer.getReader();
     writer.close();
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
index 940fe77..258d0e3 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
@@ -20,7 +20,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -54,15 +53,11 @@
   public void testSpanRegex() throws Exception {
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    // doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
-    // Field.Store.NO, Field.Index.ANALYZED));
-    // writer.addDocument(doc);
-    // doc = new Document();
-    doc.add(newTextField("field", "auto update", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("field", "auto update");
     writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newTextField("field", "first auto update", Field.Store.NO));
+    doc = writer.newDocument();
+    doc.addLargeText("field", "first auto update");
     writer.addDocument(doc);
     writer.forceMerge(1);
     writer.close();
diff --git a/lucene/sandbox/src/test/org/apache/lucene/search/TestTermAutomatonQuery.java b/lucene/sandbox/src/test/org/apache/lucene/search/TestTermAutomatonQuery.java
index be96263..a9b2de3 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/search/TestTermAutomatonQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/search/TestTermAutomatonQuery.java
@@ -36,17 +36,16 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -59,14 +58,14 @@
   public void testBasic1() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    Document doc = w.newDocument();
     // matches
-    doc.add(newTextField("field", "here comes the sun", Field.Store.NO));
+    doc.addLargeText("field", "here comes the sun");
     w.addDocument(doc);
 
-    doc = new Document();
+    doc = w.newDocument();
     // doesn't match
-    doc.add(newTextField("field", "here comes the other sun", Field.Store.NO));
+    doc.addLargeText("field", "here comes the other sun");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IndexSearcher s = newSearcher(r);
@@ -93,12 +92,12 @@
   public void testBasicSynonym() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "here comes the sun", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "here comes the sun");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes the moon", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes the moon");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IndexSearcher s = newSearcher(r);
@@ -126,16 +125,16 @@
   public void testBasicSlop() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "here comes the sun", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "here comes the sun");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes sun", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes sun");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes the other sun", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes the other sun");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IndexSearcher s = newSearcher(r);
@@ -167,25 +166,25 @@
   public void testPosLengthAtQueryTimeMock() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "speedy wifi network", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "speedy wifi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "speedy wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "speedy wi fi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "fast wifi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "fast wifi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "fast wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "fast wi fi network");
     w.addDocument(doc);
 
     // doesn't match:
-    doc = new Document();
-    doc.add(newTextField("field", "slow wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "slow wi fi network");
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -218,25 +217,25 @@
   public void testPosLengthAtQueryTimeTrueish() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "speedy wifi network", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "speedy wifi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "speedy wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "speedy wi fi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "fast wifi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "fast wifi network");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "fast wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "fast wi fi network");
     w.addDocument(doc);
 
     // doesn't match:
-    doc = new Document();
-    doc.add(newTextField("field", "slow wi fi network", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "slow wi fi network");
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -263,14 +262,14 @@
   public void testFreq() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    Document doc = w.newDocument();
     // matches freq == 3
-    doc.add(newTextField("field", "here comes the sun foo bar here comes another sun here comes shiny sun", Field.Store.NO));
+    doc.addLargeText("field", "here comes the sun foo bar here comes another sun here comes shiny sun");
     w.addDocument(doc);
 
-    doc = new Document();
+    doc = w.newDocument();
     // doesn't match
-    doc.add(newTextField("field", "here comes the other sun", Field.Store.NO));
+    doc.addLargeText("field", "here comes the other sun");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IndexSearcher s = newSearcher(r);
@@ -312,13 +311,13 @@
   public void testSegsMissingTerms() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "here comes the sun", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "here comes the sun");
     w.addDocument(doc);
     w.commit();
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes the moon", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes the moon");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     IndexSearcher s = newSearcher(r);
@@ -376,21 +375,21 @@
   public void testAnyFromTokenStream() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "here comes the sun", Field.Store.NO));
+    Document doc = w.newDocument();
+    doc.addLargeText("field", "here comes the sun");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes the moon", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes the moon");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "here comes sun", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes sun");
     w.addDocument(doc);
 
     // Should not match:
-    doc = new Document();
-    doc.add(newTextField("field", "here comes the other sun", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addLargeText("field", "here comes the other sun");
     w.addDocument(doc);
 
     IndexReader r = w.getReader();
@@ -474,9 +473,10 @@
 
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("field");
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       int numTokens = atLeast(10);
 
       StringBuilder sb = new StringBuilder();
@@ -485,8 +485,8 @@
         sb.append((char) (97 + random().nextInt(3)));
       }
       String contents = sb.toString();
-      doc.add(newTextField("field", contents, Field.Store.NO));
-      doc.add(new StoredField("id", ""+i));
+      doc.addLargeText("field", contents);
+      doc.addStoredString("id", ""+i);
       if (VERBOSE) {
         System.out.println("  doc " + i + " -> " + contents);
       }
@@ -609,7 +609,7 @@
   private Set<String> toDocIDs(IndexSearcher s, TopDocs hits) throws IOException {
     Set<String> result = new HashSet<>();
     for(ScoreDoc hit : hits.scoreDocs) {
-      result.add(s.doc(hit.doc).get("id"));
+      result.add(s.doc(hit.doc).getString("id"));
     }
     return result;
   }
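
The randomized test above pairs an indexed text field with a stored-only id (addStoredString replacing StoredField), then resolves hits back to ids with getString. A compact sketch of that round trip under the same branch APIs:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;

    // Sketch: index a searchable text field plus a stored-only id.
    void addDoc(RandomIndexWriter w, int i, String contents) throws IOException {
      Document doc = w.newDocument();
      doc.addLargeText("field", contents); // indexed for searching
      doc.addStoredString("id", "" + i);   // stored only, to identify hits later
      w.addDocument(doc);
    }

    // Sketch: map each hit back to its stored id.
    Set<String> toDocIDs(IndexSearcher s, TopDocs hits) throws IOException {
      Set<String> result = new HashSet<>();
      for (ScoreDoc hit : hits.scoreDocs) {
        result.add(s.doc(hit.doc).getString("id"));
      }
      return result;
    }
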
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/NumberRangePrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/NumberRangePrefixTreeStrategy.java
index 8d3ed92..a643b22 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/NumberRangePrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/NumberRangePrefixTreeStrategy.java
@@ -18,15 +18,14 @@
  */
 
 import java.io.IOException;
+import java.text.ParseException;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Filter;
@@ -34,6 +33,8 @@
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 import static org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape;
 
@@ -61,11 +62,9 @@
   }
 
   @Override
-  public Field[] createIndexableFields(Shape shape) {
+  public void addFields(Document doc, Shape shape) {
     //levels doesn't actually matter; NumberRange based Shapes have their own "level".
-    TokenStream tokenStream = createTokenStream(shape, grid.getMaxLevels());
-    Field field = new Field(getFieldName(), tokenStream, FIELD_TYPE);
-    return new Field[]{field};
+    doc.addLargeText(getFieldName(), createTokenStream(shape, grid.getMaxLevels()));
   }
 
   /** Unsupported. */
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
index c20fb8f..aee0ed4 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
@@ -17,17 +17,18 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
-import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.ReciprocalFloatFunction;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.spatial.query.SpatialArgs;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * The SpatialStrategy encapsulates an approach to indexing and searching based
@@ -98,7 +99,7 @@
-   * @return Not null nor will it have null elements.
+   * Fields are added directly to the given document; nothing is returned.
    * @throws UnsupportedOperationException if given a shape incompatible with the strategy
    */
-  public abstract Field[] createIndexableFields(Shape shape);
+  public abstract void addFields(Document doc, Shape shape);
 
   /**
    * See {@link #makeDistanceValueSource(com.spatial4j.core.shape.Point, double)} called with
@@ -126,8 +127,8 @@
    * @throws org.apache.lucene.spatial.query.UnsupportedSpatialOperation If the strategy does not support the {@link
    * org.apache.lucene.spatial.query.SpatialOperation} in {@code args}.
    */
-  public Query makeQuery(SpatialArgs args) {
-    return new ConstantScoreQuery(makeFilter(args));
+  public Query makeQuery(FieldTypes fieldTypes, SpatialArgs args) {
+    return new ConstantScoreQuery(makeFilter(fieldTypes, args));
   }
 
   /**
@@ -143,7 +144,7 @@
    * @throws org.apache.lucene.spatial.query.UnsupportedSpatialOperation If the strategy does not support the {@link
    * org.apache.lucene.spatial.query.SpatialOperation} in {@code args}.
    */
-  public abstract Filter makeFilter(SpatialArgs args);
+  public abstract Filter makeFilter(FieldTypes fieldTypes, SpatialArgs args);
 
   /**
    * Returns a ValueSource with values ranging from 1 to 0, depending inversely
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
index 1ac7037..5d67b27 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
@@ -17,19 +17,16 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
@@ -38,7 +35,6 @@
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.spatial.util.DistanceToShapeValueSource;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
 import com.spatial4j.core.context.SpatialContext;
 import com.spatial4j.core.shape.Point;
@@ -66,7 +62,7 @@
  * <p>
  * This uses 4 double fields for minX, maxX, minY, maxY
  * and a boolean to mark a dateline cross. Depending on the particular {@link
- * SpatialOperation}s, there are a variety of {@link NumericRangeQuery}s to be
+ * SpatialOperation}s, there are a variety of range queries to be
  * done.
  * The {@link #makeOverlapRatioValueSource(com.spatial4j.core.shape.Rectangle, double)}
  * works by calculating the query bbox overlap percentage against the indexed
@@ -94,9 +90,6 @@
   protected final String field_maxY;
   protected final String field_xdl; // crosses dateline
 
-  protected FieldType fieldType;//for the 4 numbers
-  protected FieldType xdlFieldType;
-
   public BBoxStrategy(SpatialContext ctx, String fieldNamePrefix) {
     super(ctx, fieldNamePrefix);
     field_bbox = fieldNamePrefix;
@@ -105,35 +98,6 @@
     field_minY = fieldNamePrefix + SUFFIX_MINY;
     field_maxY = fieldNamePrefix + SUFFIX_MAXY;
     field_xdl = fieldNamePrefix + SUFFIX_XDL;
-
-    FieldType fieldType = new FieldType(DoubleField.TYPE_NOT_STORED);
-    fieldType.setNumericPrecisionStep(8);//Solr's default
-    fieldType.setDocValuesType(DocValuesType.NUMERIC);
-    setFieldType(fieldType);
-  }
-
-  private int getPrecisionStep() {
-    return fieldType.numericPrecisionStep();
-  }
-
-  public FieldType getFieldType() {
-    return fieldType;
-  }
-
-  /** Used to customize the indexing options of the 4 number fields, and to a lesser degree the XDL field too. Search
-   * requires indexed=true, and relevancy requires docValues. If these features aren't needed then disable them.
-   * {@link FieldType#freeze()} is called on the argument. */
-  public void setFieldType(FieldType fieldType) {
-    fieldType.freeze();
-    this.fieldType = fieldType;
-    //only double's supported right now
-    if (fieldType.numericType() != FieldType.NumericType.DOUBLE)
-      throw new IllegalArgumentException("BBoxStrategy only supports doubles at this time.");
-    //for xdlFieldType, copy some similar options. Don't do docValues since it isn't needed here.
-    xdlFieldType = new FieldType(StringField.TYPE_NOT_STORED);
-    xdlFieldType.setStored(fieldType.stored());
-    xdlFieldType.setIndexOptions(fieldType.indexOptions());
-    xdlFieldType.freeze();
   }
 
   //---------------------------------
@@ -141,42 +105,32 @@
   //---------------------------------
 
   @Override
-  public Field[] createIndexableFields(Shape shape) {
-    return createIndexableFields(shape.getBoundingBox());
+  public void addFields(Document doc, Shape shape) {
+    addFields(doc, shape.getBoundingBox());
   }
 
-  public Field[] createIndexableFields(Rectangle bbox) {
-    Field[] fields = new Field[5];
-    fields[0] = new ComboField(field_minX, bbox.getMinX(), fieldType);
-    fields[1] = new ComboField(field_maxX, bbox.getMaxX(), fieldType);
-    fields[2] = new ComboField(field_minY, bbox.getMinY(), fieldType);
-    fields[3] = new ComboField(field_maxY, bbox.getMaxY(), fieldType);
-    fields[4] = new ComboField(field_xdl, bbox.getCrossesDateLine()?"T":"F", xdlFieldType);
-    return fields;
+  public void addFields(Document doc, Rectangle bbox) {
+    doc.addDouble(field_minX, bbox.getMinX());
+    doc.addDouble(field_maxX, bbox.getMaxX());
+    doc.addDouble(field_minY, bbox.getMinY());
+    doc.addDouble(field_maxY, bbox.getMaxY());
+    doc.addAtom(field_xdl, bbox.getCrossesDateLine() ? "T" : "F");
   }
 
-  /** Field subclass circumventing Field limitations. This one instance can have any combination of indexed, stored,
-   * and docValues.
-   */
-  private static class ComboField extends Field {
-    private ComboField(String name, Object value, FieldType type) {
-      super(name, type);//this expert constructor allows us to have a field that has docValues & indexed/stored
-      super.fieldsData = value;
-    }
+  public void setDocValuesType(FieldTypes fieldTypes, DocValuesType dvType) {
+    fieldTypes.setDocValuesType(field_minX, dvType);
+    fieldTypes.setDocValuesType(field_minY, dvType);
+    fieldTypes.setDocValuesType(field_maxX, dvType);
+    fieldTypes.setDocValuesType(field_maxY, dvType);
+    fieldTypes.setDocValuesType(field_xdl, dvType);
+  }
 
-    //Is this a hack?  We assume that numericValue() is only called for DocValues purposes.
-    @Override
-    public Number numericValue() {
-      //Numeric DocValues only supports Long,
-      final Number number = super.numericValue();
-      if (number == null)
-        return null;
-      if (fieldType().numericType() == FieldType.NumericType.DOUBLE)
-        return Double.doubleToLongBits(number.doubleValue());
-      if (fieldType().numericType() == FieldType.NumericType.FLOAT)
-        return Float.floatToIntBits(number.floatValue());
-      return number.longValue();
-    }
+  public void setIndexOptions(FieldTypes fieldTypes, IndexOptions indexOptions) {
+    fieldTypes.setIndexOptions(field_minX, indexOptions);
+    fieldTypes.setIndexOptions(field_maxX, indexOptions);
+    fieldTypes.setIndexOptions(field_minY, indexOptions);
+    fieldTypes.setIndexOptions(field_maxY, indexOptions);
+    fieldTypes.setIndexOptions(field_xdl, indexOptions);
   }
 
   //---------------------------------
@@ -210,13 +164,13 @@
   //---------------------------------
 
   @Override
-  public Filter makeFilter(SpatialArgs args) {
-    return new QueryWrapperFilter(makeSpatialQuery(args));
+  public Filter makeFilter(FieldTypes fieldTypes, SpatialArgs args) {
+    return new QueryWrapperFilter(makeSpatialQuery(fieldTypes, args));
   }
 
   @Override
-  public ConstantScoreQuery makeQuery(SpatialArgs args) {
-    return new ConstantScoreQuery(makeSpatialQuery(args));
+  public ConstantScoreQuery makeQuery(FieldTypes fieldTypes, SpatialArgs args) {
+    return new ConstantScoreQuery(makeSpatialQuery(fieldTypes, args));
   }
 
 //  Utility on SpatialStrategy?
@@ -224,7 +178,7 @@
 //    return new FilteredQuery(new FunctionQuery(valueSource), makeFilter(args));
 //  }
 
-  private Query makeSpatialQuery(SpatialArgs args) {
+  private Query makeSpatialQuery(FieldTypes fieldTypes, SpatialArgs args) {
     Shape shape = args.getShape();
     if (!(shape instanceof Rectangle))
       throw new UnsupportedOperationException("Can only query by Rectangle, not " + shape);
@@ -235,13 +189,14 @@
     // Useful for understanding Relations:
     // http://edndoc.esri.com/arcsde/9.1/general_topics/understand_spatial_relations.htm
     SpatialOperation op = args.getOperation();
-         if( op == SpatialOperation.BBoxIntersects ) spatial = makeIntersects(bbox);
-    else if( op == SpatialOperation.BBoxWithin     ) spatial = makeWithin(bbox);
-    else if( op == SpatialOperation.Contains       ) spatial = makeContains(bbox);
-    else if( op == SpatialOperation.Intersects     ) spatial = makeIntersects(bbox);
-    else if( op == SpatialOperation.IsEqualTo      ) spatial = makeEquals(bbox);
-    else if( op == SpatialOperation.IsDisjointTo   ) spatial = makeDisjoint(bbox);
-    else if( op == SpatialOperation.IsWithin       ) spatial = makeWithin(bbox);
+
+    if( op == SpatialOperation.BBoxIntersects      ) spatial = makeIntersects(fieldTypes, bbox);
+    else if( op == SpatialOperation.BBoxWithin     ) spatial = makeWithin(fieldTypes, bbox);
+    else if( op == SpatialOperation.Contains       ) spatial = makeContains(fieldTypes, bbox);
+    else if( op == SpatialOperation.Intersects     ) spatial = makeIntersects(fieldTypes, bbox);
+    else if( op == SpatialOperation.IsEqualTo      ) spatial = makeEquals(fieldTypes, bbox);
+    else if( op == SpatialOperation.IsDisjointTo   ) spatial = makeDisjoint(fieldTypes, bbox);
+    else if( op == SpatialOperation.IsWithin       ) spatial = makeWithin(fieldTypes, bbox);
     else { //no Overlaps support yet
         throw new UnsupportedSpatialOperation(op);
     }
@@ -253,15 +208,15 @@
    *
    * @return the spatial query
    */
-  Query makeContains(Rectangle bbox) {
+  Query makeContains(FieldTypes fieldTypes, Rectangle bbox) {
 
     // general case
     // docMinX <= queryExtent.getMinX() AND docMinY <= queryExtent.getMinY() AND docMaxX >= queryExtent.getMaxX() AND docMaxY >= queryExtent.getMaxY()
 
     // Y conditions
     // docMinY <= queryExtent.getMinY() AND docMaxY >= queryExtent.getMaxY()
-    Query qMinY = NumericRangeQuery.newDoubleRange(field_minY, getPrecisionStep(), null, bbox.getMinY(), false, true);
-    Query qMaxY = NumericRangeQuery.newDoubleRange(field_maxY, getPrecisionStep(), bbox.getMaxY(), null, true, false);
+    Query qMinY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minY, null, false, bbox.getMinY(), true));
+    Query qMaxY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxY, bbox.getMaxY(), true, null, false));
     Query yConditions = this.makeQuery(BooleanClause.Occur.MUST, qMinY, qMaxY);
 
     // X conditions
@@ -273,8 +228,8 @@
       // X Conditions for documents that do not cross the date line,
       // documents that contain the min X and max X of the query envelope,
       // docMinX <= queryExtent.getMinX() AND docMaxX >= queryExtent.getMaxX()
-      Query qMinX = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), null, bbox.getMinX(), false, true);
-      Query qMaxX = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), bbox.getMaxX(), null, true, false);
+      Query qMinX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, null, false, bbox.getMinX(), true));
+      Query qMaxX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, bbox.getMaxX(), true, null, false));
       Query qMinMax = this.makeQuery(BooleanClause.Occur.MUST, qMinX, qMaxX);
       Query qNonXDL = this.makeXDL(false, qMinMax);
 
@@ -285,8 +240,8 @@
         // the left portion of the document contains the min X of the query
         // OR the right portion of the document contains the max X of the query,
         // docMinXLeft <= queryExtent.getMinX() OR docMaxXRight >= queryExtent.getMaxX()
-        Query qXDLLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), null, bbox.getMinX(), false, true);
-        Query qXDLRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), bbox.getMaxX(), null, true, false);
+        Query qXDLLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, null, false, bbox.getMinX(), true));
+        Query qXDLRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, bbox.getMaxX(), true, null, false));
         Query qXDLLeftRight = this.makeQuery(BooleanClause.Occur.SHOULD, qXDLLeft, qXDLRight);
         Query qXDL = this.makeXDL(true, qXDLLeftRight);
 
@@ -294,7 +249,7 @@
         if (bbox.getMinX() == bbox.getMaxX() && Math.abs(bbox.getMinX()) == 180) {
           double edge = bbox.getMinX() * -1;//opposite dateline edge
           qEdgeDL = makeQuery(BooleanClause.Occur.SHOULD,
-              makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge));
+              makeNumberTermQuery(fieldTypes, field_minX, edge), makeNumberTermQuery(fieldTypes, field_maxX, edge));
         }
 
         // apply the non-XDL and XDL conditions
@@ -309,12 +264,12 @@
       // the left portion of the document contains the min X of the query
       // AND the right portion of the document contains the max X of the query,
       // docMinXLeft <= queryExtent.getMinX() AND docMaxXRight >= queryExtent.getMaxX()
-      Query qXDLLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), null, bbox.getMinX(), false, true);
-      Query qXDLRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), bbox.getMaxX(), null, true, false);
+      Query qXDLLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, null, false, bbox.getMinX(), true));
+      Query qXDLRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, bbox.getMaxX(), true, null, false));
       Query qXDLLeftRight = this.makeXDL(true, this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight));
 
       Query qWorld = makeQuery(BooleanClause.Occur.MUST,
-          makeNumberTermQuery(field_minX, -180), makeNumberTermQuery(field_maxX, 180));
+          makeNumberTermQuery(fieldTypes, field_minX, -180), makeNumberTermQuery(fieldTypes, field_maxX, 180));
 
       xConditions = makeQuery(BooleanClause.Occur.SHOULD, qXDLLeftRight, qWorld);
     }
@@ -328,15 +283,15 @@
    *
    * @return the spatial query
    */
-  Query makeDisjoint(Rectangle bbox) {
+  Query makeDisjoint(FieldTypes fieldTypes, Rectangle bbox) {
 
     // general case
     // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX() OR docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY()
 
     // Y conditions
     // docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY()
-    Query qMinY = NumericRangeQuery.newDoubleRange(field_minY, getPrecisionStep(), bbox.getMaxY(), null, false, false);
-    Query qMaxY = NumericRangeQuery.newDoubleRange(field_maxY, getPrecisionStep(), null, bbox.getMinY(), false, false);
+    Query qMinY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minY, bbox.getMaxY(), false, null, false));
+    Query qMaxY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxY, null, false, bbox.getMinY(), false));
     Query yConditions = this.makeQuery(BooleanClause.Occur.SHOULD, qMinY, qMaxY);
 
     // X conditions
@@ -347,18 +302,18 @@
 
       // X Conditions for documents that do not cross the date line,
       // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()
-      Query qMinX = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMaxX(), null, false, false);
+      Query qMinX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMaxX(), false, null, false));
       if (bbox.getMinX() == -180.0 && ctx.isGeo()) {//touches dateline; -180 == 180
         BooleanQuery bq = new BooleanQuery();
         bq.add(qMinX, BooleanClause.Occur.MUST);
-        bq.add(makeNumberTermQuery(field_maxX, 180.0), BooleanClause.Occur.MUST_NOT);
+        bq.add(makeNumberTermQuery(fieldTypes, field_maxX, 180.0), BooleanClause.Occur.MUST_NOT);
         qMinX = bq;
       }
-      Query qMaxX = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMinX(), false, false);
+      Query qMaxX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMinX(), false));
       if (bbox.getMaxX() == 180.0 && ctx.isGeo()) {//touches dateline; -180 == 180
         BooleanQuery bq = new BooleanQuery();
         bq.add(qMaxX, BooleanClause.Occur.MUST);
-        bq.add(makeNumberTermQuery(field_minX, -180.0), BooleanClause.Occur.MUST_NOT);
+        bq.add(makeNumberTermQuery(fieldTypes, field_minX, -180.0), BooleanClause.Occur.MUST_NOT);
         qMaxX = bq;
       }
       Query qMinMax = this.makeQuery(BooleanClause.Occur.SHOULD, qMinX, qMaxX);
@@ -375,8 +330,8 @@
         // where: docMaxXLeft = 180.0, docMinXRight = -180.0
         // (docMaxXLeft  < queryExtent.getMinX()) equates to (180.0  < queryExtent.getMinX()) and is ignored
         // (docMinXRight > queryExtent.getMaxX()) equates to (-180.0 > queryExtent.getMaxX()) and is ignored
-        Query qMinXLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMaxX(), null, false, false);
-        Query qMaxXRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMinX(), false, false);
+        Query qMinXLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMaxX(), false, null, false));
+        Query qMaxXRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMinX(), false));
         Query qLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qMinXLeft, qMaxXRight);
         Query qXDL = this.makeXDL(true, qLeftRight);
 
@@ -390,10 +345,10 @@
       // the document must be disjoint to both the left and right query portions
       // (docMinX > queryExtent.getMaxX()Left OR docMaxX < queryExtent.getMinX()) AND (docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()Left)
       // where: queryExtent.getMaxX()Left = 180.0, queryExtent.getMinX()Left = -180.0
-      Query qMinXLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), 180.0, null, false, false);
-      Query qMaxXLeft = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMinX(), false, false);
-      Query qMinXRight = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMaxX(), null, false, false);
-      Query qMaxXRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, -180.0, false, false);
+      Query qMinXLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, 180.0, false, null, false));
+      Query qMaxXLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMinX(), false));
+      Query qMinXRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMaxX(), false, null, false));
+      Query qMaxXRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, -180.0, false));
       Query qLeft = this.makeQuery(BooleanClause.Occur.SHOULD, qMinXLeft, qMaxXLeft);
       Query qRight = this.makeQuery(BooleanClause.Occur.SHOULD, qMinXRight, qMaxXRight);
       Query qLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qLeft, qRight);
@@ -412,13 +367,13 @@
    *
    * @return the spatial query
    */
-  Query makeEquals(Rectangle bbox) {
+  Query makeEquals(FieldTypes fieldTypes, Rectangle bbox) {
 
     // docMinX = queryExtent.getMinX() AND docMinY = queryExtent.getMinY() AND docMaxX = queryExtent.getMaxX() AND docMaxY = queryExtent.getMaxY()
-    Query qMinX = makeNumberTermQuery(field_minX, bbox.getMinX());
-    Query qMinY = makeNumberTermQuery(field_minY, bbox.getMinY());
-    Query qMaxX = makeNumberTermQuery(field_maxX, bbox.getMaxX());
-    Query qMaxY = makeNumberTermQuery(field_maxY, bbox.getMaxY());
+    Query qMinX = makeNumberTermQuery(fieldTypes, field_minX, bbox.getMinX());
+    Query qMinY = makeNumberTermQuery(fieldTypes, field_minY, bbox.getMinY());
+    Query qMaxX = makeNumberTermQuery(fieldTypes, field_maxX, bbox.getMaxX());
+    Query qMaxY = makeNumberTermQuery(fieldTypes, field_maxY, bbox.getMaxY());
     return makeQuery(BooleanClause.Occur.MUST, qMinX, qMinY, qMaxX, qMaxY);
   }
 
@@ -427,7 +382,7 @@
    *
    * @return the spatial query
    */
-  Query makeIntersects(Rectangle bbox) {
+  Query makeIntersects(FieldTypes fieldTypes, Rectangle bbox) {
 
     // the original intersects query does not work for envelopes that cross the date line,
     // switch to a NOT Disjoint query
@@ -447,7 +402,7 @@
 
     BooleanQuery qNotDisjoint = new BooleanQuery();
     qNotDisjoint.add(qHasEnv, BooleanClause.Occur.MUST);
-    Query qDisjoint = makeDisjoint(bbox);
+    Query qDisjoint = makeDisjoint(fieldTypes, bbox);
     qNotDisjoint.add(qDisjoint, BooleanClause.Occur.MUST_NOT);
 
     //Query qDisjoint = makeDisjoint();
@@ -478,15 +433,15 @@
    *
    * @return the spatial query
    */
-  Query makeWithin(Rectangle bbox) {
+  Query makeWithin(FieldTypes fieldTypes, Rectangle bbox) {
 
     // general case
     // docMinX >= queryExtent.getMinX() AND docMinY >= queryExtent.getMinY() AND docMaxX <= queryExtent.getMaxX() AND docMaxY <= queryExtent.getMaxY()
 
     // Y conditions
     // docMinY >= queryExtent.getMinY() AND docMaxY <= queryExtent.getMaxY()
-    Query qMinY = NumericRangeQuery.newDoubleRange(field_minY, getPrecisionStep(), bbox.getMinY(), null, true, false);
-    Query qMaxY = NumericRangeQuery.newDoubleRange(field_maxY, getPrecisionStep(), null, bbox.getMaxY(), false, true);
+    Query qMinY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minY, bbox.getMinY(), true, null, false));
+    Query qMaxY = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxY, null, false, bbox.getMaxY(), true));
     Query yConditions = this.makeQuery(BooleanClause.Occur.MUST, qMinY, qMaxY);
 
     // X conditions
@@ -500,8 +455,8 @@
       // queries that do not cross the date line
 
       // docMinX >= queryExtent.getMinX() AND docMaxX <= queryExtent.getMaxX()
-      Query qMinX = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMinX(), null, true, false);
-      Query qMaxX = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMaxX(), false, true);
+      Query qMinX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMinX(), true, null, false));
+      Query qMaxX = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMaxX(), true));
       Query qMinMax = this.makeQuery(BooleanClause.Occur.MUST, qMinX, qMaxX);
 
       double edge = 0;//none, otherwise opposite dateline of query
@@ -511,7 +466,7 @@
         edge = -180;
       if (edge != 0 && ctx.isGeo()) {
         Query edgeQ = makeQuery(BooleanClause.Occur.MUST,
-            makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge));
+            makeNumberTermQuery(fieldTypes, field_minX, edge), makeNumberTermQuery(fieldTypes, field_maxX, edge));
         qMinMax = makeQuery(BooleanClause.Occur.SHOULD, qMinMax, edgeQ);
       }
 
@@ -524,14 +479,14 @@
 
       // the document should be within the left portion of the query
       // docMinX >= queryExtent.getMinX() AND docMaxX <= 180.0
-      Query qMinXLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMinX(), null, true, false);
-      Query qMaxXLeft = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, 180.0, false, true);
+      Query qMinXLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMinX(), true, null, false));
+      Query qMaxXLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, 180.0, true));
       Query qLeft = this.makeQuery(BooleanClause.Occur.MUST, qMinXLeft, qMaxXLeft);
 
       // the document should be within the right portion of the query
       // docMinX >= -180.0 AND docMaxX <= queryExtent.getMaxX()
-      Query qMinXRight = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), -180.0, null, true, false);
-      Query qMaxXRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMaxX(), false, true);
+      Query qMinXRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, -180.0, true, null, false));
+      Query qMaxXRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMaxX(), true));
       Query qRight = this.makeQuery(BooleanClause.Occur.MUST, qMinXRight, qMaxXRight);
 
       // either left or right conditions should occur,
@@ -544,8 +499,8 @@
       // AND the right portion of the document must be within the right portion of the query
       // docMinXLeft >= queryExtent.getMinX() AND docMaxXLeft <= 180.0
       // AND docMinXRight >= -180.0 AND docMaxXRight <= queryExtent.getMaxX()
-      Query qXDLLeft = NumericRangeQuery.newDoubleRange(field_minX, getPrecisionStep(), bbox.getMinX(), null, true, false);
-      Query qXDLRight = NumericRangeQuery.newDoubleRange(field_maxX, getPrecisionStep(), null, bbox.getMaxX(), false, true);
+      Query qXDLLeft = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_minX, bbox.getMinX(), true, null, false));
+      Query qXDLRight = new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(field_maxX, null, false, bbox.getMaxX(), true));
       Query qXDLLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight);
       Query qXDL = this.makeXDL(true, qXDLLeftRight);
 
@@ -587,12 +542,9 @@
     return bq;
   }
 
-  private Query makeNumberTermQuery(String field, double number) {
-    BytesRefBuilder bytes = new BytesRefBuilder();
-    NumericUtils.longToPrefixCodedBytes(NumericUtils.doubleToSortableLong(number), 0, bytes);
-    return new TermQuery(new Term(field, bytes.get()));
+  private Query makeNumberTermQuery(FieldTypes fieldTypes, String field, double number) {
+    return new TermQuery(new Term(field, NumericUtils.doubleToBytes(number)));
   }
-
 }
 
 
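A note on the pattern applied throughout BBoxStrategy above: each NumericRangeQuery is replaced by a FieldTypes-built range filter wrapped in a ConstantScoreQuery so it can participate in the surrounding BooleanQuery. A minimal sketch, assuming the newDoubleRangeFilter(field, min, minInclusive, max, maxInclusive) signature used in this patch (the field name "minX" is illustrative):

    // half-open range docMinX >= queryMinX; a null bound leaves that end of the range open
    Query qMinX = new ConstantScoreQuery(
        fieldTypes.newDoubleRangeFilter("minX", bbox.getMinX(), true, null, false));
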
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java
index 6c9a929..0de5e40 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxValueSource.java
@@ -17,18 +17,20 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.shape.Rectangle;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
-import java.util.Map;
+import org.apache.lucene.util.NumericUtils;
+import com.spatial4j.core.shape.Rectangle;
 
 /**
  * A ValueSource in which the indexed Rectangle is returned from
@@ -68,8 +70,10 @@
           return null;
         } else {
           rect.reset(
-              Double.longBitsToDouble(minX.get(doc)), Double.longBitsToDouble(maxX.get(doc)),
-              Double.longBitsToDouble(minY.get(doc)), Double.longBitsToDouble(maxY.get(doc)));
+                     NumericUtils.longToDouble(minX.get(doc)),
+                     NumericUtils.longToDouble(maxX.get(doc)),
+                     NumericUtils.longToDouble(minY.get(doc)),
+                     NumericUtils.longToDouble(maxY.get(doc)));
           return rect;
         }
       }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
index 7ce92fa..3f535dc 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
@@ -24,8 +24,8 @@
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Shape;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.SpatialStrategy;
@@ -127,9 +127,9 @@
   }
 
   @Override
-  public Field[] createIndexableFields(Shape shape) {
+  public void addFields(Document doc, Shape shape) {
     double distErr = SpatialArgs.calcDistanceFromErrPct(shape, distErrPct, ctx);
-    return createIndexableFields(shape, distErr);
+    addFields(doc, shape, distErr);
   }
 
   /**
@@ -139,11 +139,14 @@
-   * simply/aggregate sets of complete leaves in a cell to its parent, resulting in ~20-25%
+   * simplify/aggregate sets of complete leaves in a cell to its parent, resulting in ~20-25%
    * fewer cells. It will likely be removed in the future.
    */
-  public Field[] createIndexableFields(Shape shape, double distErr) {
+  public void addFields(Document doc, Shape shape, double distErr) {
     int detailLevel = grid.getLevelForDistance(distErr);
-    TokenStream tokenStream = createTokenStream(shape, detailLevel);
-    Field field = new Field(getFieldName(), tokenStream, FIELD_TYPE);
-    return new Field[]{field};
+    FieldTypes fieldTypes = doc.getFieldTypes();
+    fieldTypes.disableNorms(getFieldName());
+    fieldTypes.disableHighlighting(getFieldName());
+    fieldTypes.setIndexOptions(getFieldName(), IndexOptions.DOCS);
+    fieldTypes.setMultiValued(getFieldName());
+    doc.addLargeText(getFieldName(), createTokenStream(shape, detailLevel));
   }
 
   protected TokenStream createTokenStream(Shape shape, int detailLevel) {
@@ -154,16 +157,6 @@
     return new CellTokenStream().setCells(cells);
   }
 
-  /* Indexed, tokenized, not stored. */
-  public static final FieldType FIELD_TYPE = new FieldType();
-
-  static {
-    FIELD_TYPE.setTokenized(true);
-    FIELD_TYPE.setOmitNorms(true);
-    FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
-    FIELD_TYPE.freeze();
-  }
-
   @Override
   public ValueSource makeDistanceValueSource(Point queryPoint, double multiplier) {
     PointPrefixTreeFieldCacheProvider p = provider.get( getFieldName() );
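
For context on the createIndexableFields -> addFields migration above, a sketch of the caller side (names from this patch: documents now come from the IndexWriter and carry the schema, so the strategy configures FieldTypes and adds the field in one call):

    Document doc = indexWriter.newDocument();  // was: new Document() plus doc.add(field) per Field
    strategy.addFields(doc, shape);            // sets norms/index options on FieldTypes, then addLargeText
    indexWriter.addDocument(doc);
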
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
index b7353b3..b1b3814 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
@@ -20,9 +20,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.CellIterator;
@@ -31,6 +30,8 @@
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeFilter}.
@@ -166,7 +169,7 @@
   }
 
   @Override
-  public Filter makeFilter(SpatialArgs args) {
+  public Filter makeFilter(FieldTypes fieldTypes, SpatialArgs args) {
     final SpatialOperation op = args.getOperation();
 
     Shape shape = args.getShape();
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
index 41bb66b..8f7bc5a 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
@@ -17,9 +17,10 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.TermsFilter;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -30,9 +31,8 @@
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-
-import java.util.ArrayList;
-import java.util.List;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * A basic implementation of {@link PrefixTreeStrategy} using a large
@@ -55,7 +55,7 @@
   }
 
   @Override
-  public Filter makeFilter(SpatialArgs args) {
+  public Filter makeFilter(FieldTypes fieldTypes, SpatialArgs args) {
     final SpatialOperation op = args.getOperation();
     if (op != SpatialOperation.Intersects)
       throw new UnsupportedSpatialOperation(op);
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java
index 03b4c05..40d095b 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java
@@ -17,13 +17,14 @@
  * limitations under the License.
  */
 
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.StringHelper;
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Shape;
 import com.spatial4j.core.shape.SpatialRelation;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.StringHelper;
-
-import java.util.Collection;
 
 /** The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs.
  * @lucene.internal */
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
index 5cf730b..6778497 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
@@ -272,7 +272,6 @@
       BytesRef token = getTokenBytesNoLeaf(null);
       double xmin = QuadPrefixTree.this.xmin;
       double ymin = QuadPrefixTree.this.ymin;
-
       for (int i = 0; i < token.length; i++) {
         byte c = token.bytes[token.offset + i];
         switch (c) {
@@ -289,7 +288,7 @@
             xmin += levelW[i];
             break;
           default:
-            throw new RuntimeException("unexpected char: " + c);
+            throw new RuntimeException("unexpected char: " + (char) c);
         }
       }
       int len = token.length;
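
The (char) cast added above matters because token.bytes holds ASCII quadrant labels: concatenating the raw byte prints its numeric value rather than the letter. A minimal illustration:

    byte c = 'E';
    String s1 = "unexpected char: " + c;        // "unexpected char: 69"
    String s2 = "unexpected char: " + (char) c; // "unexpected char: E"
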
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
index 4a398d8..ff1110c 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
@@ -17,15 +17,18 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.io.BinaryCodec;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.util.Map;
 
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.DocIdSet;
@@ -40,14 +43,10 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.FilterOutputStream;
-import java.io.IOException;
-import java.util.Map;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.io.BinaryCodec;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 
 /**
@@ -76,7 +75,7 @@
   }
 
   @Override
-  public Field[] createIndexableFields(Shape shape) {
+  public void addFields(Document doc, Shape shape) {
     int bufSize = Math.max(128, (int) (this.indexLastBufSize * 1.5));//50% headroom over last
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream(bufSize);
     final BytesRef bytesRef = new BytesRef();//receiver of byteStream's bytes
@@ -95,7 +94,8 @@
       throw new RuntimeException(e);
     }
     this.indexLastBufSize = bytesRef.length;//cache heuristic
-    return new Field[]{new BinaryDocValuesField(getFieldName(), bytesRef)};
+    doc.getFieldTypes().disableSorting(getFieldName());
+    doc.addBinary(getFieldName(), bytesRef);
   }
 
   @Override
@@ -105,7 +105,7 @@
   }
 
   @Override
-  public Query makeQuery(SpatialArgs args) {
+  public Query makeQuery(FieldTypes fieldTypes, SpatialArgs args) {
     throw new UnsupportedOperationException("This strategy can't return a query that operates" +
         " efficiently. Instead try a Filter or ValueSource.");
   }
@@ -116,7 +116,7 @@
    * to prevent misuse because the filter can't efficiently work via iteration.
    */
   @Override
-  public Filter makeFilter(final SpatialArgs args) {
+  public Filter makeFilter(FieldTypes fieldTypes, final SpatialArgs args) {
     ValueSource shapeValueSource = makeShapeValueSource();
     ShapePredicateValueSource predicateValueSource = new ShapePredicateValueSource(
         shapeValueSource, args.getOperation(), args.getShape());
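
Because makeQuery still throws UnsupportedOperationException for this strategy, callers are expected to go through makeFilter and wrap the result themselves. A sketch matching how the tests below drive it (queryShape is illustrative):

    SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, queryShape);
    Filter filter = strategy.makeFilter(fieldTypes, args);  // evaluates the serialized shape per doc
    TopDocs hits = indexSearcher.search(new FilteredQuery(new MatchAllDocsQuery(), filter), 10);
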
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
index 85afeae..3ed40f8 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/DistanceValueSource.java
@@ -17,18 +17,20 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.distance.DistanceCalculator;
-import com.spatial4j.core.shape.Point;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
-import java.util.Map;
+import org.apache.lucene.util.NumericUtils;
+import com.spatial4j.core.distance.DistanceCalculator;
+import com.spatial4j.core.shape.Point;
 
 /**
  * An implementation of the Lucene ValueSource model that returns the distance
@@ -88,7 +90,7 @@
         // make sure it has minX and area
         if (validX.get(doc)) {
           assert validY.get(doc);
-          return calculator.distance(from, Double.longBitsToDouble(ptX.get(doc)), Double.longBitsToDouble(ptY.get(doc))) * multiplier;
+          return calculator.distance(from, NumericUtils.longToDouble(ptX.get(doc)), NumericUtils.longToDouble(ptY.get(doc))) * multiplier;
         }
         return nullValue;
       }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
index 5721ed6..819f7ab 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
@@ -17,14 +17,8 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Circle;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
-import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
@@ -33,7 +27,6 @@
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.spatial.SpatialStrategy;
@@ -42,6 +35,11 @@
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.spatial.util.CachingDoubleValueSource;
 import org.apache.lucene.spatial.util.ValueSourceFilter;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Circle;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * Simple {@link SpatialStrategy} which represents Points in two numeric {@link
@@ -64,7 +62,7 @@
  * <p>
  * <b>Implementation:</b>
  * <p>
- * This is a simple Strategy.  Search works with {@link NumericRangeQuery}s on
+ * This is a simple Strategy.  Search works with range queries on
  * an x and y pair of fields.  A Circle query does the same bbox query but adds a
  * ValueSource filter on
  * {@link #makeDistanceValueSource(com.spatial4j.core.shape.Point)}.
@@ -107,20 +105,18 @@
   }
 
   @Override
-  public Field[] createIndexableFields(Shape shape) {
-    if (shape instanceof Point)
-      return createIndexableFields((Point) shape);
+  public void addFields(Document doc, Shape shape) {
+    if (shape instanceof Point) {
+      addFields(doc, (Point) shape);
+      return;
+    }
     throw new UnsupportedOperationException("Can only index Point, not " + shape);
   }
 
-  /** @see #createIndexableFields(com.spatial4j.core.shape.Shape) */
+  /** @see #addFields(Document, com.spatial4j.core.shape.Shape) */
-  public Field[] createIndexableFields(Point point) {
-    FieldType doubleFieldType = new FieldType(DoubleField.TYPE_NOT_STORED);
-    doubleFieldType.setNumericPrecisionStep(precisionStep);
-    Field[] f = new Field[2];
-    f[0] = new DoubleField(fieldNameX, point.getX(), doubleFieldType);
-    f[1] = new DoubleField(fieldNameY, point.getY(), doubleFieldType);
-    return f;
+  public void addFields(Document doc, Point point) {
+    doc.addDouble(fieldNameX, point.getX());
+    doc.addDouble(fieldNameY, point.getY());
   }
 
   @Override
@@ -129,9 +125,9 @@
   }
 
   @Override
-  public Filter makeFilter(SpatialArgs args) {
+  public Filter makeFilter(FieldTypes fieldTypes, SpatialArgs args) {
     //unwrap the CSQ from makeQuery
-    ConstantScoreQuery csq = makeQuery(args);
+    ConstantScoreQuery csq = makeQuery(fieldTypes, args);
     Filter filter = csq.getFilter();
     if (filter != null)
       return filter;
@@ -140,7 +136,7 @@
   }
 
   @Override
-  public ConstantScoreQuery makeQuery(SpatialArgs args) {
+  public ConstantScoreQuery makeQuery(FieldTypes fieldTypes, SpatialArgs args) {
     if(! SpatialOperation.is( args.getOperation(),
         SpatialOperation.Intersects,
         SpatialOperation.IsWithin ))
@@ -148,12 +144,12 @@
     Shape shape = args.getShape();
     if (shape instanceof Rectangle) {
       Rectangle bbox = (Rectangle) shape;
-      return new ConstantScoreQuery(makeWithin(bbox));
+      return new ConstantScoreQuery(makeWithin(fieldTypes, bbox));
     } else if (shape instanceof Circle) {
       Circle circle = (Circle)shape;
       Rectangle bbox = circle.getBoundingBox();
       ValueSourceFilter vsf = new ValueSourceFilter(
-          new QueryWrapperFilter(makeWithin(bbox)),
+          new QueryWrapperFilter(makeWithin(fieldTypes, bbox)),
           makeDistanceValueSource(circle.getCenter()),
           0,
           circle.getRadius() );
@@ -165,7 +161,7 @@
   }
 
   //TODO this is basically old code that hasn't been verified well and should probably be removed
-  public Query makeQueryDistanceScore(SpatialArgs args) {
+  public Query makeQueryDistanceScore(FieldTypes fieldTypes, SpatialArgs args) {
     // For starters, just limit the bbox
     Shape shape = args.getShape();
     if (!(shape instanceof Rectangle || shape instanceof Circle)) {
@@ -187,12 +183,12 @@
     if( SpatialOperation.is( op,
         SpatialOperation.BBoxWithin,
         SpatialOperation.BBoxIntersects ) ) {
-        spatial = makeWithin(bbox);
+        spatial = makeWithin(fieldTypes, bbox);
     }
     else if( SpatialOperation.is( op,
       SpatialOperation.Intersects,
       SpatialOperation.IsWithin ) ) {
-      spatial = makeWithin(bbox);
+      spatial = makeWithin(fieldTypes, bbox);
       if( args.getShape() instanceof Circle) {
         Circle circle = (Circle)args.getShape();
 
@@ -206,7 +202,7 @@
       }
     }
     else if( op == SpatialOperation.IsDisjointTo ) {
-      spatial =  makeDisjoint(bbox);
+      spatial =  makeDisjoint(fieldTypes, bbox);
     }
 
     if( spatial == null ) {
@@ -229,39 +225,39 @@
   /**
    * Constructs a query to retrieve documents that fully contain the input envelope.
    */
-  private Query makeWithin(Rectangle bbox) {
+  private Query makeWithin(FieldTypes fieldTypes, Rectangle bbox) {
     BooleanQuery bq = new BooleanQuery();
     BooleanClause.Occur MUST = BooleanClause.Occur.MUST;
     if (bbox.getCrossesDateLine()) {
       //use null as performance trick since no data will be beyond the world bounds
-      bq.add(rangeQuery(fieldNameX, null/*-180*/, bbox.getMaxX()), BooleanClause.Occur.SHOULD );
-      bq.add(rangeQuery(fieldNameX, bbox.getMinX(), null/*+180*/), BooleanClause.Occur.SHOULD );
+      bq.add(rangeQuery(fieldTypes, fieldNameX, null/*-180*/, bbox.getMaxX()), BooleanClause.Occur.SHOULD );
+      bq.add(rangeQuery(fieldTypes, fieldNameX, bbox.getMinX(), null/*+180*/), BooleanClause.Occur.SHOULD );
       bq.setMinimumNumberShouldMatch(1);//must match at least one of the SHOULD
     } else {
-      bq.add(rangeQuery(fieldNameX, bbox.getMinX(), bbox.getMaxX()), MUST);
+      bq.add(rangeQuery(fieldTypes, fieldNameX, bbox.getMinX(), bbox.getMaxX()), MUST);
     }
-    bq.add(rangeQuery(fieldNameY, bbox.getMinY(), bbox.getMaxY()), MUST);
+    bq.add(rangeQuery(fieldTypes, fieldNameY, bbox.getMinY(), bbox.getMaxY()), MUST);
+
     return bq;
   }
 
-  private NumericRangeQuery<Double> rangeQuery(String fieldName, Double min, Double max) {
-    return NumericRangeQuery.newDoubleRange(
+  private Query rangeQuery(FieldTypes fieldTypes, String fieldName, Double min, Double max) {
+    return new ConstantScoreQuery(fieldTypes.newDoubleRangeFilter(
         fieldName,
-        precisionStep,
         min,
-        max,
         true,
-        true);//inclusive
+        max,
+        true));//inclusive
   }
 
   /**
-   * Constructs a query to retrieve documents that fully contain the input envelope.
+   * Constructs a query to retrieve documents that are disjoint from the input envelope.
    */
-  private Query makeDisjoint(Rectangle bbox) {
+  private Query makeDisjoint(FieldTypes fieldTypes, Rectangle bbox) {
     if (bbox.getCrossesDateLine())
       throw new UnsupportedOperationException("makeDisjoint doesn't handle dateline cross");
-    Query qX = rangeQuery(fieldNameX, bbox.getMinX(), bbox.getMaxX());
-    Query qY = rangeQuery(fieldNameY, bbox.getMinY(), bbox.getMaxY());
+    Query qX = rangeQuery(fieldTypes, fieldNameX, bbox.getMinX(), bbox.getMaxX());
+    Query qY = rangeQuery(fieldTypes, fieldNameY, bbox.getMinY(), bbox.getMaxY());
 
     BooleanQuery bq = new BooleanQuery();
     bq.add(qX,BooleanClause.Occur.MUST_NOT);
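
A worked example of the dateline branch in makeWithin above: a box from minX=170 to maxX=-170 wraps the dateline, so no single x-range can express it; the query instead becomes (x <= -170) OR (x >= 170) with setMinimumNumberShouldMatch(1), where the null bounds stand in for -180/+180 since no data lies beyond world bounds. Sketch (the field name "x" is illustrative):

    BooleanQuery bq = new BooleanQuery();
    bq.add(rangeQuery(fieldTypes, "x", null, -170.0), BooleanClause.Occur.SHOULD); // [-180, -170]
    bq.add(rangeQuery(fieldTypes, "x", 170.0, null), BooleanClause.Occur.SHOULD);  // [170, 180]
    bq.setMinimumNumberShouldMatch(1); // at least one side must match
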
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
index 06966c7..f10d51d 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
@@ -22,7 +22,6 @@
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.spatial.bbox.BBoxStrategy;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
@@ -96,9 +95,7 @@
     super.setUp();
     if (strategy instanceof BBoxStrategy && random().nextBoolean()) {//disable indexing sometimes
       BBoxStrategy bboxStrategy = (BBoxStrategy)strategy;
-      final FieldType fieldType = new FieldType(bboxStrategy.getFieldType());
-      fieldType.setIndexOptions(IndexOptions.NONE);
-      bboxStrategy.setFieldType(fieldType);
+      bboxStrategy.setIndexOptions(fieldTypes, IndexOptions.NONE);
     }
   }
 
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java b/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java
index cc3fb02..efa0a84 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java
@@ -169,16 +169,16 @@
     //args.setDistPrecision(0.025);
     Query query;
     if (random().nextBoolean()) {
-      query = strategy.makeQuery(args);
+      query = strategy.makeQuery(fieldTypes, args);
     } else {
-      query = new FilteredQuery(new MatchAllDocsQuery(),strategy.makeFilter(args));
+      query = new FilteredQuery(new MatchAllDocsQuery(),strategy.makeFilter(fieldTypes, args));
     }
     SearchResults results = executeQuery(query, 100);
     assertEquals(""+shape,assertNumFound,results.numFound);
     if (assertIds != null) {
       Set<Integer> resultIds = new HashSet<>();
       for (SearchResult result : results.results) {
-        resultIds.add(Integer.valueOf(result.document.get("id")));
+        resultIds.add(Integer.valueOf(result.document.getString("id")));
       }
       for (int assertId : assertIds) {
         assertTrue("has " + assertId, resultIds.contains(assertId));
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
index 9eed512..5a60a8e 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
@@ -17,8 +17,13 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Shape;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.spatial.bbox.BBoxStrategy;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy;
@@ -29,18 +34,18 @@
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.serialized.SerializedDVStrategy;
 import org.apache.lucene.spatial.vector.PointVectorStrategy;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Collection;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Shape;
 
 public class QueryEqualsHashCodeTest extends LuceneTestCase {
 
   private final SpatialContext ctx = SpatialContext.GEO;
 
   @Test
-  public void testEqualsHashCode() {
+  public void testEqualsHashCode() throws Exception {
 
     final SpatialPrefixTree gridQuad = new QuadPrefixTree(ctx,10);
     final SpatialPrefixTree gridGeohash = new GeohashPrefixTree(ctx,10);
@@ -56,19 +61,26 @@
     }
   }
 
-  private void testEqualsHashcode(final SpatialStrategy strategy) {
+  private void testEqualsHashcode(final SpatialStrategy strategy) throws Exception {
     final SpatialArgs args1 = makeArgs1();
     final SpatialArgs args2 = makeArgs2();
+    IndexWriterConfig iwConfig = new IndexWriterConfig(null);
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, iwConfig);
+    Document doc = writer.newDocument();
+    strategy.addFields(doc, SpatialContext.GEO.makePoint(0, 0));
+    writer.addDocument(doc);
+    final FieldTypes fieldTypes = writer.getFieldTypes();
     testEqualsHashcode(args1, args2, new ObjGenerator() {
       @Override
       public Object gen(SpatialArgs args) {
-        return strategy.makeQuery(args);
+        return strategy.makeQuery(fieldTypes, args);
       }
     });
     testEqualsHashcode(args1, args2, new ObjGenerator() {
       @Override
       public Object gen(SpatialArgs args) {
-        return strategy.makeFilter(args);
+        return strategy.makeFilter(fieldTypes, args);
       }
     });
     testEqualsHashcode(args1, args2, new ObjGenerator() {
@@ -77,6 +89,8 @@
         return strategy.makeDistanceValueSource(args.getShape().getCenter());
       }
     });
+    writer.close();
+    dir.close();
   }
 
   private void testEqualsHashcode(SpatialArgs args1, SpatialArgs args2, ObjGenerator generator) {
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialExample.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialExample.java
index 46d1e193..5791baa 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialExample.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialExample.java
@@ -17,20 +17,14 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.distance.DistanceUtils;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
+import java.io.IOException;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
@@ -47,8 +41,10 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-
-import java.io.IOException;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 /**
  * This class serves as example code to show how to use the Lucene spatial
@@ -108,33 +104,32 @@
     IndexWriter indexWriter = new IndexWriter(directory, iwConfig);
 
     //Spatial4j is x-y order for arguments
-    indexWriter.addDocument(newSampleDocument(
+    indexWriter.addDocument(newSampleDocument(indexWriter,
         2, ctx.makePoint(-80.93, 33.77)));
 
     //Spatial4j has a WKT parser which is also "x y" order
-    indexWriter.addDocument(newSampleDocument(
+    indexWriter.addDocument(newSampleDocument(indexWriter,
         4, ctx.readShapeFromWkt("POINT(60.9289094 -50.7693246)")));
 
-    indexWriter.addDocument(newSampleDocument(
+    indexWriter.addDocument(newSampleDocument(indexWriter,
         20, ctx.makePoint(0.1,0.1), ctx.makePoint(0, 0)));
 
     indexWriter.close();
   }
 
-  private Document newSampleDocument(int id, Shape... shapes) {
-    Document doc = new Document();
-    doc.add(new StoredField("id", id));
-    doc.add(new NumericDocValuesField("id", id));
+  private Document newSampleDocument(IndexWriter indexWriter, int id, Shape... shapes) {
+    Document doc = indexWriter.newDocument();
+    FieldTypes fieldTypes = indexWriter.getFieldTypes();
+    fieldTypes.setMultiValued(strategy.getFieldName() + "_stored");
+    doc.addInt("id", id);
     //Potentially more than one shape in this field is supported by some
     // strategies; see the javadocs of the SpatialStrategy impl to see.
     for (Shape shape : shapes) {
-      for (Field f : strategy.createIndexableFields(shape)) {
-        doc.add(f);
-      }
+      strategy.addFields(doc, shape);
       //store it too; the format is up to you
       //  (assume point in this example)
       Point pt = (Point) shape;
-      doc.add(new StoredField(strategy.getFieldName(), pt.getX()+" "+pt.getY()));
+      doc.addStoredString(strategy.getFieldName() + "_stored", pt.getX()+" "+pt.getY());
     }
 
     return doc;
@@ -142,6 +137,7 @@
 
   private void search() throws Exception {
     IndexReader indexReader = DirectoryReader.open(directory);
+    FieldTypes fieldTypes = indexReader.getFieldTypes();
     IndexSearcher indexSearcher = new IndexSearcher(indexReader);
     Sort idSort = new Sort(new SortField("id", SortField.Type.INT));
 
@@ -151,13 +147,13 @@
       //note: SpatialArgs can be parsed from a string
       SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects,
           ctx.makeCircle(-80.0, 33.0, DistanceUtils.dist2Degrees(200, DistanceUtils.EARTH_MEAN_RADIUS_KM)));
-      Filter filter = strategy.makeFilter(args);
+      Filter filter = strategy.makeFilter(fieldTypes, args);
       TopDocs docs = indexSearcher.search(new MatchAllDocsQuery(), filter, 10, idSort);
       assertDocMatchedIds(indexSearcher, docs, 2);
-      //Now, lets get the distance for the 1st doc via computing from stored point value:
+      //Now, let's get the distance for the 1st doc via computing from stored point value:
       // (this computation is usually not redundant)
-      StoredDocument doc1 = indexSearcher.doc(docs.scoreDocs[0].doc);
-      String doc1Str = doc1.getField(strategy.getFieldName()).stringValue();
+      Document doc1 = indexSearcher.doc(docs.scoreDocs[0].doc);
+      String doc1Str = doc1.getString(strategy.getFieldName() + "_stored");
       //assume doc1Str is "x y" as written in newSampleDocument()
       int spaceIdx = doc1Str.indexOf(' ');
       double x = Double.parseDouble(doc1Str.substring(0, spaceIdx));
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
index 0eef93c..e267301 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java
@@ -17,29 +17,6 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.uninverting.UninvertingReader;
-import org.apache.lucene.uninverting.UninvertingReader.Type;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
-import org.apache.lucene.util.TestUtil;
-import org.junit.After;
-import org.junit.Before;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -47,6 +24,29 @@
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.uninverting.UninvertingReader.Type;
+import org.apache.lucene.uninverting.UninvertingReader;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomGaussian;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
 
@@ -58,6 +58,7 @@
   protected RandomIndexWriter indexWriter;
   private Directory directory;
   protected IndexSearcher indexSearcher;
+  protected FieldTypes fieldTypes;
 
   protected SpatialContext ctx;//subclass must initialize
 
@@ -77,13 +78,15 @@
     indexWriter = new RandomIndexWriter(random,directory, newIndexWriterConfig(random));
     indexReader = UninvertingReader.wrap(indexWriter.getReader(), uninvertMap);
     indexSearcher = newSearcher(indexReader);
+    fieldTypes = indexWriter.getFieldTypes();
   }
 
   protected IndexWriterConfig newIndexWriterConfig(Random random) {
     final IndexWriterConfig indexWriterConfig = LuceneTestCase.newIndexWriterConfig(random, new MockAnalyzer(random));
     //TODO can we randomly choose a doc-values supported format?
-    if (needsDocValues())
-      indexWriterConfig.setCodec( TestUtil.getDefaultCodec());
+    if (needsDocValues()) {
+      indexWriterConfig.setCodec(TestUtil.getDefaultCodec());
+    }
     return indexWriterConfig;
   }
 
@@ -233,15 +236,15 @@
   protected static class SearchResult {
 
     public float score;
-    public StoredDocument document;
+    public Document document;
 
-    public SearchResult(float score, StoredDocument storedDocument) {
+    public SearchResult(float score, Document storedDocument) {
       this.score = score;
       this.document = storedDocument;
     }
 
     public String getId() {
-      return document.get("id");
+      return document.getString("id");
     }
     
     @Override
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java
index 61a45b4..603c94b 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java
@@ -18,24 +18,6 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.FunctionQuery;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.CheckHits;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.spatial.query.SpatialArgs;
-import org.apache.lucene.spatial.query.SpatialArgsParser;
-import org.apache.lucene.spatial.query.SpatialOperation;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -48,6 +30,21 @@
 import java.util.Set;
 import java.util.logging.Logger;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.function.FunctionQuery;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialArgsParser;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Shape;
+
 public abstract class StrategyTestCase extends SpatialTestCase {
 
   public static final String DATA_SIMPLE_BBOX = "simple-bbox.txt";
@@ -91,19 +88,17 @@
     List<Document> documents = new ArrayList<>();
     while (sampleData.hasNext()) {
       SpatialTestData data = sampleData.next();
-      Document document = new Document();
-      document.add(new StringField("id", data.id, Field.Store.YES));
-      document.add(new StringField("name", data.name, Field.Store.YES));
+      Document document = indexWriter.newDocument();
+      document.addAtom("id", data.id);
+      document.addAtom("name", data.name);
       Shape shape = data.shape;
       shape = convertShapeFromGetDocuments(shape);
       if (shape != null) {
-        for (Field f : strategy.createIndexableFields(shape)) {
-          document.add(f);
+        strategy.addFields(document, shape);
+        if (storeShape) {//just for diagnostics
+          document.addStoredString(strategy.getFieldName() + "_stored", shape.toString());
         }
-        if (storeShape)//just for diagnostics
-          document.add(new StoredField(strategy.getFieldName(), shape.toString()));
       }
-
       documents.add(document);
     }
     return documents;
@@ -140,14 +135,15 @@
   public void runTestQuery(SpatialMatchConcern concern, SpatialTestQuery q) {
     String msg = q.toString(); //"Query: " + q.args.toString(ctx);
     SearchResults got = executeQuery(makeQuery(q), Math.max(100, q.ids.size()+1));
+
     if (storeShape && got.numFound > 0) {
       //check stored value is there
-      assertNotNull(got.results.get(0).document.get(strategy.getFieldName()));
+      assertNotNull(got.results.get(0).document.get(strategy.getFieldName() + "_stored"));
     }
     if (concern.orderIsImportant) {
       Iterator<String> ids = q.ids.iterator();
       for (SearchResult r : got.results) {
-        String id = r.document.get("id");
+        String id = r.document.getString("id");
         if (!ids.hasNext()) {
           fail(msg + " :: Did not get enough results.  Expect" + q.ids + ", got: " + got.toDebugString());
         }
@@ -162,7 +158,7 @@
       if (concern.resultsAreSuperset) {
         Set<String> found = new HashSet<>();
         for (SearchResult r : got.results) {
-          found.add(r.document.get("id"));
+          found.add(r.document.getString("id"));
         }
         for (String s : q.ids) {
           if (!found.contains(s)) {
@@ -172,7 +168,7 @@
       } else {
         List<String> found = new ArrayList<>();
         for (SearchResult r : got.results) {
-          found.add(r.document.get("id"));
+          found.add(r.document.getString("id"));
         }
 
         // sort both so that the order is not important
@@ -184,7 +180,7 @@
   }
 
   protected Query makeQuery(SpatialTestQuery q) {
-    return strategy.makeQuery(q.args);
+    return strategy.makeQuery(fieldTypes, q.args);
   }
 
   protected void adoc(String id, String shapeStr) throws IOException, ParseException {
@@ -196,14 +192,13 @@
   }
 
   protected Document newDoc(String id, Shape shape) {
-    Document doc = new Document();
-    doc.add(new StringField("id", id, Field.Store.YES));
+    Document doc = indexWriter.newDocument();
+    doc.addAtom("id", id);
     if (shape != null) {
-      for (Field f : strategy.createIndexableFields(shape)) {
-        doc.add(f);
+      strategy.addFields(doc, shape);
+      if (storeShape) {
+        doc.addStoredString(strategy.getFieldName() + "_stored", shape.toString());
       }
-      if (storeShape)
-        doc.add(new StoredField(strategy.getFieldName(), shape.toString()));//not to be parsed; just for debug
     }
     return doc;
   }
@@ -242,7 +237,7 @@
               (operation == SpatialOperation.Contains || operation == SpatialOperation.IsWithin));
     adoc("0", indexedShape);
     commit();
-    Query query = strategy.makeQuery(new SpatialArgs(operation, queryShape));
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(operation, queryShape));
     SearchResults got = executeQuery(query, 1);
     assert got.numFound <= 1 : "unclean test env";
     if ((got.numFound == 1) != match)
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java b/lucene/spatial/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
index 1a189f7..368c5ac 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
@@ -19,14 +19,6 @@
 
 import java.io.IOException;
 
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.context.SpatialContextFactory;
-import com.spatial4j.core.distance.DistanceUtils;
-import com.spatial4j.core.shape.Rectangle;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.impl.RectangleImpl;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Query;
@@ -37,6 +29,13 @@
 import org.apache.lucene.spatial.util.ShapeAreaValueSource;
 import org.junit.Ignore;
 import org.junit.Test;
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.context.SpatialContextFactory;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.impl.RectangleImpl;
 
 public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
 
@@ -111,10 +110,9 @@
     //test we can disable docValues for predicate tests
     if (random().nextBoolean()) {
       BBoxStrategy bboxStrategy = (BBoxStrategy) strategy;
-      FieldType fieldType = new FieldType(bboxStrategy.getFieldType());
-      fieldType.setDocValuesType(DocValuesType.NONE);
-      bboxStrategy.setFieldType(fieldType);
+      bboxStrategy.setDocValuesType(fieldTypes, DocValuesType.NONE);
     }
+
     for (SpatialOperation operation : SpatialOperation.values()) {
       if (operation == SpatialOperation.Overlaps)
         continue;//unsupported
@@ -189,7 +187,7 @@
 
     adoc("0", indexedShape);
     commit();
-    Query query = strategy.makeQuery(new SpatialArgs(operation, queryShape));
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(operation, queryShape));
     SearchResults got = executeQuery(query, 1);
     assert got.numFound <= 1 : "unclean test env";
     if ((got.numFound == 1) != match)
@@ -292,10 +290,8 @@
     setupGeo();
     //test we can disable indexed for this test
     BBoxStrategy bboxStrategy = (BBoxStrategy) strategy;
     if (random().nextBoolean()) {
-      FieldType fieldType = new FieldType(bboxStrategy.getFieldType());
-      fieldType.setIndexOptions(IndexOptions.NONE);
-      bboxStrategy.setFieldType(fieldType);
+      bboxStrategy.setIndexOptions(fieldTypes, IndexOptions.NONE);
     }
 
     adoc("100", ctx.makeRectangle(0, 20, 40, 80));
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java
index 87b0511..40a5b66 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java
@@ -17,13 +17,10 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContextFactory;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
+import java.text.ParseException;
+import java.util.HashMap;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
@@ -34,9 +31,9 @@
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.junit.Test;
-
-import java.text.ParseException;
-import java.util.HashMap;
+import com.spatial4j.core.context.SpatialContextFactory;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Shape;
 
 public class JtsPolygonTest extends StrategyTestCase {
 
@@ -67,7 +64,7 @@
             "-93.16315546122038 45.23742639412364," +
             "-93.18100824442227 45.25676372469945))",
         LUCENE_4464_distErrPct);
-    SearchResults got = executeQuery(strategy.makeQuery(args), 100);
+    SearchResults got = executeQuery(strategy.makeQuery(fieldTypes, args), 100);
     assertEquals(1, got.numFound);
     assertEquals("poly2", got.results.get(0).document.get("id"));
     //did not find poly 1 !
@@ -91,19 +88,15 @@
     
     SpatialPrefixTree trie = new QuadPrefixTree(ctx, 12);
     TermQueryPrefixTreeStrategy strategy = new TermQueryPrefixTreeStrategy(trie, "geo");
-    Document doc = new Document();
-    doc.add(new TextField("id", "1", Store.YES));
-
-    Field[] fields = strategy.createIndexableFields(area, 0.025);
-    for (Field field : fields) {
-      doc.add(field);  
-    }
+    Document doc = indexWriter.newDocument();
+    doc.addAtom("id", "1");
+    strategy.addFields(doc, area, 0.025);
     addDocument(doc);
 
     Point upperleft = ctx.makePoint(-122.88, 48.54);
     Point lowerright = ctx.makePoint(-122.82, 48.62);
     
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright)));
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright)));
     commit();
     
     TopDocs search = indexSearcher.search(query, 10);
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpFuzzyPrefixTreeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpFuzzyPrefixTreeTest.java
index 78765b8..977f656 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpFuzzyPrefixTreeTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpFuzzyPrefixTreeTest.java
@@ -17,30 +17,6 @@
  * limitations under the License.
  */
 
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.context.SpatialContextFactory;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.ShapeCollection;
-import com.spatial4j.core.shape.SpatialRelation;
-import com.spatial4j.core.shape.impl.RectangleImpl;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.spatial.StrategyTestCase;
-import org.apache.lucene.spatial.prefix.tree.Cell;
-import org.apache.lucene.spatial.prefix.tree.CellIterator;
-import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
-import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
-import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
-import org.apache.lucene.spatial.query.SpatialArgs;
-import org.apache.lucene.spatial.query.SpatialOperation;
-import org.junit.Test;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -53,6 +29,27 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.spatial.StrategyTestCase;
+import org.apache.lucene.spatial.prefix.tree.Cell;
+import org.apache.lucene.spatial.prefix.tree.CellIterator;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.junit.Test;
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.context.SpatialContextFactory;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.ShapeCollection;
+import com.spatial4j.core.shape.SpatialRelation;
+import com.spatial4j.core.shape.impl.RectangleImpl;
+
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
@@ -144,7 +141,7 @@
     setupQuadGrid(3);
     adoc("0", new ShapePair(ctx.makeRectangle(0, 33, -128, 128), ctx.makeRectangle(33, 128, -128, 128), true));
     commit();
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Contains,
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(SpatialOperation.Contains,
         ctx.makeRectangle(0, 128, -16, 128)));
     SearchResults searchResults = executeQuery(query, 1);
     assertEquals(1, searchResults.numFound);
@@ -157,7 +154,7 @@
     adoc("0", new ShapePair(ctx.makeRectangle(0, 10, -120, -100), ctx.makeRectangle(220, 240, 110, 125), false));
     commit();
     //query surrounds only the second part of the indexed shape
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.IsWithin,
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(SpatialOperation.IsWithin,
         ctx.makeRectangle(210, 245, 105, 128)));
     SearchResults searchResults = executeQuery(query, 1);
     //we shouldn't find it because it's not completely within
@@ -176,13 +173,13 @@
     //query does NOT contain it; both indexed cells are leaves to the query, and
     // when expanded to the full grid cells, the top one's top row is disjoint
     // from the query and thus not a match.
-    assertTrue(executeQuery(strategy.makeQuery(
+    assertTrue(executeQuery(strategy.makeQuery(fieldTypes, 
         new SpatialArgs(SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 56))
     ), 1).numFound==0);//no-match
 
     //this time the rect is a little bigger and is considered a match. It's
     // an acceptable false-positive because of the grid approximation.
-    assertTrue(executeQuery(strategy.makeQuery(
+    assertTrue(executeQuery(strategy.makeQuery(fieldTypes, 
         new SpatialArgs(SpatialOperation.IsWithin, ctx.makeRectangle(38, 192, -72, 80))
     ), 1).numFound==1);//match
   }
@@ -201,8 +198,9 @@
   // being independent for each shape vs the whole thing
   @Override
   protected Document newDoc(String id, Shape shape) {
-    Document doc = new Document();
-    doc.add(new StringField("id", id, Field.Store.YES));
+    Document doc = indexWriter.newDocument();
+    fieldTypes.setMultiValued(strategy.getFieldName());
+    doc.addAtom("id", id);
     if (shape != null) {
       Collection<Shape> shapes;
       if (shape instanceof ShapePair) {
@@ -213,12 +211,10 @@
         shapes = Collections.singleton(shape);
       }
       for (Shape shapei : shapes) {
-        for (Field f : strategy.createIndexableFields(shapei)) {
-          doc.add(f);
-        }
+        strategy.addFields(doc, shapei);
       }
       if (storeShape)//just for diagnostics
-        doc.add(new StoredField(strategy.getFieldName(), shape.toString()));
+        doc.addStoredString(strategy.getFieldName() + "_stored", shape.toString());
     }
     return doc;
   }
@@ -226,7 +222,7 @@
   private void doTest(final SpatialOperation operation) throws IOException {
     //first show that when there's no data, a query will result in no results
     {
-      Query query = strategy.makeQuery(new SpatialArgs(operation, randomRectangle()));
+      Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(operation, randomRectangle()));
       SearchResults searchResults = executeQuery(query, 1);
       assertEquals(0, searchResults.numFound);
     }
@@ -342,7 +338,7 @@
       SpatialArgs args = new SpatialArgs(operation, queryShape);
       if (queryShape instanceof ShapePair)
         args.setDistErrPct(0.0);//a hack; we want to be more detailed than gridSnap(queryShape)
-      Query query = strategy.makeQuery(args);
+      Query query = strategy.makeQuery(fieldTypes, args);
       SearchResults got = executeQuery(query, 100);
       Set<String> remainingExpectedIds = new LinkedHashSet<>(expectedIds);
       for (SearchResult result : got.results) {
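The recurring change in these spatial tests is that SpatialStrategy.makeQuery now takes the writer's FieldTypes, because per-field schema has moved off individual Field instances and onto the IndexWriter. A minimal sketch of the updated call site, assuming the branch API shown in this patch (strategy, ctx and writer stand in for the test fixtures):

    FieldTypes fieldTypes = writer.getFieldTypes();       // schema now lives on the writer
    SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects,
        ctx.makeRectangle(0, 10, 0, 10));                 // ctx is a spatial4j SpatialContext
    Query query = strategy.makeQuery(fieldTypes, args);   // was: strategy.makeQuery(args)
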
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java
index 057339a..911d9f6 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/RandomSpatialOpStrategyTestCase.java
@@ -40,13 +40,6 @@
   //Note: this is partially redundant with StrategyTestCase.runTestQuery & testOperation
 
   protected void testOperationRandomShapes(final SpatialOperation operation) throws IOException {
-    //first show that when there's no data, a query will result in no results
-    {
-      Query query = strategy.makeQuery(new SpatialArgs(operation, randomQueryShape()));
-      SearchResults searchResults = executeQuery(query, 1);
-      assertEquals(0, searchResults.numFound);
-    }
-
     final int numIndexedShapes = randomIntBetween(1, 6);
     List<Shape> indexedShapes = new ArrayList<>(numIndexedShapes);
     for (int i = 0; i < numIndexedShapes; i++) {
@@ -64,6 +57,7 @@
 
   protected void testOperation(final SpatialOperation operation,
                                List<Shape> indexedShapes, List<Shape> queryShapes, boolean havoc) throws IOException {
+
     //Main index loop:
     for (int i = 0; i < indexedShapes.size(); i++) {
       Shape shape = indexedShapes.get(i);
@@ -106,7 +100,7 @@
 
       //Search and verify results
       SpatialArgs args = new SpatialArgs(operation, queryShape);
-      Query query = strategy.makeQuery(args);
+      Query query = strategy.makeQuery(fieldTypes, args);
       SearchResults got = executeQuery(query, 100);
       Set<String> remainingExpectedIds = new LinkedHashSet<>(expectedIds);
       for (SearchResult result : got.results) {
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java
index c2f3528..6d86fbc 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java
@@ -107,12 +107,12 @@
   }
 
   private void checkHits(SpatialArgs args, int assertNumFound, int[] assertIds) {
-    SearchResults got = executeQuery(strategy.makeQuery(args), 100);
+    SearchResults got = executeQuery(strategy.makeQuery(fieldTypes, args), 100);
     assertEquals("" + args, assertNumFound, got.numFound);
     if (assertIds != null) {
       Set<Integer> gotIds = new HashSet<>();
       for (SearchResult result : got.results) {
-        gotIds.add(Integer.valueOf(result.document.get("id")));
+        gotIds.add(Integer.valueOf(result.document.getString("id")));
       }
       for (int assertId : assertIds) {
         assertTrue("has "+assertId,gotIds.contains(assertId));
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
index 97c2690..1f18be4 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
@@ -17,19 +17,16 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Shape;
+import java.io.IOException;
+import java.util.Arrays;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.spatial.SpatialTestCase;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgsParser;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Arrays;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Shape;
 
 
 public class TestTermQueryPrefixGridStrategy extends SpatialTestCase {
@@ -41,12 +38,12 @@
 
     Shape point = ctx.makePoint(-118.243680, 34.052230);
 
-    Document losAngeles = new Document();
-    losAngeles.add(new StringField("name", "Los Angeles", Field.Store.YES));
-    for (Field field : prefixGridStrategy.createIndexableFields(point)) {
-      losAngeles.add(field);
-    }
-    losAngeles.add(new StoredField(prefixGridStrategy.getFieldName(), point.toString()));//just for diagnostics
+    Document losAngeles = indexWriter.newDocument();
+    losAngeles.addAtom("name", "Los Angeles");
+
+    fieldTypes.setMultiValued(prefixGridStrategy.getFieldName());
+    prefixGridStrategy.addFields(losAngeles, point);
+    losAngeles.addStoredString(prefixGridStrategy.getFieldName() + "_stored", point.toString());//just for diagnostics
 
     addDocumentsAndCommit(Arrays.asList(losAngeles));
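Document construction follows a declare-then-add idiom on this branch: documents come from the writer, multi-valued fields are registered once on FieldTypes, and typed add methods replace explicit Field construction. A sketch under those assumptions, using the names from the test above:

    Document doc = indexWriter.newDocument();                     // was: new Document()
    doc.addAtom("name", "Los Angeles");                           // single untokenized term
    fieldTypes.setMultiValued(prefixGridStrategy.getFieldName()); // a shape expands to many grid terms
    prefixGridStrategy.addFields(doc, point);                     // was: loop over createIndexableFields(point)
    doc.addStoredString(prefixGridStrategy.getFieldName() + "_stored", point.toString());
    indexWriter.addDocument(doc);
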
 
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
index 89b0c72..fbd9a74 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTreeTest.java
@@ -17,14 +17,10 @@
  * limitations under the License.
  */
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
-import com.spatial4j.core.shape.Shape;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
@@ -34,9 +30,10 @@
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.junit.Before;
 import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
 
 public class SpatialPrefixTreeTest extends SpatialTestCase {
 
@@ -85,21 +82,18 @@
 
     trie = new QuadPrefixTree(ctx, 12);
     TermQueryPrefixTreeStrategy strategy = new TermQueryPrefixTreeStrategy(trie, "geo");
-    Document doc = new Document();
-    doc.add(new TextField("id", "1", Store.YES));
+    Document doc = indexWriter.newDocument();
+    doc.addAtom("id", "1");
 
     Shape area = ctx.makeRectangle(-122.82, -122.78, 48.54, 48.56);
 
-    Field[] fields = strategy.createIndexableFields(area, 0.025);
-    for (Field field : fields) {
-      doc.add(field);
-    }
+    strategy.addFields(doc, area, 0.025);
     addDocument(doc);
 
     Point upperleft = ctx.makePoint(-122.88, 48.54);
     Point lowerright = ctx.makePoint(-122.82, 48.62);
 
-    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright)));
+    Query query = strategy.makeQuery(fieldTypes, new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright)));
 
     commit();
 
@@ -112,4 +106,4 @@
     assertEquals(1, search.totalHits);
   }
 
-}
\ No newline at end of file
+}
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/serialized/SerializedStrategyTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/serialized/SerializedStrategyTest.java
index 7ad5f2f..05cbf2c 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/serialized/SerializedStrategyTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/serialized/SerializedStrategyTest.java
@@ -47,7 +47,7 @@
   //called by StrategyTestCase; we can't let it call our makeQuery which will UOE ex.
   @Override
   protected Query makeQuery(SpatialTestQuery q) {
-    return new FilteredQuery(new MatchAllDocsQuery(), strategy.makeFilter(q.args),
+    return new FilteredQuery(new MatchAllDocsQuery(), strategy.makeFilter(fieldTypes, q.args),
         FilteredQuery.QUERY_FIRST_FILTER_STRATEGY);
   }
 
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java b/lucene/spatial/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
index e10fe7d..954c6fc 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
@@ -41,10 +41,11 @@
   }
 
   @Test
-  public void testCircleShapeSupport() {
+  public void testCircleShapeSupport() throws Exception {
+    adoc("1", ctx.makePoint(0, 0));
     Circle circle = ctx.makeCircle(ctx.makePoint(0, 0), 10);
     SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, circle);
-    Query query = this.strategy.makeQuery(args);
+    Query query = this.strategy.makeQuery(fieldTypes, args);
 
     assertNotNull(query);
   }
@@ -53,7 +54,7 @@
   public void testInvalidQueryShape() {
     Point point = ctx.makePoint(0, 0);
     SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, point);
-    this.strategy.makeQuery(args);
+    this.strategy.makeQuery(fieldTypes, args);
   }
 
   @Test
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
index 5171653..75955c8 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
@@ -23,9 +23,7 @@
 import java.util.List;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
@@ -373,7 +371,7 @@
       SuggestWord sugWord = new SuggestWord();
       for (int i = 0; i < stop; i++) {
 
-        sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word
+        sugWord.string = indexSearcher.doc(hits[i].doc).getString(F_WORD); // get orig word
 
         // don't suggest a word for itself, that would be silly
         if (sugWord.string.equals(word)) {
@@ -491,6 +489,23 @@
       ensureOpen();
       final Directory dir = this.spellIndex;
       final IndexWriter writer = new IndexWriter(dir, config);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      for(int ng=1;ng<=4;ng++) {
+        fieldTypes.disableStored("start" + ng);
+        fieldTypes.disableSorting("start" + ng);
+        fieldTypes.setMultiValued("start" + ng);
+
+        fieldTypes.disableStored("gram" + ng);
+        fieldTypes.disableFastRanges("gram" + ng);
+        fieldTypes.setIndexOptions("gram" + ng, IndexOptions.DOCS_AND_FREQS);
+        fieldTypes.disableSorting("gram" + ng);
+        fieldTypes.setMultiValued("gram" + ng);
+
+        fieldTypes.disableStored("end" + ng);
+        fieldTypes.disableSorting("end" + ng);
+        fieldTypes.setMultiValued("end" + ng);
+      }
+
       IndexSearcher indexSearcher = obtainSearcher();
       final List<TermsEnum> termsEnums = new ArrayList<>();
 
@@ -526,7 +541,7 @@
           }
   
           // ok index the word
-          Document doc = createDocument(word, getMin(len), getMax(len));
+          Document doc = createDocument(writer, word, getMin(len), getMax(len));
           writer.addDocument(doc);
         }
       } finally {
@@ -566,12 +581,12 @@
     return 2;
   }
 
-  private static Document createDocument(String text, int ng1, int ng2) {
-    Document doc = new Document();
+  private static Document createDocument(IndexWriter w, String text, int ng1, int ng2) {
+    Document doc = w.newDocument();
     // the word field is never queried on... it's indexed so it can be quickly
     // checked for rebuild (and stored for retrieval). Doesn't need norms or TF/pos
-    Field f = new StringField(F_WORD, text, Field.Store.YES);
-    doc.add(f); // orig term
+    // orig term
+    doc.addAtom(F_WORD, text);
     addGram(text, doc, ng1, ng2);
     return doc;
   }
@@ -583,23 +598,18 @@
       String end = null;
       for (int i = 0; i < len - ng + 1; i++) {
         String gram = text.substring(i, i + ng);
-        FieldType ft = new FieldType(StringField.TYPE_NOT_STORED);
-        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-        Field ngramField = new Field(key, gram, ft);
         // spellchecker does not use positional queries, but we want freqs
         // for scoring these multivalued n-gram fields.
-        doc.add(ngramField);
+        doc.addAtom(key, gram);
         if (i == 0) {
           // only one term possible in the startXXField, TF/pos and norms aren't needed.
-          Field startField = new StringField("start" + ng, gram, Field.Store.NO);
-          doc.add(startField);
+          doc.addAtom("start" + ng, gram);
         }
         end = gram;
       }
       if (end != null) { // may not be present if len==ng1
         // only one term possible in the endXXField, TF/pos and norms aren't needed.
-        Field endField = new StringField("end" + ng, end, Field.Store.NO);
-        doc.add(endField);
+        doc.addAtom("end" + ng, end);
       }
     }
   }
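The schema block added to indexDictionary is needed because the typed add methods appear to carry defaults (stored, sortable, single-valued for addAtom) that the spell checker's n-gram fields must opt out of before the first document is written. Condensed to one field family, the pattern is:

    FieldTypes fieldTypes = writer.getFieldTypes();
    for (int ng = 1; ng <= 4; ng++) {
      // many n-grams per word, scored by frequency: multi-valued,
      // freqs kept, nothing stored, no doc values for sorting
      fieldTypes.setMultiValued("gram" + ng);
      fieldTypes.setIndexOptions("gram" + ng, IndexOptions.DOCS_AND_FREQS);
      fieldTypes.disableStored("gram" + ng);
      fieldTypes.disableSorting("gram" + ng);
    }
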
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java
index a3c8212..006dbf2 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentDictionary.java
@@ -18,15 +18,17 @@
  */
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -118,7 +120,7 @@
     private BytesRef currentPayload = null;
     private Set<BytesRef> currentContexts;
     private final NumericDocValues weightValues;
-    StorableField[] currentDocFields = new StorableField[0];
+    List<IndexableField> currentDocFields = new ArrayList<>();
     int nextFieldsPosition = 0;
 
     /**
@@ -143,9 +145,9 @@
     @Override
     public BytesRef next() throws IOException {
       while (true) {
-        if (nextFieldsPosition < currentDocFields.length) {
+        if (nextFieldsPosition < currentDocFields.size()) {
           // Still values left from the document
-          StorableField fieldValue =  currentDocFields[nextFieldsPosition++];
+          IndexableField fieldValue =  currentDocFields.get(nextFieldsPosition++);
           if (fieldValue.binaryValue() != null) {
             return fieldValue.binaryValue();
           } else if (fieldValue.stringValue() != null) {
@@ -165,13 +167,13 @@
           continue;
         }
 
-        StoredDocument doc = reader.document(currentDocId, relevantFields);
+        Document doc = reader.document(currentDocId, relevantFields);
 
         Set<BytesRef> tempContexts = new HashSet<>();
 
         BytesRef tempPayload;
         if (hasPayloads) {
-          StorableField payload = doc.getField(payloadField);
+          IndexableField payload = doc.getField(payloadField);
           if (payload == null) {
             continue;
           } else if (payload.binaryValue() != null) {
@@ -186,8 +188,7 @@
         }
 
         if (hasContexts) {
-          final StorableField[] contextFields = doc.getFields(contextsField);
-          for (StorableField contextField : contextFields) {
+          for (IndexableField contextField : doc.getFields(contextsField)) {
             if (contextField.binaryValue() != null) {
               tempContexts.add(contextField.binaryValue());
             } else if (contextField.stringValue() != null) {
@@ -200,10 +201,10 @@
 
         currentDocFields = doc.getFields(field);
         nextFieldsPosition = 0;
-        if (currentDocFields.length == 0) { // no values in this document
+        if (currentDocFields.size() == 0) { // no values in this document
           continue;
         }
-        StorableField fieldValue = currentDocFields[nextFieldsPosition++];
+        IndexableField fieldValue = currentDocFields.get(nextFieldsPosition++);
         BytesRef tempTerm;
         if (fieldValue.binaryValue() != null) {
           tempTerm = fieldValue.binaryValue();
@@ -239,8 +240,8 @@
      * or if it's indexed as {@link NumericDocValues} (using <code>docId</code>) for the document.
      * If no value is found, then the weight is 0.
      */
-    protected long getWeight(StoredDocument doc, int docId) {
-      StorableField weight = doc.getField(weightField);
+    protected long getWeight(Document doc, int docId) {
+      IndexableField weight = doc.getField(weightField);
       if (weight != null) { // found weight as stored
         return (weight.numericValue() != null) ? weight.numericValue().longValue() : 0;
       } else if (weightValues != null) {  // found weight as NumericDocValue
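With StorableField and StoredDocument folded into IndexableField and Document, multi-valued stored fields come back as a List rather than an array, and values are probed by type. A sketch of the extraction loop this dictionary now runs (field is whatever name was passed to the dictionary's constructor):

    for (IndexableField value : doc.getFields(field)) {
      BytesRef term;
      if (value.binaryValue() != null) {          // stored binary value
        term = value.binaryValue();
      } else if (value.stringValue() != null) {   // stored string value
        term = new BytesRef(value.stringValue());
      } else {
        continue;                                 // numeric-only field: nothing usable
      }
      // ... hand term to the suggester
    }
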
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
index 9eedbc6..cfa30bf 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
@@ -21,10 +21,10 @@
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 
@@ -135,7 +135,7 @@
      * by the <code>weightsValueSource</code>
      * */
     @Override
-    protected long getWeight(StoredDocument doc, int docId) {    
+    protected long getWeight(Document doc, int docId) {
       if (currentWeightValues == null) {
         return 0;
       }
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
index b38d54d..a36e247 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
@@ -37,14 +37,8 @@
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterLeafReader;
@@ -104,9 +98,7 @@
  *  this suggester best applies when there is a strong
  *  a-priori ranking of all the suggestions.
  *
- *  <p>This suggester supports contexts, however the
- *  contexts must be valid utf8 (arbitrary binary terms will
- *  not work).
+ *  <p>This suggester supports arbitrary binary contexts.
  *
  * @lucene.experimental */    
 
@@ -115,6 +107,9 @@
   /** Field name used for the indexed text. */
   protected final static String TEXT_FIELD_NAME = "text";
 
+  /** Field name used for the binary doc values text. */
+  protected final static String BINARY_DV_TEXT_FIELD_NAME = "text_bdv";
+
   /** Field name used for the indexed text, as a
    *  StringField, for exact lookup. */
   protected final static String EXACT_TEXT_FIELD_NAME = "exacttext";
@@ -261,10 +256,9 @@
 
     boolean success = false;
     try {
-      // First pass: build a temporary normal Lucene index,
-      // just indexing the suggestions as they iterate:
       writer = new IndexWriter(dir,
                                getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE));
+      setFieldTypes(writer);
       //long t0 = System.nanoTime();
 
       // TODO: use threads?
@@ -332,6 +326,7 @@
       }
       writer = new IndexWriter(dir,
           getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE));
+      setFieldTypes(writer);
       searcherMgr = new SearcherManager(writer, true, null);
     }
   }
@@ -343,7 +338,7 @@
    *  see the suggestions in {@link #lookup} */
   public void add(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
     ensureOpen();
-    writer.addDocument(buildDocument(text, contexts, weight, payload));
+    writer.addDocument(buildDocument(writer, text, contexts, weight, payload));
   }
 
   /** Updates a previous suggestion, matching the exact same
@@ -356,27 +351,23 @@
   public void update(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
     ensureOpen();
     writer.updateDocument(new Term(EXACT_TEXT_FIELD_NAME, text.utf8ToString()),
-                          buildDocument(text, contexts, weight, payload));
+                          buildDocument(writer, text, contexts, weight, payload));
   }
 
-  private Document buildDocument(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
+  private Document buildDocument(IndexWriter writer, BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
     String textString = text.utf8ToString();
-    Document doc = new Document();
-    FieldType ft = getTextFieldType();
-    doc.add(new Field(TEXT_FIELD_NAME, textString, ft));
-    doc.add(new Field("textgrams", textString, ft));
-    doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO));
-    doc.add(new BinaryDocValuesField(TEXT_FIELD_NAME, text));
-    doc.add(new NumericDocValuesField("weight", weight));
+    Document doc = writer.newDocument();
+    doc.addLargeText(TEXT_FIELD_NAME, textString);
+    doc.addLargeText("textgrams", textString);
+    doc.addAtom(EXACT_TEXT_FIELD_NAME, textString);
+    doc.addBinary(BINARY_DV_TEXT_FIELD_NAME, text);
+    doc.addLong("weight", weight);
     if (payload != null) {
-      doc.add(new BinaryDocValuesField("payloads", payload));
+      doc.addBinary("payloads", payload);
     }
     if (contexts != null) {
       for(BytesRef context : contexts) {
-        // TODO: if we had a BinaryTermField we could fix
-        // this "must be valid ut8f" limitation:
-        doc.add(new StringField(CONTEXTS_FIELD_NAME, context.utf8ToString(), Field.Store.NO));
-        doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context));
+        doc.addAtom(CONTEXTS_FIELD_NAME, context);
       }
     }
     return doc;
@@ -396,12 +387,25 @@
    * Subclass can override this method to change the field type of the text field
    * e.g. to change the index options
    */
-  protected FieldType getTextFieldType(){
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS);
-    ft.setOmitNorms(true);
+  protected void setFieldTypes(IndexWriter writer) {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableHighlighting(TEXT_FIELD_NAME);
+    fieldTypes.disableNorms(TEXT_FIELD_NAME);
+    fieldTypes.disableStored(TEXT_FIELD_NAME);
+    fieldTypes.setIndexOptions(TEXT_FIELD_NAME, IndexOptions.DOCS);
 
-    return ft;
+    fieldTypes.disableStored(EXACT_TEXT_FIELD_NAME);
+
+    fieldTypes.disableHighlighting("textgrams");
+    fieldTypes.disableNorms("textgrams");
+    fieldTypes.disableStored("textgrams");
+    fieldTypes.setIndexOptions("textgrams", IndexOptions.DOCS);
+
+    fieldTypes.disableSorting(BINARY_DV_TEXT_FIELD_NAME);
+    fieldTypes.disableSorting("payloads");
+    fieldTypes.setIndexOptions("weight", IndexOptions.NONE);
+    fieldTypes.setMultiValued(CONTEXTS_FIELD_NAME);
+    fieldTypes.disableStored(CONTEXTS_FIELD_NAME);
   }
 
   @Override
@@ -518,7 +522,7 @@
         // do not make a subquery if all context booleans are must not
         if (allMustNot == true) {
           for (Map.Entry<BytesRef, BooleanClause.Occur> entry : contextInfo.entrySet()) {
-            query.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, entry.getKey().utf8ToString())), BooleanClause.Occur.MUST_NOT);
+            query.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, entry.getKey())), BooleanClause.Occur.MUST_NOT);
           }
 
         } else {
@@ -532,7 +536,5 @@
 
-            // TODO: if we had a BinaryTermField we could fix
-            // this "must be valid ut8f" limitation:
-            sub.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, entry.getKey().utf8ToString())), entry.getValue());
+            sub.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, entry.getKey())), entry.getValue());
           }
         }
       }
@@ -588,8 +592,8 @@
                                              boolean doHighlight, Set<String> matchedTokens, String prefixToken)
       throws IOException {
 
-    BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME);
+    BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), BINARY_DV_TEXT_FIELD_NAME);
 
     // This will just be null if app didn't pass payloads to build():
     // TODO: maybe just stored fields?  they compress...
     BinaryDocValues payloadsDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads");
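The user-visible change here is that contexts no longer round-trip through UTF-8 strings: addAtom accepts a BytesRef directly, and the context filter builds terms from the same bytes, which is what makes arbitrary binary contexts work. Sketched against the branch API above:

    // index side: one atom per context; the field is declared multi-valued
    fieldTypes.setMultiValued(CONTEXTS_FIELD_NAME);
    for (BytesRef context : contexts) {
      doc.addAtom(CONTEXTS_FIELD_NAME, context);         // was: context.utf8ToString()
    }

    // query side: Term(String, BytesRef) matches the raw bytes
    query.add(new TermQuery(new Term(CONTEXTS_FIELD_NAME, contextBytes)),
              occur);                                    // occur comes from the caller's context map
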
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index e49a886..473f0ce 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -26,11 +26,11 @@
 import java.util.TreeSet;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -157,14 +157,29 @@
   }
 
   @Override
-  protected FieldType getTextFieldType() {
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(true);
-    ft.setOmitNorms(true);
+  protected void setFieldTypes(IndexWriter writer) {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableHighlighting(TEXT_FIELD_NAME);
+    fieldTypes.disableNorms(TEXT_FIELD_NAME);
+    fieldTypes.disableStored(TEXT_FIELD_NAME);
+    fieldTypes.setIndexOptions(TEXT_FIELD_NAME, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.enableTermVectors(TEXT_FIELD_NAME);
+    fieldTypes.enableTermVectorPositions(TEXT_FIELD_NAME);
 
-    return ft;
+    fieldTypes.disableStored(EXACT_TEXT_FIELD_NAME);
+
+    fieldTypes.disableHighlighting("textgrams");
+    fieldTypes.disableNorms("textgrams");
+    fieldTypes.disableStored("textgrams");
+    fieldTypes.setIndexOptions("textgrams", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.enableTermVectors("textgrams");
+    fieldTypes.enableTermVectorPositions("textgrams");
+
+    fieldTypes.disableSorting(BINARY_DV_TEXT_FIELD_NAME);
+    fieldTypes.disableSorting("payloads");
+    fieldTypes.setIndexOptions("weight", IndexOptions.NONE);
+    fieldTypes.setMultiValued(CONTEXTS_FIELD_NAME);
+    fieldTypes.disableStored(CONTEXTS_FIELD_NAME);
   }
 
   @Override
@@ -172,8 +187,7 @@
                                                     boolean doHighlight, Set<String> matchedTokens, String prefixToken)
       throws IOException {
 
-    BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME);
-    assert textDV != null;
+    BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), BINARY_DV_TEXT_FIELD_NAME);
 
     // This will just be null if app didn't pass payloads to build():
     // TODO: maybe just stored fields?  they compress...
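BlendedInfixSuggester overrides the schema because it re-reads term positions at lookup time (note the DocsAndPositionsEnum import above) to weight suggestions by where the match occurs, so it keeps positions and term vectors that the parent suggester deliberately drops. The deltas, sketched:

    fieldTypes.setIndexOptions(TEXT_FIELD_NAME, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    fieldTypes.enableTermVectors(TEXT_FIELD_NAME);          // was: ft.setStoreTermVectors(true)
    fieldTypes.enableTermVectorPositions(TEXT_FIELD_NAME);  // was: ft.setStoreTermVectorPositions(true)
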
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
index 66b2ffd..352efd4 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
@@ -42,9 +42,7 @@
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
@@ -313,15 +311,10 @@
     iwc.setRAMBufferSizeMB(ramBufferSizeMB);
     IndexWriter writer = new IndexWriter(dir, iwc);
 
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    // TODO: if only we had IndexOptions.TERMS_ONLY...
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-    ft.setOmitNorms(true);
-    ft.freeze();
-
-    Document doc = new Document();
-    Field field = new Field("body", "", ft);
-    doc.add(field);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableHighlighting("body");
+    fieldTypes.disableNorms("body");
+    fieldTypes.setIndexOptions("body", IndexOptions.DOCS_AND_FREQS);
 
     totTokens = 0;
     IndexReader reader = null;
@@ -334,7 +327,8 @@
         if (surfaceForm == null) {
           break;
         }
-        field.setStringValue(surfaceForm.utf8ToString());
+        Document doc = writer.newDocument();
+        doc.addLargeText("body", surfaceForm.utf8ToString());
         writer.addDocument(doc);
         count++;
       }
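The old build loop reused a single frozen Field and called setStringValue per suggestion; with documents minted by the writer, the natural (if slightly more allocation-heavy) idiom is one fresh document per surface form. A sketch:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableNorms("body");
    fieldTypes.setIndexOptions("body", IndexOptions.DOCS_AND_FREQS); // freqs only, no positions

    BytesRef surfaceForm;
    while ((surfaceForm = iterator.next()) != null) {
      Document doc = writer.newDocument();                 // was: one reused Document/Field pair
      doc.addLargeText("body", surfaceForm.utf8ToString());
      writer.addDocument(doc);
    }
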
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
index 647e298..c2662a7 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
@@ -20,7 +20,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -38,8 +37,8 @@
 
     String[] termsToAdd = { "metanoia", "metanoian", "metanoiai", "metanoias", "metanoi𐑍" };
     for (int i = 0; i < termsToAdd.length; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("repentance", termsToAdd[i], Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("repentance", termsToAdd[i]);
       writer.addDocument(doc);
     }
 
@@ -67,8 +66,8 @@
         new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
 
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
       writer.addDocument(doc);
     }
 
@@ -107,8 +106,8 @@
 
     // add some more documents
     for (int i = 1000; i < 1100; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
       writer.addDocument(doc);
     }
 
@@ -131,14 +130,20 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, 
         new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
 
-    Document doc = new Document();
-    doc.add(newTextField("text", "foobar", Field.Store.NO));
+    Document doc = writer.newDocument();
+    doc.addLargeText("text", "foobar");
     writer.addDocument(doc);
-    doc.add(newTextField("text", "foobar", Field.Store.NO));
+
+    doc = writer.newDocument();
+    doc.addLargeText("text", "foobar");
     writer.addDocument(doc);
-    doc.add(newTextField("text", "foobaz", Field.Store.NO));
+
+    doc = writer.newDocument();
+    doc.addLargeText("text", "foobaz");
     writer.addDocument(doc);
-    doc.add(newTextField("text", "fobar", Field.Store.NO));
+
+    doc = writer.newDocument();
+    doc.addLargeText("text", "fobar");
     writer.addDocument(doc);
    
     IndexReader ir = writer.getReader();
@@ -199,8 +204,8 @@
         new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
 
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
       writer.addDocument(doc);
     }
 
@@ -223,8 +228,8 @@
         new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
 
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
       writer.addDocument(doc);
     }
 
@@ -248,8 +253,8 @@
         new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
 
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
       writer.addDocument(doc);
     }
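Beyond the API migration, the foobar/foobaz hunk above fixes a quiet test bug: the old code kept calling doc.add on the same Document between addDocument calls, so each successive document was indexed with all previously added values as well. The corrected idiom is one document per add:

    Document doc = writer.newDocument();
    doc.addLargeText("text", "foobar");
    writer.addDocument(doc);

    doc = writer.newDocument();         // fresh document; reusing the old one would
    doc.addLargeText("text", "foobaz"); // have re-indexed "foobar" alongside "foobaz"
    writer.addDocument(doc);
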
 
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
index 169246a..04081b7 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -53,24 +52,24 @@
 
     Document doc;
 
-    doc = new  Document();
-    doc.add(newTextField("aaa", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("aaa", "foo");
     writer.addDocument(doc);
 
-    doc = new  Document();
-    doc.add(newTextField("aaa", "foo", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("aaa", "foo");
     writer.addDocument(doc);
 
-    doc = new  Document();
-    doc.add(newTextField("contents", "Tom", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "Tom");
     writer.addDocument(doc);
 
-    doc = new  Document();
-    doc.add(newTextField("contents", "Jerry", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("contents", "Jerry");
     writer.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("zzz", "bar", Field.Store.YES));
+    doc = writer.newDocument();
+    doc.addLargeText("zzz", "bar");
     writer.addDocument(doc);
 
     writer.forceMerge(1);
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
index 70e8306..cf7379f 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
@@ -29,7 +29,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -58,28 +57,28 @@
     IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(new MockAnalyzer(random())));
 
     for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("field1", English.intToEnglish(i), Field.Store.YES));
-      doc.add(newTextField("field2", English.intToEnglish(i + 1), Field.Store.YES)); // + word thousand
-      doc.add(newTextField("field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES)); // + word thousand
+      Document doc = writer.newDocument();
+      doc.addLargeText("field1", English.intToEnglish(i));
+      doc.addLargeText("field2", English.intToEnglish(i + 1)); // + word thousand
+      doc.addLargeText("field3", "fvei" + (i % 2 == 0 ? " five" : "")); // + word thousand
       writer.addDocument(doc);
     }
     {
-      Document doc = new Document();
-      doc.add(newTextField("field1", "eight", Field.Store.YES)); // "eight" in
+      Document doc = writer.newDocument();
+      doc.addLargeText("field1", "eight"); // "eight" in
                                                                    // the index
                                                                    // twice
       writer.addDocument(doc);
     }
     {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       doc
-          .add(newTextField("field1", "twenty-one twenty-one", Field.Store.YES)); // "twenty-one" in the index thrice
+          .addLargeText("field1", "twenty-one twenty-one"); // "twenty-one" in the index thrice
       writer.addDocument(doc);
     }
     {
-      Document doc = new Document();
-      doc.add(newTextField("field1", "twenty", Field.Store.YES)); // "twenty"
+      Document doc = writer.newDocument();
+      doc.addLargeText("field1", "twenty"); // "twenty"
                                                                     // in the
                                                                     // index
                                                                     // twice
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java
index de79db1..aaaf8f2 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java
@@ -26,7 +26,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -47,25 +46,25 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true));
 
     for (int i = 900; i < 1112; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       String num = English.intToEnglish(i).replaceAll("[-]", " ").replaceAll("[,]", "");
-      doc.add(newTextField("numbers", num, Field.Store.NO));
+      doc.addLargeText("numbers", num);
       writer.addDocument(doc);
     }
     
     {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", "thou hast sand betwixt thy toes", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", "thou hast sand betwixt thy toes");
       writer.addDocument(doc);
     }
     {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", "hundredeight eightyeight yeight", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", "hundredeight eightyeight yeight");
       writer.addDocument(doc);
     }
     {
-      Document doc = new Document();
-      doc.add(newTextField("numbers", "tres y cinco", Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addLargeText("numbers", "tres y cinco");
       writer.addDocument(doc);
     }
     
@@ -293,10 +292,9 @@
         broken[0] = orig.substring(0, breakAt);
         broken[1] = orig.substring(breakAt);
         breaks.add(broken);
-        Document doc = new Document();
-        doc.add(newTextField("random_break", broken[0] + " " + broken[1],
-            Field.Store.NO));
-        doc.add(newTextField("random_combine", orig, Field.Store.NO));
+        Document doc = writer.newDocument();
+        doc.addLargeText("random_break", broken[0] + " " + broken[1]);
+        doc.addLargeText("random_combine", orig);
         writer.addDocument(doc);
       }
       writer.commit();
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
index 2ebf115..fa156c2 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
@@ -13,15 +13,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.store.Directory;
@@ -54,18 +51,23 @@
   static final String CONTEXT_FIELD_NAME = "c1";
   
   /** Returns Pair(list of invalid document terms, Map of document term -&gt; document) */
-  private Map.Entry<List<String>, Map<String, Document>> generateIndexDocuments(int ndocs, boolean requiresPayload, boolean requiresContexts) {
+  private Map.Entry<List<String>, Map<String, Document>> generateIndexDocuments(RandomIndexWriter writer, int ndocs, boolean requiresPayload, boolean requiresContexts) {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued(CONTEXT_FIELD_NAME);
+
     Map<String, Document> docs = new HashMap<>();
     List<String> invalidDocTerms = new ArrayList<>();
+    boolean useStoredFieldWeights = rarely();
     for(int i = 0; i < ndocs ; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       boolean invalidDoc = false;
-      Field field = null;
       // usually have valid term field in document
+      String term;
       if (usually()) {
-        field = new TextField(FIELD_NAME, "field_" + i, Field.Store.YES);
-        doc.add(field);
+        term = "field_" + i;
+        doc.addLargeText(FIELD_NAME, term);
       } else {
+        term = "invalid_" + i;
         invalidDoc = true;
       }
       
@@ -73,8 +75,7 @@
       if (requiresPayload || usually()) {
         // usually have valid payload field in document
         if (usually()) {
-          Field payload = new StoredField(PAYLOAD_FIELD_NAME, new BytesRef("payload_" + i));
-          doc.add(payload);
+          doc.addStoredBinary(PAYLOAD_FIELD_NAME, new BytesRef("payload_" + i));
         } else if (requiresPayload) {
           invalidDoc = true;
         }
@@ -83,7 +84,7 @@
       if (requiresContexts || usually()) {
         if (usually()) {
           for (int j = 0; j < atLeast(2); j++) {
-            doc.add(new StoredField(CONTEXT_FIELD_NAME, new BytesRef("context_" + i + "_"+ j)));
+            doc.addStoredBinary(CONTEXT_FIELD_NAME, new BytesRef("context_" + i + "_"+ j));
           }
         }
         // we should allow entries without context
@@ -91,18 +92,15 @@
       
       // usually have valid weight field in document
       if (usually()) {
-        Field weight = (rarely()) ? 
-            new StoredField(WEIGHT_FIELD_NAME, 100d + i) : 
-            new NumericDocValuesField(WEIGHT_FIELD_NAME, 100 + i);
-        doc.add(weight);
+        if (useStoredFieldWeights) {
+          doc.addStoredDouble(WEIGHT_FIELD_NAME, 100d + i);
+        } else {
+          doc.addLong(WEIGHT_FIELD_NAME, 100 + i);
+        }
       }
       
-      String term = null;
       if (invalidDoc) {
-        term = (field!=null) ? field.stringValue() : "invalid_" + i;
         invalidDocTerms.add(term);
-      } else {
-        term = field.stringValue();
       }
       
       docs.put(term, doc);
@@ -137,7 +135,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(atLeast(1000), true, false);
+    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(writer, atLeast(1000), true, false);
     Map<String, Document> docs = res.getValue();
     List<String> invalidDocTerms = res.getKey();
     for(Document doc: docs.values()) {
@@ -151,8 +149,8 @@
     BytesRef f;
     while((f = inputIterator.next())!=null) {
       Document doc = docs.remove(f.utf8ToString());
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
-      Field weightField = doc.getField(WEIGHT_FIELD_NAME);
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
+      IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME);
       assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0);
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
     }
@@ -172,7 +170,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(atLeast(1000), false, false);
+    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(writer, atLeast(1000), false, false);
     Map<String, Document> docs = res.getValue();
     List<String> invalidDocTerms = res.getKey();
     for(Document doc: docs.values()) {
@@ -186,8 +184,8 @@
     BytesRef f;
     while((f = inputIterator.next())!=null) {
       Document doc = docs.remove(f.utf8ToString());
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
-      Field weightField = doc.getField(WEIGHT_FIELD_NAME);
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
+      IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME);
       assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0);
       assertEquals(inputIterator.payload(), null);
     }
@@ -208,7 +206,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(atLeast(1000), true, true);
+    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(writer, atLeast(1000), true, true);
     Map<String, Document> docs = res.getValue();
     List<String> invalidDocTerms = res.getKey();
     for(Document doc: docs.values()) {
@@ -222,13 +220,13 @@
     BytesRef f;
     while((f = inputIterator.next())!=null) {
       Document doc = docs.remove(f.utf8ToString());
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
-      Field weightField = doc.getField(WEIGHT_FIELD_NAME);
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
+      IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME);
       assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0);
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
       Set<BytesRef> oriCtxs = new HashSet<>();
       Set<BytesRef> contextSet = inputIterator.contexts();
-      for (StorableField ctxf : doc.getFields(CONTEXT_FIELD_NAME)) {
+      for (IndexableField ctxf : doc.getFields(CONTEXT_FIELD_NAME)) {
         oriCtxs.add(ctxf.binaryValue());
       }
       assertEquals(oriCtxs.size(), contextSet.size());
@@ -249,15 +247,15 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(atLeast(1000), false, false);
+    Map.Entry<List<String>, Map<String, Document>> res = generateIndexDocuments(writer, atLeast(1000), false, false);
     Map<String, Document> docs = res.getValue();
     List<String> invalidDocTerms = res.getKey();
     Random rand = random();
     List<String> termsToDel = new ArrayList<>();
     for(Document doc : docs.values()) {
-      StorableField f = doc.getField(FIELD_NAME);
+      IndexableField f = doc.getField(FIELD_NAME);
       if(rand.nextBoolean() && f != null && !invalidDocTerms.contains(f.stringValue())) {
-        termsToDel.add(doc.get(FIELD_NAME));
+        termsToDel.add(doc.getString(FIELD_NAME));
       }
       writer.addDocument(doc);
     }
@@ -285,8 +283,8 @@
     BytesRef f;
     while((f = inputIterator.next())!=null) {
       Document doc = docs.remove(f.utf8ToString());
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
-      Field weightField = doc.getField(WEIGHT_FIELD_NAME);
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
+      IndexableField weightField = doc.getField(WEIGHT_FIELD_NAME);
       assertEquals(inputIterator.weight(), (weightField != null) ? weightField.numericValue().longValue() : 0);
       assertEquals(inputIterator.payload(), null);
     }
@@ -331,38 +329,35 @@
 
   private List<Suggestion> indexMultiValuedDocuments(int numDocs, RandomIndexWriter writer) throws IOException {
     List<Suggestion> suggestionList = new ArrayList<>(numDocs);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued(FIELD_NAME);
 
     for(int i=0; i<numDocs; i++) {
-      Document doc = new Document();
-      Field field;
+      Document doc = writer.newDocument();
       BytesRef payloadValue;
       Set<BytesRef> contextValues = new HashSet<>();
       long numericValue = -1; //-1 for missing weight
       BytesRef term;
 
       payloadValue = new BytesRef("payload_" + i);
-      field = new StoredField(PAYLOAD_FIELD_NAME, payloadValue);
-      doc.add(field);
+      doc.addStoredBinary(PAYLOAD_FIELD_NAME, payloadValue);
 
       if (usually()) {
         numericValue = 100 + i;
-        field = new NumericDocValuesField(WEIGHT_FIELD_NAME, numericValue);
-        doc.add(field);
+        doc.addLong(WEIGHT_FIELD_NAME, numericValue);
       }
 
       int numContexts = atLeast(1);
       for (int j=0; j<numContexts; j++) {
         BytesRef contextValue = new BytesRef("context_" + i + "_" + j);
-        field = new StoredField(CONTEXT_FIELD_NAME, contextValue);
-        doc.add(field);
+        doc.addStoredBinary(CONTEXT_FIELD_NAME, contextValue);
         contextValues.add(contextValue);
       }
 
       int numSuggestions = atLeast(2);
       for (int j=0; j<numSuggestions; j++) {
         term = new BytesRef("field_" + i + "_" + j);
-        field = new StoredField(FIELD_NAME, term);
-        doc.add(field);
+        doc.addStoredBinary(FIELD_NAME, term);
 
         Suggestion suggestionValue = new Suggestion();
         suggestionValue.payload = payloadValue;
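Repeated stored values on one document follow the same declare-then-add rule: the field must be registered multi-valued before the second addStoredBinary, presumably because a single-valued field would reject the repeat. Sketch:

    fieldTypes.setMultiValued(FIELD_NAME);                 // once, before indexing
    for (int j = 0; j < numSuggestions; j++) {
      doc.addStoredBinary(FIELD_NAME, new BytesRef("field_" + i + "_" + j));
    }
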
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
index 0bacfbb..e30d65c 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
@@ -28,13 +28,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
@@ -56,27 +54,22 @@
   static final String PAYLOAD_FIELD_NAME = "p1";
   static final String CONTEXTS_FIELD_NAME = "c1";
 
-  private Map<String, Document> generateIndexDocuments(int ndocs) {
+  private Map<String, Document> generateIndexDocuments(RandomIndexWriter writer, int ndocs) {
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued(CONTEXTS_FIELD_NAME);
     Map<String, Document> docs = new HashMap<>();
     for(int i = 0; i < ndocs ; i++) {
-      Field field = new TextField(FIELD_NAME, "field_" + i, Field.Store.YES);
-      Field payload = new StoredField(PAYLOAD_FIELD_NAME, new BytesRef("payload_" + i));
-      Field weight1 = new NumericDocValuesField(WEIGHT_FIELD_NAME_1, 10 + i);
-      Field weight2 = new NumericDocValuesField(WEIGHT_FIELD_NAME_2, 20 + i);
-      Field weight3 = new NumericDocValuesField(WEIGHT_FIELD_NAME_3, 30 + i);
-      Field contexts = new StoredField(CONTEXTS_FIELD_NAME, new BytesRef("ctx_"  + i + "_0"));
-      Document doc = new Document();
-      doc.add(field);
-      doc.add(payload);
-      doc.add(weight1);
-      doc.add(weight2);
-      doc.add(weight3);
-      doc.add(contexts);
+      Document doc = writer.newDocument();
+      doc.addLargeText(FIELD_NAME, "field_" + i);
+      doc.addStoredBinary(PAYLOAD_FIELD_NAME, new BytesRef("payload_" + i));
+      doc.addInt(WEIGHT_FIELD_NAME_1, 10 + i);
+      doc.addInt(WEIGHT_FIELD_NAME_2, 20 + i);
+      doc.addInt(WEIGHT_FIELD_NAME_3, 30 + i);
+      doc.addStoredBinary(CONTEXTS_FIELD_NAME, new BytesRef("ctx_"  + i + "_0"));
       for(int j = 1; j < atLeast(3); j++) {
-        contexts.setBytesValue(new BytesRef("ctx_" + i + "_" + j));
-        doc.add(contexts);
+        doc.addStoredBinary(CONTEXTS_FIELD_NAME, new BytesRef("ctx_" + i + "_" + j));
       }
-      docs.put(field.stringValue(), doc);
+      docs.put("field_" + i, doc);
     }
     return docs;
   }
@@ -108,7 +101,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Map<String, Document> docs = generateIndexDocuments(writer, atLeast(100));
     for(Document doc: docs.values()) {
       writer.addDocument(doc);
     }
@@ -125,7 +118,7 @@
       long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
       long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
       long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
       assertEquals(inputIterator.weight(), (w1 + w2 + w3));
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
     }
@@ -140,7 +133,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Map<String, Document> docs = generateIndexDocuments(writer, atLeast(100));
     for(Document doc: docs.values()) {
       writer.addDocument(doc);
     }
@@ -157,11 +150,11 @@
       long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
       long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
       long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
       assertEquals(inputIterator.weight(), (w1 + w2 + w3));
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
       Set<BytesRef> originalCtxs = new HashSet<>();
-      for (Field ctxf: doc.getFields(CONTEXTS_FIELD_NAME)) {
+      for (IndexableField ctxf: doc.getFields(CONTEXTS_FIELD_NAME)) {
         originalCtxs.add(ctxf.binaryValue());
       }
       assertEquals(originalCtxs, inputIterator.contexts());
@@ -177,7 +170,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Map<String, Document> docs = generateIndexDocuments(writer, atLeast(100));
     for(Document doc: docs.values()) {
       writer.addDocument(doc);
     }
@@ -194,7 +187,7 @@
       long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
       long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
       long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
       assertEquals(inputIterator.weight(), (w1 + w2 + w3));
       assertEquals(inputIterator.payload(), null);
     }
@@ -209,12 +202,12 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Map<String, Document> docs = generateIndexDocuments(writer, atLeast(100));
     Random rand = random();
     List<String> termsToDel = new ArrayList<>();
     for(Document doc : docs.values()) {
       if(rand.nextBoolean() && termsToDel.size() < docs.size()-1) {
-        termsToDel.add(doc.get(FIELD_NAME));
+        termsToDel.add(doc.getString(FIELD_NAME));
       }
       writer.addDocument(doc);
     }
@@ -247,7 +240,7 @@
       Document doc = docs.remove(f.utf8ToString());
       long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
       long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
       assertEquals(inputIterator.weight(), w2+w1);
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
     }
@@ -263,7 +256,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Map<String, Document> docs = generateIndexDocuments(writer, atLeast(100));
     for(Document doc: docs.values()) {
       writer.addDocument(doc);
     }
@@ -276,7 +269,7 @@
     BytesRef f;
     while((f = inputIterator.next())!=null) {
       Document doc = docs.remove(f.utf8ToString());
-      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertTrue(f.equals(new BytesRef(doc.getString(FIELD_NAME))));
       assertEquals(inputIterator.weight(), 10);
       assertTrue(inputIterator.payload().equals(doc.getField(PAYLOAD_FIELD_NAME).binaryValue()));
     }
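
Note the schema call that makes the repeated stored values above legal: CONTEXTS_FIELD_NAME is first marked multi-valued. A minimal sketch of that pattern, assuming the branch's FieldTypes API as used in this hunk:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued("c1");                     // allow several values per document
    Document doc = writer.newDocument();
    doc.addStoredBinary("c1", new BytesRef("ctx_0_0"));
    doc.addStoredBinary("c1", new BytesRef("ctx_0_1"));  // second value for the same field
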
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
index c8fc92d..2717286 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
@@ -922,10 +922,64 @@
     for(String value : values) {
       result.add(new BytesRef(value));
     }
-
     return result;
   }
 
+  private Set<BytesRef> asSet(byte[]... values) {
+    HashSet<BytesRef> result = new HashSet<>();
+    for(byte[] value : values) {
+      result.add(new BytesRef(value));
+    }
+    return result;
+  }
+
+  public void testBinaryContext() throws Exception {
+    Input keys[] = new Input[] {
+      new Input("lend me your ear", 8, new BytesRef("foobar"), asSet(new byte[1], new byte[4])),
+      new Input("a penny saved is a penny earned", 10, new BytesRef("foobaz"), asSet(new byte[1], new byte[3]))
+    };
+
+    Path tempDir = createTempDir();
+
+    for(int iter=0;iter<2;iter++) {
+      AnalyzingInfixSuggester suggester;
+      Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
+      if (iter == 0) {
+        suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
+        suggester.build(new InputArrayIterator(keys));
+      } else {
+        // Test again, after close/reopen:
+        suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
+      }
+
+      // Both have new byte[1] context:
+      List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), asSet(new byte[1]), 10, true, true);
+      assertEquals(2, results.size());
+
+      LookupResult result = results.get(0);
+      assertEquals("a penny saved is a penny earned", result.key);
+      assertEquals("a penny saved is a penny <b>ear</b>ned", result.highlightKey);
+      assertEquals(10, result.value);
+      assertEquals(new BytesRef("foobaz"), result.payload);
+      assertNotNull(result.contexts);
+      assertEquals(2, result.contexts.size());
+      assertTrue(result.contexts.contains(new BytesRef(new byte[1])));
+      assertTrue(result.contexts.contains(new BytesRef(new byte[3])));
+
+      result = results.get(1);
+      assertEquals("lend me your ear", result.key);
+      assertEquals("lend me your <b>ear</b>", result.highlightKey);
+      assertEquals(8, result.value);
+      assertEquals(new BytesRef("foobar"), result.payload);
+      assertNotNull(result.contexts);
+      assertEquals(2, result.contexts.size());
+      assertTrue(result.contexts.contains(new BytesRef(new byte[1])));
+      assertTrue(result.contexts.contains(new BytesRef(new byte[4])));
+
+      suggester.close();
+    }
+  }
+
   // LUCENE-5528
   public void testBasicContext() throws Exception {
     Input keys[] = new Input[] {
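
For reference, a hedged sketch of the binary-context lookup that testBinaryContext exercises; as the assertions above suggest, suggestions tagged with at least one of the given contexts are returned:

    Set<BytesRef> contexts = new HashSet<>();
    contexts.add(new BytesRef(new byte[1]));  // arbitrary binary context bytes
    List<LookupResult> hits = suggester.lookup("ear", contexts, 10, true, true);
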
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
index 8393759..4458212 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
@@ -47,13 +47,11 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.search.suggest.Input;
 import org.apache.lucene.search.suggest.InputArrayIterator;
-import org.apache.lucene.util.AttributeFactory;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LineFileDocsText;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -159,14 +157,13 @@
   }
   
   public void testRandomRealisticKeys() throws IOException {
-    LineFileDocs lineFile = new LineFileDocs(random());
+    LineFileDocsText lineFile = new LineFileDocsText(random());
     Map<String, Long> mapping = new HashMap<>();
     List<Input> keys = new ArrayList<>();
     
     int howMany = atLeast(100); // this might bring up duplicates
     for (int i = 0; i < howMany; i++) {
-      Document nextDoc = lineFile.nextDoc();
-      String title = nextDoc.getField("title").stringValue();
+      String title = lineFile.nextDoc().title;
       int randomWeight = random().nextInt(100);
       keys.add(new Input(title, randomWeight));
       if (!mapping.containsKey(title) || mapping.get(title) < randomWeight) {
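
LineFileDocsText replaces the Document-returning LineFileDocs in these suggester tests; a sketch of the simplified access pattern (the title and body struct fields are as used in this patch):

    LineFileDocsText lineFile = new LineFileDocsText(random());
    String title = lineFile.nextDoc().title;  // direct field access; no Field lookup
    String body  = lineFile.nextDoc().body;   // each nextDoc() call advances one line
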
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
index 84bdf2b..2a24ab0 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
@@ -39,10 +39,10 @@
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.search.suggest.Input;
 import org.apache.lucene.search.suggest.InputArrayIterator;
 import org.apache.lucene.search.suggest.InputIterator;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
@@ -131,7 +131,7 @@
 
   @Ignore
   public void testWiki() throws Exception {
-    final LineFileDocs lfd = new LineFileDocs(null, "/lucenedata/enwiki/enwiki-20120502-lines-1k.txt", false);
+    final LineFileDocs lfd = new LineFileDocs(null, null, "/lucenedata/enwiki/enwiki-20120502-lines-1k.txt");
     // Skip header:
     lfd.nextDoc();
     FreeTextSuggester sug = new FreeTextSuggester(new MockAnalyzer(random()));
@@ -158,7 +158,7 @@
           if (count++ == 10000) {
             return null;
           }
-          return new BytesRef(doc.get("body"));
+          return new BytesRef(doc.getString("body"));
         }
 
         @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
index 93a07a5..9806efb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
@@ -31,9 +31,7 @@
 
 import org.apache.lucene.analysis.tokenattributes.*;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
@@ -41,7 +39,7 @@
 import org.apache.lucene.util.AttributeFactory;
 import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LineFileDocsText;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Rethrow;
 import org.apache.lucene.util.TestUtil;
@@ -546,37 +544,54 @@
 
   private static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter, boolean simple, boolean offsetsAreCorrect, RandomIndexWriter iw) throws IOException {
 
-    final LineFileDocs docs = new LineFileDocs(random);
-    Document doc = null;
-    Field field = null, currentField = null;
     StringReader bogus = new StringReader("");
+    Document doc = null;
+    final LineFileDocsText docs = new LineFileDocsText(random);
     if (iw != null) {
-      doc = new Document();
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
+      FieldTypes fieldTypes = iw.getFieldTypes();
+      doc = iw.newDocument();
+
+      // Randomize how we index the field:
       if (random.nextBoolean()) {
-        ft.setStoreTermVectors(true);
-        ft.setStoreTermVectorOffsets(random.nextBoolean());
-        ft.setStoreTermVectorPositions(random.nextBoolean());
-        if (ft.storeTermVectorPositions()) {
-          ft.setStoreTermVectorPayloads(random.nextBoolean());
+        fieldTypes.enableTermVectors("dummy");
+        if (random.nextBoolean()) {
+          fieldTypes.enableTermVectorOffsets("dummy");
+        }
+        if (random.nextBoolean()) {
+          fieldTypes.enableTermVectorPositions("dummy");
+          if (random.nextBoolean()) {
+            fieldTypes.enableTermVectorPayloads("dummy");
+          }
         }
       }
+
       if (random.nextBoolean()) {
-        ft.setOmitNorms(true);
+        fieldTypes.disableNorms("dummy");
       }
+
+      IndexOptions indexOptions;
+
       switch(random.nextInt(4)) {
-        case 0: ft.setIndexOptions(IndexOptions.DOCS); break;
-        case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break;
-        case 2: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break;
-        default:
-          if (offsetsAreCorrect) {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-          } else {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-          }
+      case 0:
+        indexOptions = IndexOptions.DOCS;
+        break;
+      case 1:
+        indexOptions = IndexOptions.DOCS_AND_FREQS;
+        break;
+      case 2:
+        indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        break;
+      default:
+        if (offsetsAreCorrect) {
+          indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+        } else {
+          indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        }
       }
-      currentField = field = new Field("dummy", bogus, ft);
-      doc.add(currentField);
+      fieldTypes.disableHighlighting("dummy");
+      fieldTypes.disableStored("dummy");
+      fieldTypes.setIndexOptions("dummy", indexOptions);
+      fieldTypes.setMultiValued("dummy");
     }
     
     try {
@@ -585,7 +600,7 @@
         
         if (random.nextInt(10) == 7) {
           // real data from linedocs
-          text = docs.nextDoc().get("body");
+          text = docs.nextDoc().body;
           if (text.length() > maxWordLength) {
             
             // Take a random slice from the text...:
@@ -608,22 +623,11 @@
         }
         
         try {
-          checkAnalysisConsistency(random, a, useCharFilter, text, offsetsAreCorrect, currentField);
-          if (iw != null) {
-            if (random.nextInt(7) == 0) {
-              // pile up a multivalued field
-              FieldType ft = field.fieldType();
-              currentField = new Field("dummy", bogus, ft);
-              doc.add(currentField);
-            } else {
-              iw.addDocument(doc);
-              if (doc.getFields().size() > 1) {
-                // back to 1 field
-                currentField = field;
-                doc.removeFields("dummy");
-                doc.add(currentField);
-              }
-            }
+          checkAnalysisConsistency(random, a, useCharFilter, text, offsetsAreCorrect, doc);
+          // Most of the time add the doc; occasionally hold it back so the next
+          // value piles up in the same doc as a multi-valued field:
+          if (iw != null && random.nextInt(7) != 0) {
+            iw.addDocument(doc);
+            doc = iw.newDocument();
           }
         } catch (Throwable t) {
           // TODO: really we should pass a random seed to
@@ -674,7 +678,7 @@
     checkAnalysisConsistency(random, a, useCharFilter, text, offsetsAreCorrect, null);
   }
   
-  private static void checkAnalysisConsistency(Random random, Analyzer a, boolean useCharFilter, String text, boolean offsetsAreCorrect, Field field) throws IOException {
+  private static void checkAnalysisConsistency(Random random, Analyzer a, boolean useCharFilter, String text, boolean offsetsAreCorrect, Document doc) throws IOException {
 
     if (VERBOSE) {
       System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: get first token stream now text=" + text);
@@ -868,7 +872,7 @@
                                 tokens.toArray(new String[tokens.size()]));
     }
     
-    if (field != null) {
+    if (doc != null) {
       reader = new StringReader(text);
       random = new Random(seed);
       if (random.nextInt(30) == 7) {
@@ -878,8 +882,7 @@
 
         reader = new MockReaderWrapper(random, reader);
       }
-
-      field.setReaderValue(useCharFilter ? new MockCharFilter(reader, remainder) : reader);
+      doc.addLargeText("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader);
     }
   }
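
The per-Field FieldType randomization becomes per-name schema calls on FieldTypes; a hedged sketch of configuring the "dummy" field, using only the methods that appear in this hunk:

    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.enableTermVectors("dummy");
    fieldTypes.enableTermVectorPositions("dummy");
    fieldTypes.disableNorms("dummy");
    fieldTypes.setIndexOptions("dummy", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    fieldTypes.setMultiValued("dummy");  // repeated addLargeText calls accumulate values
    fieldTypes.disableStored("dummy");
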
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
index e4e0877..fde5c34 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
@@ -24,15 +24,11 @@
 
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -62,9 +58,9 @@
                                             BytesRef secondEnd) throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
-    Document doc = new Document();
-    doc.add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
-    doc.add(new StringField("body", "body", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "\u0633\u0627\u0628");
+    doc.addAtom("body", "body");
     writer.addDocument(doc);
     writer.close();
     IndexReader reader = DirectoryReader.open(dir);
@@ -93,13 +89,13 @@
                                             BytesRef secondEnd) throws Exception {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
-    Document doc = new Document();
+    Document doc = writer.newDocument();
 
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
     // orders the U+0698 character before the U+0633 character, so the single
     // index Term below should NOT be returned by a TermRangeQuery with a Farsi
     // Collator (or an Arabic one for the case when Farsi is not supported).
-    doc.add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
+    doc.addLargeText("content", "\u0633\u0627\u0628");
     writer.addDocument(doc);
     writer.close();
     IndexReader reader = DirectoryReader.open(dir);
@@ -121,9 +117,9 @@
 
     Directory farsiIndex = newDirectory();
     IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(analyzer));
-    Document doc = new Document();
-    doc.add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
-    doc.add(new StringField("body", "body", Field.Store.YES));
+    Document doc = writer.newDocument();
+    doc.addLargeText("content", "\u0633\u0627\u0628");
+    doc.addAtom("body", "body");
     writer.addDocument(doc);
     writer.close();
 
@@ -156,10 +152,9 @@
     StringBuilder buff = new StringBuilder(10);
     int n = result.length;
     for (int i = 0 ; i < n ; ++i) {
-      StoredDocument doc = searcher.doc(result[i].doc);
-      StorableField[] v = doc.getFields("tracer");
-      for (int j = 0 ; j < v.length ; ++j) {
-        buff.append(v[j].stringValue());
+      Document doc = searcher.doc(result[i].doc);
+      for (IndexableField f : doc.getFields("tracer")) {
+        buff.append(f.stringValue());
       }
     }
     assertEquals(expectedResult, buff.toString());
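
The stored-document classes are gone on this branch: searcher.doc returns the same Document class, and its fields come back as plain IndexableFields. A sketch of the read path, with hypothetical variable names, matching the types in this hunk:

    Document doc = searcher.doc(scoreDoc.doc);          // was: StoredDocument
    for (IndexableField f : doc.getFields("tracer")) {  // was: StorableField[]
      String v = f.stringValue();
    }
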
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
index 2ef8663..d3f6c3b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
@@ -68,6 +68,11 @@
     assert producer != null;
     return new AssertingDocValuesProducer(producer, state.segmentInfo.getDocCount());
   }
+
+  @Override
+  public String toString() {
+    return "AssertingDocValuesFormat(" + in + ")";
+  }
   
   static class AssertingDocValuesConsumer extends DocValuesConsumer {
     private final DocValuesConsumer in;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java
index 2fdb1a9..2d80fc4 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingStoredFieldsFormat.java
@@ -25,8 +25,8 @@
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -139,7 +139,7 @@
     }
 
     @Override
-    public void writeField(FieldInfo info, StorableField field) throws IOException {
+    public void writeField(FieldInfo info, IndexableField field) throws IOException {
       assert docStatus == Status.STARTED;
       in.writeField(info, field);
     }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java
index 105e309..0a73f2e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyStoredFieldsFormat.java
@@ -25,9 +25,9 @@
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 
@@ -106,7 +106,7 @@
     }
 
     @Override
-    public void writeField(FieldInfo info, StorableField field) throws IOException {
+    public void writeField(FieldInfo info, IndexableField field) throws IOException {
       if (random.nextInt(10000) == 0) {
         throw new IOException("Fake IOException from StoredFieldsWriter.writeField()");
       }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
index 2a53199..fee2cf7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
@@ -98,7 +98,7 @@
     int skipInterval = TestUtil.nextInt(seedRandom, minSkipInterval, 10);
     
     if (LuceneTestCase.VERBOSE) {
-      System.out.println("MockRandomCodec: skipInterval=" + skipInterval);
+      System.out.println("MockRandomCodec: skipInterval=" + skipInterval + " seedRandom=" + seedRandom);
     }
     
     final long seed = seedRandom.nextLong();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index aeb3521..7ef31a5 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -109,7 +109,7 @@
 
   static class RAMField extends Terms implements Accountable {
     final String field;
-    final SortedMap<String,RAMTerm> termToDocs = new TreeMap<>();
+    final SortedMap<BytesRef,RAMTerm> termToDocs = new TreeMap<>();
     long sumTotalTermFreq;
     long sumDocFreq;
     int docCount;
@@ -176,11 +176,11 @@
   }
 
   static class RAMTerm implements Accountable {
-    final String term;
+    final BytesRef term;
     long totalTermFreq;
     final List<RAMDoc> docs = new ArrayList<>();
-    public RAMTerm(String term) {
-      this.term = term;
+    public RAMTerm(BytesRef term) {
+      this.term = BytesRef.deepCopyOf(term);
     }
 
     @Override
@@ -200,7 +200,11 @@
 
     public RAMDoc(int docID, int freq) {
       this.docID = docID;
-      positions = new int[freq];
+      if (freq >= 0) {
+        positions = new int[freq];
+      } else {
+        positions = null;
+      }
     }
 
     @Override
@@ -333,12 +337,14 @@
 
             postingsWriter.finishDoc();
           }
-          termsConsumer.finishTerm(term, new TermStats(docFreq, totalTermFreq));
-          sumDocFreq += docFreq;
-          sumTotalTermFreq += totalTermFreq;
+          if (docFreq > 0) {
+            termsConsumer.finishTerm(term, new TermStats(docFreq, writeFreqs ? totalTermFreq : -1));
+            sumDocFreq += docFreq;
+            sumTotalTermFreq += totalTermFreq;
+          }
         }
 
-        termsConsumer.finish(sumTotalTermFreq, sumDocFreq, docsSeen.cardinality());
+        termsConsumer.finish(writeFreqs ? sumTotalTermFreq : -1, sumDocFreq, docsSeen.cardinality());
       }
     }
 
@@ -357,8 +363,7 @@
     }
       
     public RAMPostingsWriterImpl startTerm(BytesRef text) {
-      final String term = text.utf8ToString();
-      current = new RAMTerm(term);
+      current = new RAMTerm(text);
       postingsWriter.reset(current);
       return postingsWriter;
     }
@@ -407,13 +412,13 @@
     }
 
     public void finishDoc() {
-      assert posUpto == current.positions.length;
+      assert current.positions == null || posUpto == current.positions.length;
     }
   }
 
   static class RAMTermsEnum extends TermsEnum {
-    Iterator<String> it;
-    String current;
+    Iterator<BytesRef> it;
+    BytesRef current;
     private final RAMField ramField;
 
     public RAMTermsEnum(RAMField field) {
@@ -431,7 +436,7 @@
       }
       if (it.hasNext()) {
         current = it.next();
-        return new BytesRef(current);
+        return current;
       } else {
         return null;
       }
@@ -439,7 +444,7 @@
 
     @Override
     public SeekStatus seekCeil(BytesRef term) {
-      current = term.utf8ToString();
+      current = term;
       it = null;
       if (ramField.termToDocs.containsKey(current)) {
         return SeekStatus.FOUND;
@@ -464,8 +469,7 @@
 
     @Override
     public BytesRef term() {
-      // TODO: reuse BytesRef
-      return new BytesRef(current);
+      return current;
     }
 
     @Override
@@ -525,7 +529,12 @@
 
     @Override
     public int freq() throws IOException {
-      return current.positions.length;
+      if (current.positions == null) {
+        // Big fat lie:
+        return 1;
+      } else {
+        return current.positions.length;
+      }
     }
 
     @Override
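
Keying termToDocs by BytesRef works because BytesRef implements Comparable (unsigned byte order), so the TreeMap stays term-sorted without the old utf8ToString round trip; the deepCopyOf in RAMTerm matters because callers typically reuse their BytesRef. A sketch, with text standing in for the caller-owned bytes:

    SortedMap<BytesRef,RAMTerm> termToDocs = new TreeMap<>();  // sorted in byte order
    RAMTerm term = new RAMTerm(text);   // constructor deep-copies 'text'
    termToDocs.put(term.term, term);    // safe: the key is a private copy
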
diff --git a/lucene/test-framework/src/java/org/apache/lucene/document/LowSchemaField.java b/lucene/test-framework/src/java/org/apache/lucene/document/LowSchemaField.java
new file mode 100644
index 0000000..bc9ec55
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/document/LowSchemaField.java
@@ -0,0 +1,212 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A minimal implementation of Lucene's low-schema API that performs no "user space"
+ * validation, so tests can abuse the schema and stress IndexWriter to ensure it never
+ * corrupts the index.
+ */
+public class LowSchemaField implements IndexableFieldType, IndexableField {
+  private final Analyzer analyzer;
+  private final String fieldName;
+  private final Object value;
+  private final boolean tokenized;
+  private final IndexOptions indexOptions;
+  private DocValuesType docValuesType = DocValuesType.NONE;
+  private boolean stored = true;
+  private boolean termVectors;
+  private boolean termVectorPositions;
+  private boolean termVectorOffsets;
+  private boolean termVectorPayloads;
+  private TokenStream tokenStream;
+  private float boost = 1.0f;
+  private boolean omitNorms;
+
+  public LowSchemaField(Analyzer analyzer, String fieldName, Object value, IndexOptions indexOptions, boolean tokenized) {
+    this.analyzer = analyzer;
+    this.fieldName = fieldName;
+    this.value = value;
+    this.indexOptions = indexOptions;
+    this.tokenized = tokenized;
+  }
+
+  public void doNotStore() {
+    this.stored = false;
+  }
+
+  public void disableNorms() {
+    this.omitNorms = true;
+  }
+
+  public void setDocValuesType(DocValuesType docValuesType) {
+    this.docValuesType = docValuesType;
+  }
+
+  public void enableTermVectors(boolean positions, boolean offsets, boolean payloads) {
+    termVectors = true;
+    termVectorPositions = positions;
+    termVectorOffsets = offsets;
+    termVectorPayloads = payloads;
+  }
+
+  public void setTokenStream(TokenStream tokenStream) {
+    this.tokenStream = tokenStream;
+  }
+
+  public void setBoost(float boost) {
+    this.boost = boost;
+  }
+
+  @Override
+  public boolean stored() {
+    return stored;
+  }
+
+  @Override
+  public boolean storeTermVectors() {
+    return termVectors;
+  }
+
+  @Override
+  public boolean storeTermVectorPositions() {
+    return termVectorPositions;
+  }
+
+  @Override
+  public boolean storeTermVectorOffsets() {
+    return termVectorOffsets;
+  }
+
+  @Override
+  public boolean storeTermVectorPayloads() {
+    return termVectorPayloads;
+  }
+
+  @Override
+  public boolean omitNorms() {
+    return omitNorms;
+  }
+
+  @Override
+  public IndexOptions indexOptions() {
+    return indexOptions;
+  }
+
+  @Override
+  public DocValuesType docValuesType() {
+    return docValuesType;
+  }
+
+  @Override
+  public String name() {
+    return fieldName;
+  }
+
+  @Override
+  public IndexableFieldType fieldType() {
+    return this;
+  }
+
+  @Override
+  public TokenStream tokenStream(TokenStream reuse) throws IOException {
+    if (tokenStream != null) {
+      return tokenStream;
+    } else if (indexOptions != IndexOptions.NONE) {
+      if (value instanceof String) {
+        String s = (String) value;
+        if (tokenized == false) {
+          if (!(reuse instanceof StringTokenStream)) {
+            // lazy init the TokenStream as it is heavy to instantiate
+            // (attributes,...) if not needed (stored field loading)
+            reuse = new StringTokenStream();
+          }
+          ((StringTokenStream) reuse).setValue(s);
+          return reuse;
+        } else {
+          return analyzer.tokenStream(fieldName, s);
+        }
+      } else if (value instanceof Reader) {
+        return analyzer.tokenStream(fieldName, (Reader) value);
+      }
+    }
+
+    return null;
+  }
+
+  @Override
+  public float boost() {
+    return boost;
+  }
+
+  @Override
+  public BytesRef binaryValue() {
+    if (stored && value instanceof BytesRef) {
+      return (BytesRef) value;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public BytesRef binaryDocValue() {
+    if (docValuesType != DocValuesType.NONE && value instanceof BytesRef) {
+      return (BytesRef) value;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public String stringValue() {
+    if (value instanceof String) {
+      return (String) value;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public Number numericValue() {
+    if (value instanceof Number) {
+      return (Number) value;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public Number numericDocValue() {
+    if (docValuesType != DocValuesType.NONE && value instanceof Number) {
+      return (Number) value;
+    } else {
+      return null;
+    }
+  }
+}
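
A hedged usage sketch for this helper, assuming IndexWriter on this branch still accepts a raw Iterable of IndexableField (which is the point of a no-validation field); w is a hypothetical writer:

    Analyzer a = new MockAnalyzer(random());
    List<IndexableField> doc = new ArrayList<>();
    LowSchemaField id = new LowSchemaField(a, "id", "42", IndexOptions.DOCS, false);
    id.disableNorms();
    doc.add(id);
    LowSchemaField body = new LowSchemaField(a, "body", "some text",
                                             IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
    body.enableTermVectors(true, true, false);
    doc.add(body);
    w.addDocument(doc);
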
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
index 29b099c..e94e088 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -254,14 +254,14 @@
     public TermState termState() throws IOException {
       assertThread("Terms enums", creationThread);
       assert state == State.POSITIONED : "termState() called on unpositioned TermsEnum";
-      return super.termState();
+      return in.termState();
     }
 
     @Override
     public void seekExact(BytesRef term, TermState state) throws IOException {
       assertThread("Terms enums", creationThread);
       assert term.isValid();
-      super.seekExact(term, state);
+      in.seekExact(term, state);
       this.state = State.POSITIONED;
     }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
index b7191d0..661099c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
@@ -26,8 +26,6 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StoredField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.FlushInfo;
@@ -189,15 +187,10 @@
     }
     // riw should sometimes create docvalues fields, etc
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    // these fields should sometimes get term vectors, etc
-    Field idField = newStringField("id", "", Field.Store.NO);
-    Field bodyField = newTextField("body", "", Field.Store.NO);
-    doc.add(idField);
-    doc.add(bodyField);
     for (int i = 0; i < 100; i++) {
-      idField.setStringValue(Integer.toString(i));
-      bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
+      Document doc = riw.newDocument();
+      doc.addAtom("id", Integer.toString(i));
+      doc.addLargeText("body", TestUtil.randomUnicodeString(random()));
       riw.addDocument(doc);
       if (random().nextInt(7) == 0) {
         riw.commit();
@@ -750,7 +743,7 @@
 
   @Override
   protected void addRandomFields(Document doc) {
-    doc.add(new StoredField("foobar", TestUtil.randomSimpleString(random())));
+    doc.addStoredString("foobar", TestUtil.randomSimpleString(random()));
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
index 44f959c..f7c1bb3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
@@ -23,12 +23,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.packed.PackedInts;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 /** Extends {@link BaseDocValuesFormatTestCase} to add compression checks. */
@@ -46,13 +45,14 @@
     final Directory dir = new RAMDirectory();
     final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter iwriter = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setIndexOptions("dv", IndexOptions.NONE);
+    fieldTypes.disableStored("dv");
 
     final int uniqueValueCount = TestUtil.nextInt(random(), 1, 256);
     final List<Long> values = new ArrayList<>();
 
-    final Document doc = new Document();
-    final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
-    doc.add(dvf);
     for (int i = 0; i < 300; ++i) {
       final long value;
       if (values.size() < uniqueValueCount) {
@@ -61,13 +61,15 @@
       } else {
         value = RandomPicks.randomFrom(random(), values);
       }
-      dvf.setLongValue(value);
+      final Document doc = iwriter.newDocument();
+      doc.addLong("dv", value);
       iwriter.addDocument(doc);
     }
     iwriter.forceMerge(1);
     final long size1 = dirSize(dir);
     for (int i = 0; i < 20; ++i) {
-      dvf.setLongValue(RandomPicks.randomFrom(random(), values));
+      final Document doc = iwriter.newDocument();
+      doc.addLong("dv", RandomPicks.randomFrom(random(), values));
       iwriter.addDocument(doc);
     }
     iwriter.forceMerge(1);
@@ -80,21 +82,23 @@
     final Directory dir = new RAMDirectory();
     final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter iwriter = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setIndexOptions("dv", IndexOptions.NONE);
+    fieldTypes.disableStored("dv");
 
     final long base = 13; // prime
     final long day = 1000L * 60 * 60 * 24;
 
-    final Document doc = new Document();
-    final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
-    doc.add(dvf);
     for (int i = 0; i < 300; ++i) {
-      dvf.setLongValue(base + random().nextInt(1000) * day);
+      final Document doc = iwriter.newDocument();
+      doc.addLong("dv", base + random().nextInt(1000) * day);
       iwriter.addDocument(doc);
     }
     iwriter.forceMerge(1);
     final long size1 = dirSize(dir);
     for (int i = 0; i < 50; ++i) {
-      dvf.setLongValue(base + random().nextInt(1000) * day);
+      final Document doc = iwriter.newDocument();
+      doc.addLong("dv", base + random().nextInt(1000) * day);
       iwriter.addDocument(doc);
     }
     iwriter.forceMerge(1);
@@ -107,17 +111,19 @@
     final Directory dir = new RAMDirectory();
     final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter iwriter = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setIndexOptions("dv", IndexOptions.NONE);
+    fieldTypes.disableStored("dv");
 
-    final Document doc = new Document();
-    final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
-    doc.add(dvf);
     for (int i = 0; i < 20000; ++i) {
-      dvf.setLongValue(i & 1023);
+      final Document doc = iwriter.newDocument();
+      doc.addLong("dv", i & 1023);
       iwriter.addDocument(doc);
     }
     iwriter.forceMerge(1);
     final long size1 = dirSize(dir);
-    dvf.setLongValue(Long.MAX_VALUE);
+    final Document doc = iwriter.newDocument();
+    doc.addLong("dv", Long.MAX_VALUE);
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     final long size2 = dirSize(dir);
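
Each of these compression tests now pins "dv" to doc-values-only up front instead of relying on the field class; a sketch of that schema setup, using the branch FieldTypes calls from these hunks:

    FieldTypes fieldTypes = iwriter.getFieldTypes();
    fieldTypes.setIndexOptions("dv", IndexOptions.NONE);  // no postings for this field
    fieldTypes.disableStored("dv");                       // no stored value either
    Document doc = iwriter.newDocument();
    doc.addLong("dv", 42L);                               // doc values only
    iwriter.addDocument(doc);
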
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index fcbf176..5429fe6 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -17,8 +17,6 @@
  * limitations under the License.
  */
 
-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -27,8 +25,8 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.CountDownLatch;
@@ -36,17 +34,8 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -63,6 +52,8 @@
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
 
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
 /**
  * Abstract class to do basic tests for a docvalues format.
  * NOTE: This test focuses on the docvalues impl, nothing else.
@@ -75,21 +66,29 @@
 
   @Override
   protected void addRandomFields(Document doc) {
+    FieldTypes fieldTypes = doc.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    for(String fieldName : new String[] {"ndv", "bdv", "sdv", "ssdv", "sndv"}) {
+      fieldTypes.setIndexOptions(fieldName, IndexOptions.NONE);
+    }
+    fieldTypes.disableSorting("bdv");
+    fieldTypes.setMultiValued("ssdv");
+    fieldTypes.setMultiValued("sndv");
     if (usually()) {
-      doc.add(new NumericDocValuesField("ndv", random().nextInt(1 << 12)));
-      doc.add(new BinaryDocValuesField("bdv", new BytesRef(TestUtil.randomSimpleString(random()))));
-      doc.add(new SortedDocValuesField("sdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+      doc.addInt("ndv", random().nextInt(1 << 12));
+      doc.addBinary("bdv", new BytesRef(TestUtil.randomSimpleString(random())));
+      doc.addAtom("sdv", new BytesRef(TestUtil.randomSimpleString(random(), 2)));
     }
     if (codecSupportsSortedSet()) {
       final int numValues = random().nextInt(5);
       for (int i = 0; i < numValues; ++i) {
-        doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+        doc.addAtom("ssdv", new BytesRef(TestUtil.randomSimpleString(random(), 2)));
       }
     }
     if (codecSupportsSortedNumeric()) {
       final int numValues = random().nextInt(5);
       for (int i = 0; i < numValues; ++i) {
-        doc.add(new SortedNumericDocValuesField("sndv", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
+        doc.addLong("sndv", TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
       }
     }
   }
@@ -97,11 +96,11 @@
   public void testOneNumber() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv", 5));
+    doc.addLargeText("fieldname", text);
+    doc.addInt("dv", 5);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -115,8 +114,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
       assertEquals(5, dv.get(hits.scoreDocs[i].doc));
@@ -129,11 +128,11 @@
   public void testOneFloat() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new FloatDocValuesField("dv", 5.7f));
+    doc.addLargeText("fieldname", text);
+    doc.addFloat("dv", 5.7f);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -147,8 +146,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
       assertEquals(Float.floatToRawIntBits(5.7f), dv.get(hits.scoreDocs[i].doc));
@@ -161,12 +160,12 @@
   public void testTwoNumbers() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 5));
-    doc.add(new NumericDocValuesField("dv2", 17));
+    doc.addLargeText("fieldname", text);
+    doc.addInt("dv1", 5);
+    doc.addInt("dv2", 17);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -180,8 +179,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1");
       assertEquals(5, dv.get(hits.scoreDocs[i].doc));
@@ -196,12 +195,15 @@
   public void testTwoBinaryValues() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv1");
+    fieldTypes.disableSorting("dv2");
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef(longTerm)));
-    doc.add(new BinaryDocValuesField("dv2", new BytesRef(text)));
+    doc.addLargeText("fieldname", text);
+    doc.addBinary("dv1", new BytesRef(longTerm));
+    doc.addBinary("dv2", new BytesRef(text));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -215,8 +217,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv1");
       BytesRef scratch = dv.get(hits.scoreDocs[i].doc);
@@ -233,12 +235,14 @@
   public void testTwoFieldsMixed() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv2");
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 5));
-    doc.add(new BinaryDocValuesField("dv2", new BytesRef("hello world")));
+    doc.addLargeText("fieldname", text);
+    doc.addInt("dv1", 5);
+    doc.addBinary("dv2", new BytesRef("hello world"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -252,8 +256,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1");
       assertEquals(5, dv.get(hits.scoreDocs[i].doc));
@@ -269,13 +273,15 @@
   public void testThreeFieldsMixed() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv3");
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new SortedDocValuesField("dv1", new BytesRef("hello hello")));
-    doc.add(new NumericDocValuesField("dv2", 5));
-    doc.add(new BinaryDocValuesField("dv3", new BytesRef("hello world")));
+    doc.addLargeText("fieldname", text);
+    doc.addBinary("dv1", new BytesRef("hello hello"));
+    doc.addInt("dv2", 5);
+    doc.addBinary("dv3", new BytesRef("hello world"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -289,8 +295,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv1");
       int ord = dv.getOrd(0);
@@ -310,13 +316,16 @@
   public void testThreeFieldsMixed2() throws IOException {
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv1");
+
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef("hello world")));
-    doc.add(new SortedDocValuesField("dv2", new BytesRef("hello hello")));
-    doc.add(new NumericDocValuesField("dv3", 5));
+    doc.addLargeText("fieldname", text);
+    doc.addBinary("dv1", new BytesRef("hello world"));
+    doc.addBinary("dv2", new BytesRef("hello hello"));
+    doc.addInt("dv3", 5);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -331,8 +340,8 @@
     BytesRef scratch = new BytesRef();
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv2");
       int ord = dv.getOrd(0);
@@ -356,11 +365,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 1));
+    Document doc = iwriter.newDocument();
+    doc.addInt("dv", 1);
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 2));
+    doc = iwriter.newDocument();
+    doc.addInt("dv", 2);
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -383,14 +392,14 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(newField("id", "0", StringField.TYPE_STORED));
-    doc.add(new NumericDocValuesField("dv", -10));
+    Document doc = iwriter.newDocument();
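+    // addAtom appears to replace StringField.TYPE_STORED: a single untokenized term, stored (the test reads "id" back below)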
+    doc.addAtom("id", "0");
+    doc.addInt("dv", -10);
     iwriter.addDocument(doc);
     iwriter.commit();
-    doc = new Document();
-    doc.add(newField("id", "1", StringField.TYPE_STORED));
-    doc.add(new NumericDocValuesField("dv", 99));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addInt("dv", 99);
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -400,9 +409,9 @@
     assert ireader.leaves().size() == 1;
     NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
     for(int i=0;i<2;i++) {
-      StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);
+      Document doc2 = ireader.leaves().get(0).reader().document(i);
       long expected;
-      if (doc2.get("id").equals("0")) {
+      if (doc2.getString("id").equals("0")) {
         expected = -10;
       } else {
         expected = 99;
@@ -421,11 +430,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", Long.MIN_VALUE));
+    Document doc = iwriter.newDocument();
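+    // addLong presumably covers the full 64-bit range that NumericDocValuesField held here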
+    doc.addLong("dv", Long.MIN_VALUE);
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("dv", Long.MAX_VALUE));
+    doc = iwriter.newDocument();
+    doc.addLong("dv", Long.MAX_VALUE);
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -448,11 +457,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv", -8841491950446638677L));
+    Document doc = iwriter.newDocument();
+    doc.addLong("dv", -8841491950446638677L);
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("dv", 9062230939892376225L));
+    doc = iwriter.newDocument();
+    doc.addLong("dv", 9062230939892376225L);
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -474,11 +483,13 @@
     Directory directory = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world")));
+    doc.addLargeText("fieldname", text);
+    doc.addBinary("dv", new BytesRef("hello world"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -492,8 +503,8 @@
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
       BytesRef scratch = dv.get(hits.scoreDocs[i].doc);
@@ -511,14 +522,17 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(newField("id", "0", StringField.TYPE_STORED));
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world 1")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
+    doc.addBinary("dv", new BytesRef("hello world 1"));
     iwriter.addDocument(doc);
     iwriter.commit();
-    doc = new Document();
-    doc.add(newField("id", "1", StringField.TYPE_STORED));
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("hello 2")));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addBinary("dv", new BytesRef("hello 2"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -529,9 +543,9 @@
     BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
     BytesRef scratch = new BytesRef();
     for(int i=0;i<2;i++) {
-      StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);
+      Document doc2 = ireader.leaves().get(0).reader().document(i);
       String expected;
-      if (doc2.get("id").equals("0")) {
+      if (doc2.getString("id").equals("0")) {
         expected = "hello world 1";
       } else {
         expected = "hello 2";
@@ -550,13 +564,15 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("field");
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
     iwriter.addDocument(doc);    
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.NO));
-    doc.add(new BinaryDocValuesField("field", new BytesRef("hi")));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addBinary("field", new BytesRef("hi"));
     iwriter.addDocument(doc);
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
@@ -578,11 +594,11 @@
     Directory directory = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world")));
+    doc.addLargeText("fieldname", text);
+    doc.addBinary("dv", new BytesRef("hello world"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -597,8 +613,8 @@
     BytesRef scratch = new BytesRef();
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
-      assertEquals(text, hitDoc.get("fieldname"));
+      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      assertEquals(text, hitDoc.getString("fieldname"));
       assert ireader.leaves().size() == 1;
       SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
       scratch = dv.lookupOrd(dv.getOrd(hits.scoreDocs[i].doc));
@@ -616,11 +632,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 1"));
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+    doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 2"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -646,14 +662,14 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 1"));
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+    doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 2"));
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+    doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 1"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -682,14 +698,14 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(newField("id", "0", StringField.TYPE_STORED));
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
+    doc.addBinary("dv", new BytesRef("hello world 1"));
     iwriter.addDocument(doc);
     iwriter.commit();
-    doc = new Document();
-    doc.add(newField("id", "1", StringField.TYPE_STORED));
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addBinary("dv", new BytesRef("hello world 2"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -704,9 +720,9 @@
     scratch = dv.lookupOrd(1);
     assertEquals(new BytesRef("hello world 2"), scratch);
     for(int i=0;i<2;i++) {
-      StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);
+      Document doc2 = ireader.leaves().get(0).reader().document(i);
       String expected;
-      if (doc2.get("id").equals("0")) {
+      if (doc2.getString("id").equals("0")) {
         expected = "hello world 1";
       } else {
         expected = "hello world 2";
@@ -726,12 +742,12 @@
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
     iwriter.addDocument(doc);    
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.NO));
-    doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
@@ -762,8 +778,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("hello\nworld\r1")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello\nworld\r1"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -785,11 +804,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("hello world 2"));
     iwriter.addDocument(doc);
     // 2nd doc missing the DV field
-    iwriter.addDocument(new Document());
+    iwriter.addDocument(iwriter.newDocument());
     iwriter.close();
     
     // Now search the index:
@@ -814,16 +833,16 @@
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
     
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new SortedDocValuesField("field", new BytesRef("world")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("world"));
     iwriter.addDocument(doc);
 
-    doc = new Document();
-    doc.add(new SortedDocValuesField("field", new BytesRef("beer")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("beer"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     
@@ -887,11 +906,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef(""));
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("")));
+    doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef(""));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -916,11 +935,14 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef(""));
     iwriter.addDocument(doc);
-    doc = new Document();
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("")));
+    doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef(""));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     iwriter.close();
@@ -945,11 +967,13 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+    Document doc = iwriter.newDocument();
     byte bytes[] = new byte[32766];
     BytesRef b = new BytesRef(bytes);
     random().nextBytes(bytes);
-    doc.add(new BinaryDocValuesField("dv", b));
+    doc.addBinary("dv", b);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -971,11 +995,11 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     byte bytes[] = new byte[32766];
     BytesRef b = new BytesRef(bytes);
     random().nextBytes(bytes);
-    doc.add(new SortedDocValuesField("dv", b));
+    doc.addBinary("dv", b);
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -996,8 +1020,10 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("dv", new BytesRef("boo!")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.disableSorting("dv");
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("boo!"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -1021,8 +1047,8 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new SortedDocValuesField("dv", new BytesRef("boo!")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("dv", new BytesRef("boo!"));
     iwriter.addDocument(doc);
     iwriter.close();
     
@@ -1049,9 +1075,9 @@
     conf.setMergePolicy(newLogMergePolicy());
     IndexWriter writer = new IndexWriter(dir, conf);
     for (int i = 0; i < 5; i++) {
-      Document doc = new Document();
-      doc.add(new NumericDocValuesField("docId", i));
-      doc.add(new TextField("docId", "" + i, Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addInt("docId", i);
+      doc.addLargeText("text", "" + i);
       writer.addDocument(doc);
     }
     writer.commit();
@@ -1065,11 +1091,11 @@
     IndexSearcher searcher = new IndexSearcher(reader);
 
     BooleanQuery query = new BooleanQuery();
-    query.add(new TermQuery(new Term("docId", "0")), BooleanClause.Occur.SHOULD);
-    query.add(new TermQuery(new Term("docId", "1")), BooleanClause.Occur.SHOULD);
-    query.add(new TermQuery(new Term("docId", "2")), BooleanClause.Occur.SHOULD);
-    query.add(new TermQuery(new Term("docId", "3")), BooleanClause.Occur.SHOULD);
-    query.add(new TermQuery(new Term("docId", "4")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("text", "0")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("text", "1")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("text", "2")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("text", "3")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("text", "4")), BooleanClause.Occur.SHOULD);
 
     TopDocs search = searcher.search(query, 10);
     assertEquals(5, search.totalHits);
@@ -1097,11 +1123,11 @@
     Map<String, String> docToString = new HashMap<>();
     int maxLength = TestUtil.nextInt(random(), 1, 50);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("id", "" + i, Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addLargeText("id", "" + i);
       String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength);
       BytesRef br = new BytesRef(string);
-      doc.add(new SortedDocValuesField("field", br));
+      doc.addBinary("field", br);
       hash.add(br);
       docToString.put("" + i, string);
       w.addDocument(doc);
@@ -1111,8 +1137,8 @@
     }
     int numDocsNoValue = atLeast(10);
     for (int i = 0; i < numDocsNoValue; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("id", "noValue", Field.Store.YES));
+      Document doc = w.newDocument();
+      doc.addLargeText("id", "noValue");
       w.addDocument(doc);
     }
     if (!codecSupportsDocsWithField()) {
@@ -1128,14 +1154,14 @@
       w.forceMerge(1);
     }
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = w.newDocument();
       String id = "" + i + numDocs;
-      doc.add(newTextField("id", id, Field.Store.YES));
+      doc.addLargeText("id", id);
       String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength);
       BytesRef br = new BytesRef(string);
       hash.add(br);
       docToString.put(id, string);
-      doc.add(new SortedDocValuesField("field", br));
+      doc.addBinary("field", br);
       w.addDocument(doc);
     }
     w.commit();
@@ -1185,13 +1211,6 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedField = newStringField("stored", "", Field.Store.YES);
-    Field dvField = new NumericDocValuesField("dv", 0);
-    doc.add(idField);
-    doc.add(storedField);
-    doc.add(dvField);
     
     // index some docs
     int numDocs = atLeast(300);
@@ -1199,10 +1218,11 @@
     // for numbers of values <= 256, all storage layouts are tested
     assert numDocs > 256;
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
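+      // the reused-Field idiom removed above becomes a fresh schema-aware document per iteration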
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       long value = longs.next();
-      storedField.setStringValue(Long.toString(value));
-      dvField.setLongValue(value);
+      doc.addAtom("stored", Long.toString(value));
+      doc.addLong("dv", value);
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -1228,7 +1248,7 @@
       LeafReader r = context.reader();
       NumericDocValues docValues = r.getNumericDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        long storedValue = Long.parseLong(r.document(i).get("stored"));
+        long storedValue = Long.parseLong(r.document(i).getString("stored"));
         assertEquals(storedValue, docValues.get(i));
       }
     }
@@ -1240,6 +1260,9 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
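+    // assumption: setMultiValued allows repeated addLong/addStoredString per document; the multi-valued longs surface as SORTED_NUMERIC below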
+    fieldTypes.setMultiValued("stored");
+    fieldTypes.setMultiValued("dv");
     
     // index some docs
     int numDocs = atLeast(300);
@@ -1247,19 +1270,19 @@
     // for numbers of values <= 256, all storage layouts are tested
     assert numDocs > 256;
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       
       int valueCount = (int) counts.next();
       long valueArray[] = new long[valueCount];
       for (int j = 0; j < valueCount; j++) {
         long value = values.next();
         valueArray[j] = value;
-        doc.add(new SortedNumericDocValuesField("dv", value));
+        doc.addLong("dv", value);
       }
       Arrays.sort(valueArray);
       for (int j = 0; j < valueCount; j++) {
-        doc.add(new StoredField("stored", Long.toString(valueArray[j])));
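+        // addStoredString appears to be the stored-only counterpart of the old StoredField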
+        doc.addStoredString("stored", Long.toString(valueArray[j]));
       }
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
@@ -1286,7 +1309,7 @@
       LeafReader r = context.reader();
       SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        String expected[] = r.document(i).getValues("stored");
+        String[] expected = r.document(i).getStrings("stored");
         docValues.setDocument(i);
         String actual[] = new String[docValues.count()];
         for (int j = 0; j < actual.length; j++) {
@@ -1338,18 +1361,14 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedField = new StoredField("stored", new byte[0]);
-    Field dvField = new BinaryDocValuesField("dv", new BytesRef());
-    doc.add(idField);
-    doc.add(storedField);
-    doc.add(dvField);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("dv");
     
     // index some docs
     int numDocs = atLeast(300);
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       final int length;
       if (minLength == maxLength) {
         length = minLength; // fixed length
@@ -1358,8 +1377,8 @@
       }
       byte buffer[] = new byte[length];
       random().nextBytes(buffer);
-      storedField.setBytesValue(buffer);
-      dvField.setBytesValue(buffer);
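+      // addStoredBinary presumably stores the raw bytes without indexing, matching the old StoredField(byte[]) usage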
+      doc.addStoredBinary("stored", buffer);
+      doc.addBinary("dv", buffer);
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -1379,7 +1398,7 @@
       LeafReader r = context.reader();
       BinaryDocValues docValues = r.getBinaryDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        BytesRef binaryValue = r.document(i).getBinaryValue("stored");
+        BytesRef binaryValue = r.document(i).getBinary("stored");
         BytesRef scratch = docValues.get(i);
         assertEquals(binaryValue, scratch);
       }
@@ -1393,7 +1412,7 @@
       LeafReader r = context.reader();
       BinaryDocValues docValues = r.getBinaryDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        BytesRef binaryValue = r.document(i).getBinaryValue("stored");
+        BytesRef binaryValue = r.document(i).getBinary("stored");
         BytesRef scratch = docValues.get(i);
         assertEquals(binaryValue, scratch);
       }
@@ -1422,17 +1441,11 @@
     Directory dir = newFSDirectory(createTempDir("dvduel"));
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedField = new StoredField("stored", new byte[0]);
-    Field dvField = new SortedDocValuesField("dv", new BytesRef());
-    doc.add(idField);
-    doc.add(storedField);
-    doc.add(dvField);
-    
+
     // index some docs
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       final int length;
       if (minLength == maxLength) {
         length = minLength; // fixed length
@@ -1441,8 +1454,8 @@
       }
       byte buffer[] = new byte[length];
       random().nextBytes(buffer);
-      storedField.setBytesValue(buffer);
-      dvField.setBytesValue(buffer);
+      doc.addStoredBinary("stored", buffer);
+      doc.addBinary("dv", buffer);
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -1462,7 +1475,7 @@
       LeafReader r = context.reader();
       BinaryDocValues docValues = r.getSortedDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        BytesRef binaryValue = r.document(i).getBinaryValue("stored");
+        BytesRef binaryValue = r.document(i).getBinary("stored");
         BytesRef scratch = docValues.get(i);
         assertEquals(binaryValue, scratch);
       }
@@ -1476,7 +1489,7 @@
       LeafReader r = context.reader();
       BinaryDocValues docValues = r.getSortedDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        BytesRef binaryValue = r.document(i).getBinaryValue("stored");
+        BytesRef binaryValue = r.document(i).getBinary("stored");
         BytesRef scratch = docValues.get(i);
         assertEquals(binaryValue, scratch);
       }
@@ -1505,9 +1518,11 @@
     assumeTrue("Codec does not support SORTED_SET", codecSupportsSortedSet());
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
-    
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
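+    // assumption: a multi-valued binary field is written as SORTED_SET doc values, which this SORTED_SET test then exercises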
+    fieldTypes.setMultiValued("field");
+
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     
     DirectoryReader ireader = iwriter.getReader();
@@ -1530,10 +1545,13 @@
     assumeTrue("Codec does not support SORTED_SET", codecSupportsSortedSet());
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    fieldTypes.setMultiValued("field2");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-    doc.add(new SortedSetDocValuesField("field2", new BytesRef("world")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
+    doc.addBinary("field2", new BytesRef("world"));
     iwriter.addDocument(doc);
     
     DirectoryReader ireader = iwriter.getReader();
@@ -1568,14 +1586,16 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
   
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     iwriter.commit();
     
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("world"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     
@@ -1608,9 +1628,12 @@
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
+    doc.addBinary("field", new BytesRef("world"));
     iwriter.addDocument(doc);
     
     DirectoryReader ireader = iwriter.getReader();
@@ -1637,10 +1660,12 @@
     assumeTrue("Codec does not support SORTED_SET", codecSupportsSortedSet());
     Directory directory = newDirectory();
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("world"));
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     
     DirectoryReader ireader = iwriter.getReader();
@@ -1670,16 +1695,18 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
+    doc.addBinary("field", new BytesRef("world"));
     iwriter.addDocument(doc);
     iwriter.commit();
     
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("beer")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
+    doc.addBinary("field", new BytesRef("beer"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     
@@ -1719,12 +1746,14 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     
-    doc = new Document();
+    doc = iwriter.newDocument();
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     DirectoryReader ireader = iwriter.getReader();
@@ -1751,13 +1780,15 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     iwriter.commit();
     
-    doc = new Document();
+    doc = iwriter.newDocument();
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
    
@@ -1785,12 +1816,14 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     iwriter.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     
     iwriter.forceMerge(1);
@@ -1818,13 +1851,15 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
+    Document doc = iwriter.newDocument();
     iwriter.addDocument(doc);
     iwriter.commit();
     
-    doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     iwriter.forceMerge(1);
     
@@ -1852,13 +1887,15 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
     iwriter.addDocument(doc);    
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.NO));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addBinary("field", new BytesRef("hello"));
     iwriter.addDocument(doc);
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
@@ -1881,11 +1918,13 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
-    doc.add(new SortedSetDocValuesField("field", new BytesRef("beer")));
+    Document doc = iwriter.newDocument();
+    doc.addBinary("field", new BytesRef("hello"));
+    doc.addBinary("field", new BytesRef("world"));
+    doc.addBinary("field", new BytesRef("beer"));
     iwriter.addDocument(doc);
     
     DirectoryReader ireader = iwriter.getReader();
@@ -1943,12 +1982,14 @@
     Directory dir = newFSDirectory(createTempDir("dvduel"));
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("stored");
+    fieldTypes.setMultiValued("dv");
+
     // index some docs
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
-      doc.add(idField);
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       final int length;
       if (minLength == maxLength) {
         length = minLength; // fixed length
@@ -1964,14 +2005,14 @@
       
       // add ordered to the stored field
       for (String v : values) {
-        doc.add(new StoredField("stored", v));
+        doc.addStoredString("stored", v);
       }
 
       // add in any order to the dv field
       ArrayList<String> unordered = new ArrayList<>(values);
       Collections.shuffle(unordered, random());
       for (String v : unordered) {
-        doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
+        doc.addBinary("dv", new BytesRef(v));
       }
 
       writer.addDocument(doc);
@@ -1993,7 +2034,7 @@
       LeafReader r = context.reader();
       SortedSetDocValues docValues = r.getSortedSetDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        String stringValues[] = r.document(i).getValues("stored");
+        String stringValues[] = r.document(i).getStrings("stored");
         if (docValues != null) {
           docValues.setDocument(i);
         }
@@ -2016,7 +2057,7 @@
       LeafReader r = context.reader();
       SortedSetDocValues docValues = r.getSortedSetDocValues("dv");
       for (int i = 0; i < r.maxDoc(); i++) {
-        String stringValues[] = r.document(i).getValues("stored");
+        String stringValues[] = r.document(i).getStrings("stored");
         if (docValues != null) {
           docValues.setDocument(i);
         }
@@ -2172,12 +2213,12 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 0));
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addLong("dv1", 0);
     iw.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2201,13 +2242,13 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 0));
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addLong("dv1", 0);
     iw.addDocument(doc);
     iw.commit();
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2231,17 +2272,17 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 0));
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addLong("dv1", 0);
     iw.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.commit();
-    doc = new Document();
-    doc.add(new StringField("id", "2", Field.Store.YES));
-    doc.add(new NumericDocValuesField("dv1", 5));
+    doc = iw.newDocument();
+    doc.addAtom("id", "2");
+    doc.addLong("dv1", 5);
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2267,12 +2308,14 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("dv1");
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addBinary("dv1", new BytesRef());
     iw.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2298,13 +2341,15 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("dv1");
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addBinary("dv1", new BytesRef());
     iw.addDocument(doc);
     iw.commit();
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2330,17 +2375,19 @@
     IndexWriterConfig conf = newIndexWriterConfig(null);
     conf.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("dv1");
+    Document doc = iw.newDocument();
+    doc.addAtom("id", "0");
+    doc.addBinary("dv1", new BytesRef());
     iw.addDocument(doc);
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc = iw.newDocument();
+    doc.addAtom("id", "1");
     iw.addDocument(doc);
     iw.commit();
-    doc = new Document();
-    doc.add(new StringField("id", "2", Field.Store.YES));
-    doc.add(new BinaryDocValuesField("dv1", new BytesRef("boo")));
+    doc = iw.newDocument();
+    doc.addAtom("id", "2");
+    doc.addBinary("dv1", new BytesRef("boo"));
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.close();
@@ -2368,33 +2415,23 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedBinField = new StoredField("storedBin", new byte[0]);
-    Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
-    Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
-    Field storedNumericField = new StoredField("storedNum", "");
-    Field dvNumericField = new NumericDocValuesField("dvNum", 0);
-    doc.add(idField);
-    doc.add(storedBinField);
-    doc.add(dvBinField);
-    doc.add(dvSortedField);
-    doc.add(storedNumericField);
-    doc.add(dvNumericField);
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("dvBin");
+
     // index some docs
     int numDocs = atLeast(300);
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       int length = TestUtil.nextInt(random(), 0, 8);
       byte buffer[] = new byte[length];
       random().nextBytes(buffer);
-      storedBinField.setBytesValue(buffer);
-      dvBinField.setBytesValue(buffer);
-      dvSortedField.setBytesValue(buffer);
+      doc.addStoredBinary("storedBin", buffer);
+      doc.addBinary("dvBin", buffer);
+      doc.addBinary("dvSorted", buffer);
       long numericValue = random().nextLong();
-      storedNumericField.setStringValue(Long.toString(numericValue));
-      dvNumericField.setLongValue(numericValue);
+      doc.addStoredString("storedNum", Long.toString(numericValue));
+      doc.addLong("dvNum", numericValue);
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -2427,12 +2464,12 @@
               SortedDocValues sorted = r.getSortedDocValues("dvSorted");
               NumericDocValues numerics = r.getNumericDocValues("dvNum");
               for (int j = 0; j < r.maxDoc(); j++) {
-                BytesRef binaryValue = r.document(j).getBinaryValue("storedBin");
+                BytesRef binaryValue = r.document(j).getBinary("storedBin");
                 BytesRef scratch = binaries.get(j);
                 assertEquals(binaryValue, scratch);
                 scratch = sorted.get(j);
                 assertEquals(binaryValue, scratch);
-                String expected = r.document(j).get("storedNum");
+                String expected = r.document(j).getString("storedNum");
                 assertEquals(Long.parseLong(expected), numerics.get(j));
               }
             }
@@ -2461,36 +2498,30 @@
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedBinField = new StoredField("storedBin", new byte[0]);
-    Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
-    Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
-    Field storedNumericField = new StoredField("storedNum", "");
-    Field dvNumericField = new NumericDocValuesField("dvNum", 0);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableSorting("dvBin");
+    fieldTypes.setMultiValued("dvSortedSet");
+    fieldTypes.setMultiValued("storedSortedSet");
+    fieldTypes.setMultiValued("dvSortedNumeric");
+    fieldTypes.setMultiValued("storedSortedNumeric");
     
     // index some docs
     int numDocs = TestUtil.nextInt(random(), 1025, 2047);
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       int length = TestUtil.nextInt(random(), 0, 8);
       byte buffer[] = new byte[length];
       random().nextBytes(buffer);
-      storedBinField.setBytesValue(buffer);
-      dvBinField.setBytesValue(buffer);
-      dvSortedField.setBytesValue(buffer);
-      long numericValue = random().nextLong();
-      storedNumericField.setStringValue(Long.toString(numericValue));
-      dvNumericField.setLongValue(numericValue);
-      Document doc = new Document();
-      doc.add(idField);
       if (random().nextInt(4) > 0) {
-        doc.add(storedBinField);
-        doc.add(dvBinField);
-        doc.add(dvSortedField);
+        doc.addStoredBinary("storedBin", buffer);
+        doc.addBinary("dvBin", buffer);
+        doc.addBinary("dvSorted", buffer);
       }
+      long numericValue = random().nextLong();
       if (random().nextInt(4) > 0) {
-        doc.add(storedNumericField);
-        doc.add(dvNumericField);
+        doc.addStoredString("storedNum", Long.toString(numericValue));
+        doc.addLong("dvNum", numericValue);
       }
       int numSortedSetFields = random().nextInt(3);
       Set<String> values = new TreeSet<>();
@@ -2498,8 +2529,8 @@
         values.add(TestUtil.randomSimpleString(random()));
       }
       for (String v : values) {
-        doc.add(new SortedSetDocValuesField("dvSortedSet", new BytesRef(v)));
-        doc.add(new StoredField("storedSortedSet", v));
+        doc.addBinary("dvSortedSet", new BytesRef(v));
+        doc.addStoredString("storedSortedSet", v);
       }
       int numSortedNumericFields = random().nextInt(3);
       Set<Long> numValues = new TreeSet<>();
@@ -2507,8 +2538,8 @@
         numValues.add(TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
       }
       for (Long l : numValues) {
-        doc.add(new SortedNumericDocValuesField("dvSortedNumeric", l));
-        doc.add(new StoredField("storedSortedNumeric", Long.toString(l)));
+        doc.addLong("dvSortedNumeric", l);
+        doc.addStoredString("storedSortedNumeric", Long.toString(l));
       }
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
@@ -2549,7 +2580,7 @@
               SortedNumericDocValues sortedNumeric = r.getSortedNumericDocValues("dvSortedNumeric");
               Bits sortedNumericBits = r.getDocsWithField("dvSortedNumeric");
               for (int j = 0; j < r.maxDoc(); j++) {
-                BytesRef binaryValue = r.document(j).getBinaryValue("storedBin");
+                BytesRef binaryValue = r.document(j).getBinary("storedBin");
                 if (binaryValue != null) {
                   if (binaries != null) {
                     BytesRef scratch = binaries.get(j);
@@ -2565,7 +2596,7 @@
                   assertEquals(-1, sorted.getOrd(j));
                 }
                
-                String number = r.document(j).get("storedNum");
+                String number = r.document(j).getString("storedNum");
                 if (number != null) {
                   if (numerics != null) {
                     assertEquals(Long.parseLong(number), numerics.get(j));
@@ -2575,7 +2606,7 @@
                   assertEquals(0, numerics.get(j));
                 }
                 
-                String values[] = r.document(j).getValues("storedSortedSet");
+                String values[] = r.document(j).getStrings("storedSortedSet");
                 if (values.length > 0) {
                   assertNotNull(sortedSet);
                   sortedSet.setDocument(j);
@@ -2593,7 +2624,7 @@
                   assertFalse(sortedSetBits.get(j));
                 }
                 
-                String numValues[] = r.document(j).getValues("storedSortedNumeric");
+                String numValues[] = r.document(j).getStrings("storedSortedNumeric");
                 if (numValues.length > 0) {
                   assertNotNull(sortedNumeric);
                   sortedNumeric.setDocument(j);
@@ -2634,27 +2665,38 @@
     Directory dir = newFSDirectory(createTempDir());
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     int numSortedSets = random().nextInt(21);
     int numBinaries = random().nextInt(21);
     int numSortedNums = random().nextInt(21);
+
+    for (int j = 0; j < numSortedSets; j++) {
+      fieldTypes.setMultiValued("ss" + j);
+    }
+    for (int j = 0; j < numBinaries; j++) {
+      fieldTypes.disableSorting("b" + j);
+    }
+    for (int j = 0; j < numSortedNums; j++) {
+      fieldTypes.setMultiValued("sn" + j);
+    }
     
     int numDocs = TestUtil.nextInt(random(), 2025, 2047);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document doc = writer.newDocument();
       
       for (int j = 0; j < numSortedSets; j++) {
-        doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
-        doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
+        doc.addBinary("ss" + j, new BytesRef(TestUtil.randomSimpleString(random())));
+        doc.addBinary("ss" + j, new BytesRef(TestUtil.randomSimpleString(random())));
       }
       
       for (int j = 0; j < numBinaries; j++) {
-        doc.add(new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
+        doc.addBinary("b" + j, new BytesRef(TestUtil.randomSimpleString(random())));
       }
       
       for (int j = 0; j < numSortedNums; j++) {
-        doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
-        doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
+        doc.addLong("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
+        doc.addLong("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
       }
       writer.addDocument(doc);
     }
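
For reference, the pattern these hunks migrate to: the LUCENE-6005 branch moves field configuration out of per-field FieldType instances and into a writer-owned FieldTypes schema, so multi-valued fields (and fields that should not build sorted doc values) are declared once, before any documents are added. A minimal sketch, assuming the branch-only methods visible in the hunks above (getFieldTypes, setMultiValued, newDocument, addAtom); none of this exists on trunk:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class MultiValuedSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
        FieldTypes fieldTypes = w.getFieldTypes(); // the schema lives on the writer
        fieldTypes.setMultiValued("tags");         // declare before adding two values
        Document doc = w.newDocument();            // documents now come from the writer
        doc.addAtom("tags", "lucene");             // two values for one field name
        doc.addAtom("tags", "search");
        w.addDocument(doc);
        w.close();
        dir.close();
      }
    }
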
@@ -2705,17 +2747,20 @@
       }
       Directory dir = newDirectory();
       RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+      FieldTypes fieldTypes = w.getFieldTypes();
+      fieldTypes.disableSorting("field");
+
       BytesRef bytes = new BytesRef();
       bytes.bytes = new byte[1<<i];
       bytes.length = 1<<i;
       for(int j=0;j<4;j++) {
-        Document doc = new Document();
-        doc.add(new BinaryDocValuesField("field", bytes));
+        Document doc = w.newDocument();
+        doc.addBinary("field", bytes);
         w.addDocument(doc);
       }
-      Document doc = new Document();
-      doc.add(new StoredField("id", "5"));
-      doc.add(new BinaryDocValuesField("field", new BytesRef()));
+      Document doc = w.newDocument();
+      doc.addStoredString("id", "5");
+      doc.addBinary("field", new BytesRef());
       w.addDocument(doc);
       IndexReader r = w.getReader();
       w.close();
@@ -2735,8 +2780,10 @@
     assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 5));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 5);
     writer.addDocument(doc);
     writer.close();
     
@@ -2756,10 +2803,12 @@
     assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 5));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 5);
     writer.addDocument(doc);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.close();
     
     // Now search the index:
@@ -2788,12 +2837,12 @@
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
     iwriter.addDocument(doc);    
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.NO));
-    doc.add(new NumericDocValuesField("field", 5));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addLong("field", 5);
     iwriter.addDocument(doc);
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
@@ -2813,9 +2862,11 @@
     assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 11));
-    doc.add(new SortedNumericDocValuesField("dv", -5));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 11);
+    doc.addLong("dv", -5);
     writer.addDocument(doc);
     writer.close();
     
@@ -2836,9 +2887,11 @@
     assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 11));
-    doc.add(new SortedNumericDocValuesField("dv", 11));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 11);
+    doc.addLong("dv", 11);
     writer.addDocument(doc);
     writer.close();
     
@@ -2859,11 +2912,13 @@
     assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
     Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(null));
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 11));
-    doc.add(new SortedNumericDocValuesField("dv", -5));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 11);
+    doc.addLong("dv", -5);
     writer.addDocument(doc);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.close();
     
     // Now search the index:
@@ -2892,12 +2947,14 @@
     IndexWriterConfig iwc = new IndexWriterConfig(null);
     iwc.setMergePolicy(newLogMergePolicy());
     IndexWriter writer = new IndexWriter(directory, iwc);
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 11));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.setMultiValued("dv");
+    Document doc = writer.newDocument();
+    doc.addLong("dv", 11);
     writer.addDocument(doc);
     writer.commit();
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", -5));
+    doc = writer.newDocument();
+    doc.addLong("dv", -5);
     writer.addDocument(doc);
     writer.forceMerge(1);
     writer.close();
@@ -2924,13 +2981,15 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
     
-    Document doc = new Document();
-    doc.add(new StringField("id", "0", Field.Store.NO));
+    Document doc = iwriter.newDocument();
+    doc.addAtom("id", "0");
     iwriter.addDocument(doc);    
-    doc = new Document();
-    doc.add(new StringField("id", "1", Field.Store.NO));
-    doc.add(new SortedNumericDocValuesField("field", 5));
+    doc = iwriter.newDocument();
+    doc.addAtom("id", "1");
+    doc.addLong("field", 5);
     iwriter.addDocument(doc);
     iwriter.commit();
     iwriter.deleteDocuments(new Term("id", "1"));
@@ -2954,13 +3013,16 @@
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
     
-    Document doc = new Document();
-    SortedDocValuesField field = new SortedDocValuesField("field", new BytesRef("2"));
-    doc.add(field);
+    Document doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("2"));
     iwriter.addDocument(doc);
-    field.setBytesValue(new BytesRef("1"));
+
+    doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("1"));
     iwriter.addDocument(doc);
-    field.setBytesValue(new BytesRef("3"));
+
+    doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("3"));
     iwriter.addDocument(doc);
 
     iwriter.commit();
@@ -2982,16 +3044,22 @@
     IndexWriterConfig iwconfig = newIndexWriterConfig(analyzer);
     iwconfig.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
-    
-    Document doc = new Document();
-    SortedSetDocValuesField field1 = new SortedSetDocValuesField("field", new BytesRef("2"));
-    SortedSetDocValuesField field2 = new SortedSetDocValuesField("field", new BytesRef("3"));
-    doc.add(field1);
-    doc.add(field2);
+    FieldTypes fieldTypes = iwriter.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+
+    Document doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("2"));
+    doc.addAtom("field", new BytesRef("3"));
     iwriter.addDocument(doc);
-    field1.setBytesValue(new BytesRef("1"));
+
+    doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("1"));
+    doc.addAtom("field", new BytesRef("3"));
     iwriter.addDocument(doc);
-    field2.setBytesValue(new BytesRef("2"));
+
+    doc = iwriter.newDocument();
+    doc.addAtom("field", new BytesRef("1"));
+    doc.addAtom("field", new BytesRef("2"));
     iwriter.addDocument(doc);
 
     iwriter.commit();
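
On the read side, these hunks also retire StoredDocument in favor of Document and rename the stored-value getters: get becomes getString, getBinaryValue becomes getBinary, and getValues becomes getStrings. A short sketch of the retrieval pattern, again assuming the branch API, where dir is any Directory holding an index written as above:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.util.BytesRef;

    IndexReader r = DirectoryReader.open(dir);
    Document stored = r.document(0);                       // a Document, not StoredDocument
    String num = stored.getString("storedNum");            // was stored.get("storedNum")
    BytesRef bin = stored.getBinary("storedBin");          // was stored.getBinaryValue("storedBin")
    String[] vals = stored.getStrings("storedSortedSet");  // was stored.getValues(...)
    r.close();
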
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
index 3ef4f2d..2ade5de 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
@@ -25,9 +25,6 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.StringHelper;
@@ -51,7 +48,7 @@
     SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
     FieldInfos.Builder builder = new FieldInfos.Builder();
     FieldInfo fi = builder.getOrAdd("field");
-    fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
+    fi.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
     addAttributes(fi);
     FieldInfos infos = builder.finish();
     codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
@@ -82,17 +79,33 @@
     }
     FieldInfos.Builder builder = new FieldInfos.Builder();
     for (String field : fieldNames) {
-      IndexableFieldType fieldType = randomFieldType(random());
       FieldInfo fi = builder.getOrAdd(field);
-      IndexOptions indexOptions = fieldType.indexOptions();
+      IndexOptions indexOptions;
+      boolean omitNorms = false;
+      if (random().nextBoolean()) {
+        IndexOptions values[] = IndexOptions.values();
+        indexOptions = values[random().nextInt(values.length)];
+        omitNorms = random().nextBoolean();
+      } else {
+        indexOptions = IndexOptions.NONE;
+      }
+
+      DocValuesType docValuesType;
+      if (random().nextBoolean()) {
+        DocValuesType values[] = getDocValuesTypes();
+        docValuesType = values[random().nextInt(values.length)];
+      } else {
+        docValuesType = DocValuesType.NONE;
+      }
+
       if (indexOptions != IndexOptions.NONE) {
         fi.setIndexOptions(indexOptions);
-        if (fieldType.omitNorms()) {      
+        if (omitNorms) {
           fi.setOmitsNorms();
         }
       }
-      fi.setDocValuesType(fieldType.docValuesType());
-      if (fieldType.indexOptions() != IndexOptions.NONE && fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
+      fi.setDocValuesType(docValuesType);
+      if (indexOptions != IndexOptions.NONE && indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
         if (random().nextBoolean()) {
           fi.setStorePayloads();
         }
@@ -106,34 +119,6 @@
     dir.close();
   }
   
-  private final IndexableFieldType randomFieldType(Random r) {
-    FieldType type = new FieldType();
-    
-    if (r.nextBoolean()) {
-      IndexOptions values[] = IndexOptions.values();
-      type.setIndexOptions(values[r.nextInt(values.length)]);
-      type.setOmitNorms(r.nextBoolean());
-      
-      if (r.nextBoolean()) {
-        type.setStoreTermVectors(true);
-        if (type.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
-          type.setStoreTermVectorPositions(r.nextBoolean());
-          type.setStoreTermVectorOffsets(r.nextBoolean());
-          if (type.storeTermVectorPositions()) {
-            type.setStoreTermVectorPayloads(r.nextBoolean());
-          }
-        }
-      }
-    }
-    
-    if (r.nextBoolean()) {
-      DocValuesType values[] = getDocValuesTypes();
-      type.setDocValuesType(values[r.nextInt(values.length)]);
-    }
-        
-    return type;
-  }
-  
   /** 
    * Hook to add any codec attributes to fieldinfo
    * instances added in this test.
@@ -181,7 +166,7 @@
   
   @Override
   protected void addRandomFields(Document doc) {
-    doc.add(new StoredField("foobar", TestUtil.randomSimpleString(random())));
+    doc.addStoredString("foobar", TestUtil.randomSimpleString(random()));
   }
 
   @Override
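
The inlined replacement for randomFieldType draws random constants straight from the IndexOptions and DocValuesType enums instead of going through a FieldType object. The underlying idiom is plain Java and worth spelling out on its own; nothing here is branch-specific:

    import java.util.Random;
    import org.apache.lucene.index.IndexOptions;

    Random r = new Random();
    IndexOptions[] values = IndexOptions.values();
    IndexOptions indexOptions = r.nextBoolean()
        ? values[r.nextInt(values.length)]  // any indexed variant, uniformly
        : IndexOptions.NONE;                // or not indexed at all
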
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index 3477025..35727ea2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -42,10 +42,7 @@
 import org.apache.lucene.codecs.TermVectorsWriter;
 import org.apache.lucene.codecs.mockrandom.MockRandomPostingsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -198,9 +195,12 @@
     mp.setNoCFSRatio(0);
     IndexWriterConfig cfg = new IndexWriterConfig(new MockAnalyzer(random())).setUseCompoundFile(false).setMergePolicy(mp);
     IndexWriter w = new IndexWriter(dir, cfg);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+
     final int numDocs = atLeast(500);
     for (int i = 0; i < numDocs; ++i) {
-      Document d = new Document();
+      Document d = w.newDocument();
       addRandomFields(d);
       w.addDocument(d);
     }
@@ -219,6 +219,8 @@
     mp.setNoCFSRatio(0);
     cfg = new IndexWriterConfig(new MockAnalyzer(random())).setUseCompoundFile(false).setMergePolicy(mp);
     w = new IndexWriter(dir2, cfg);
+    fieldTypes = w.getFieldTypes();
+    fieldTypes.disableExistsFilters();
     TestUtil.addIndexesSlowly(w, reader);
 
     w.commit();
@@ -248,7 +250,7 @@
     final int numDocs = atLeast(10000);
     LeafReader reader1 = null;
     for (int i = 0; i < numDocs; ++i) {
-      Document d = new Document();
+      Document d = w.newDocument();
       addRandomFields(d);
       w.addDocument(d);
       if (i == 100) {
@@ -284,12 +286,11 @@
     // first make a one doc index
     Directory oneDocIndex = newDirectory();
     IndexWriter iw = new IndexWriter(oneDocIndex, new IndexWriterConfig(new MockAnalyzer(random())));
-    Document oneDoc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    Field customField = new Field("field", "contents", customType);
-    oneDoc.add(customField);
-    oneDoc.add(new NumericDocValuesField("field", 5));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    Document oneDoc = iw.newDocument();
+    fieldTypes.enableTermVectors("field");
+    oneDoc.addLargeText("field", "contents");
     iw.addDocument(oneDoc);
     LeafReader oneDocReader = getOnlySegmentReader(DirectoryReader.open(iw, true));
     iw.close();
@@ -365,7 +366,7 @@
     // StoredFieldsFormat
     try (StoredFieldsWriter consumer = codec.storedFieldsFormat().fieldsWriter(dir, segmentInfo, writeState.context)) {
       consumer.startDocument();
-      consumer.writeField(field, customField);
+      consumer.writeField(field, oneDoc.getField("field"));
       consumer.finishDocument();
       consumer.finish(fieldInfos, 1);
       IOUtils.close(consumer);
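
The one-doc-index hunk shows the new home for term vectors: they are enabled per field name on the FieldTypes schema rather than on a FieldType instance, and the indexed-plus-stored text field becomes addLargeText. A hedged sketch of just that setup, using only the branch methods visible above, where iw is the IndexWriter from the hunk:

    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.enableTermVectors("field");     // was FieldType.setStoreTermVectors(true)
    Document oneDoc = iw.newDocument();
    oneDoc.addLargeText("field", "contents");  // was new Field("field", "contents", customType)
    iw.addDocument(oneDoc);
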
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java
index d94c1c3..e88fe5d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java
@@ -18,7 +18,6 @@
  */
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -52,7 +51,7 @@
     for (int i = 0; i < numSegments; ++i) {
       final int numDocs = TestUtil.nextInt(random(), 1, 5);
       for (int j = 0; j < numDocs; ++j) {
-        writer.addDocument(new Document());
+        writer.addDocument(writer.newDocument());
       }
       writer.getReader().close();
     }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java
index ec13885..5bac88a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseNormsFormatTestCase.java
@@ -22,19 +22,16 @@
 import java.util.List;
 import java.util.Random;
 
-import com.carrotsearch.randomizedtesting.annotations.Seed;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.TestUtil;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
 
 /**
  * Abstract class to do basic tests for a norms format.
@@ -246,16 +243,12 @@
     IndexWriterConfig conf = newIndexWriterConfig(analyzer);
     conf.setSimilarity(new CannedNormSimilarity(norms));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
-    Document doc = new Document();
-    Field idField = new StringField("id", "", Field.Store.NO);
-    Field storedField = newTextField("stored", "", Field.Store.YES);
-    doc.add(idField);
-    doc.add(storedField);
     
     for (int i = 0; i < numDocs; i++) {
-      idField.setStringValue(Integer.toString(i));
+      Document doc = writer.newDocument();
+      doc.addAtom("id", Integer.toString(i));
       long value = norms[i];
-      storedField.setStringValue(Long.toString(value));
+      doc.addLargeText("stored", Long.toString(value));
       writer.addDocument(doc);
       if (random().nextInt(31) == 0) {
         writer.commit();
@@ -277,7 +270,7 @@
       LeafReader r = context.reader();
       NumericDocValues docValues = r.getNormValues("stored");
       for (int i = 0; i < r.maxDoc(); i++) {
-        long storedValue = Long.parseLong(r.document(i).get("stored"));
+        long storedValue = Long.parseLong(r.document(i).getString("stored"));
         assertEquals("doc " + i, storedValue, docValues.get(i));
       }
     }
@@ -291,7 +284,7 @@
       LeafReader r = context.reader();
       NumericDocValues docValues = r.getNormValues("stored");
       for (int i = 0; i < r.maxDoc(); i++) {
-        long storedValue = Long.parseLong(r.document(i).get("stored"));
+        long storedValue = Long.parseLong(r.document(i).getString("stored"));
         assertEquals(storedValue, docValues.get(i));
       }
     }
@@ -333,8 +326,7 @@
   @Override
   protected void addRandomFields(Document doc) {
     // TODO: improve
-    doc.add(new TextField("foobar", TestUtil.randomSimpleString(random()), Field.Store.NO));
-    
+    doc.addLargeText("foobar", TestUtil.randomSimpleString(random()));
   }
 
   @Override
@@ -376,11 +368,11 @@
     int numDocs = atLeast(1000);
     List<Integer> toDelete = new ArrayList<>();
     for(int i=0;i<numDocs;i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", ""+i, Field.Store.NO));
+      Document doc = w.newDocument();
       if (random().nextInt(5) == 1) {
         toDelete.add(i);
-        doc.add(new TextField("content", "some content", Field.Store.NO));
+        doc.addAtom("id", ""+i);
+        doc.addLargeText("content", "some content");
       }
       w.addDocument(doc);
     }
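
The norms-test rewrite also drops the old trick of allocating one Field and mutating it with setStringValue between addDocument calls; with a writer-owned schema each document is simply built fresh. The per-document loop, as migrated above:

    for (int i = 0; i < numDocs; i++) {
      Document doc = writer.newDocument();     // fresh document per iteration
      doc.addAtom("id", Integer.toString(i));  // was idField.setStringValue(...)
      doc.addLargeText("stored", Long.toString(norms[i]));
      writer.addDocument(doc);
    }
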
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 6024c7e..3077948 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -44,8 +44,7 @@
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
@@ -61,8 +60,8 @@
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util.automaton.Automaton;
-import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.AutomatonTestUtil.RandomAcceptedStrings;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -1412,8 +1411,10 @@
     IndexWriterConfig iwc = newIndexWriterConfig(null);
     iwc.setCodec(getCodec());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("", "something", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    Document doc = iw.newDocument();
+    doc.addAtom("", "something");
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     LeafReader ar = getOnlySegmentReader(ir);
@@ -1437,15 +1438,17 @@
     IndexWriterConfig iwc = newIndexWriterConfig(null);
     iwc.setCodec(getCodec());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("", "", Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    Document doc = iw.newDocument();
+    doc.addAtom("", "");
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     LeafReader ar = getOnlySegmentReader(ir);
     Fields fields = ar.fields();
     int fieldCount = fields.size();
     // -1 is allowed, if the codec doesn't implement fields.size():
-    assertTrue(fieldCount == 1 || fieldCount == -1);
+    assertTrue("got fieldCount=" + fieldCount, fieldCount == 1 || fieldCount == -1);
     Terms terms = ar.terms("");
     assertNotNull(terms);
     TermsEnum termsEnum = terms.iterator(null);
@@ -1465,9 +1468,11 @@
     iwc.setCodec(getCodec());
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    Document doc = iw.newDocument();
     iw.addDocument(doc);
-    doc.add(newStringField("ghostField", "something", Field.Store.NO));
+    doc.addAtom("ghostField", "something");
     iw.addDocument(doc);
     iw.forceMerge(1);
     iw.deleteDocuments(new Term("ghostField", "something")); // delete the only term for the field
@@ -1690,7 +1695,7 @@
 
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
 
-    LineFileDocs docs = new LineFileDocs(random());
+    LineFileDocs docs = new LineFileDocs(w.w, random());
     int bytesToIndex = atLeast(100) * 1024;
     int bytesIndexed = 0;
     while (bytesIndexed < bytesToIndex) {
@@ -1735,16 +1740,17 @@
 
   @Override
   protected void addRandomFields(Document doc) {
+    FieldTypes fieldTypes = doc.getFieldTypes();
     for (IndexOptions opts : IndexOptions.values()) {
       if (opts == IndexOptions.NONE) {
         continue;
       }
-      FieldType ft = new FieldType();
-      ft.setIndexOptions(opts);
-      ft.freeze();
+      fieldTypes.setIndexOptions("f_" + opts, opts);
+      fieldTypes.disableHighlighting("f_" + opts);
+      fieldTypes.setMultiValued("f_" + opts);
       final int numFields = random().nextInt(5);
       for (int j = 0; j < numFields; ++j) {
-        doc.add(new Field("f_" + opts, TestUtil.randomSimpleString(random(), 2), ft));
+        doc.addLargeText("f_" + opts, TestUtil.randomSimpleString(random(), 2));
       }
     }
   }
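
addRandomFields now configures postings per field name through the schema: index options, highlighting, and multi-valued-ness are all FieldTypes calls rather than FieldType state. A compact sketch of that loop, assuming the branch methods shown in the hunk, where doc came from writer.newDocument():

    FieldTypes fieldTypes = doc.getFieldTypes();
    for (IndexOptions opts : IndexOptions.values()) {
      if (opts == IndexOptions.NONE) {
        continue;
      }
      String name = "f_" + opts;
      fieldTypes.setIndexOptions(name, opts);  // was FieldType.setIndexOptions + freeze
      fieldTypes.disableHighlighting(name);    // positions-only text, no offsets needed
      fieldTypes.setMultiValued(name);         // allow several values per document
      doc.addLargeText(name, "some text");
    }
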
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java
index 8a9a51d..d975e3c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseSegmentInfoFormatTestCase.java
@@ -26,7 +26,6 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.StoredField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.StringHelper;
@@ -215,7 +214,7 @@
   
   @Override
   protected void addRandomFields(Document doc) {
-    doc.add(new StoredField("foobar", TestUtil.randomSimpleString(random())));
+    doc.addStoredString("foobar", TestUtil.randomSimpleString(random()));
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index 375429f..c6fd137 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -34,20 +34,9 @@
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -58,7 +47,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
-
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
@@ -73,9 +61,11 @@
 
   @Override
   protected void addRandomFields(Document d) {
+    FieldTypes fieldTypes = d.getFieldTypes();
+    fieldTypes.setMultiValued("f");
     final int numValues = random().nextInt(3);
     for (int i = 0; i < numValues; ++i) {
-      d.add(new StoredField("f", TestUtil.randomSimpleString(random(), 100)));
+      d.addStoredString("f", TestUtil.randomSimpleString(random(), 100));
     }
   }
 
@@ -89,10 +79,6 @@
 
     final List<Integer> fieldIDs = new ArrayList<>();
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setTokenized(false);
-    Field idField = newField("id", "", customType);
-
     for(int i=0;i<fieldCount;i++) {
       fieldIDs.add(i);
     }
@@ -103,13 +89,10 @@
       System.out.println("TEST: build index docCount=" + docCount);
     }
 
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
     for(int i=0;i<docCount;i++) {
-      Document doc = new Document();
-      doc.add(idField);
+      Document doc = w.newDocument();
       final String id = ""+i;
-      idField.setStringValue(id);
+      doc.addAtom("id", id);
       docs.put(id, doc);
       if (VERBOSE) {
         System.out.println("TEST: add doc id=" + id);
@@ -119,7 +102,7 @@
         final String s;
         if (rand.nextInt(4) != 3) {
           s = TestUtil.randomUnicodeString(rand, 1000);
-          doc.add(newField("f"+field, s, customType2));
+          doc.addStoredString("f"+field, s);
         } else {
           s = null;
         }
@@ -161,7 +144,7 @@
           }
           TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
           assertEquals(1, hits.totalHits);
-          StoredDocument doc = r.document(hits.scoreDocs[0].doc);
+          Document doc = r.document(hits.scoreDocs[0].doc);
           Document docExp = docs.get(testID);
           for(int i=0;i<fieldCount;i++) {
             assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i),  doc.get("f"+i));
@@ -179,29 +162,29 @@
   public void testStoredFieldsOrder() throws Throwable {
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
+    Document doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("zzz");
 
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-    doc.add(newField("zzz", "a b c", customType));
-    doc.add(newField("aaa", "a b c", customType));
-    doc.add(newField("zzz", "1 2 3", customType));
+    doc.addStoredString("zzz", "a b c");
+    doc.addStoredString("aaa", "a b c");
+    doc.addStoredString("zzz", "1 2 3");
     w.addDocument(doc);
     IndexReader r = w.getReader();
-    StoredDocument doc2 = r.document(0);
-    Iterator<StorableField> it = doc2.getFields().iterator();
+    Document doc2 = r.document(0);
+    Iterator<IndexableField> it = doc2.iterator();
     assertTrue(it.hasNext());
-    Field f = (Field) it.next();
+    IndexableField f = it.next();
     assertEquals(f.name(), "zzz");
     assertEquals(f.stringValue(), "a b c");
 
     assertTrue(it.hasNext());
-    f = (Field) it.next();
+    f = it.next();
     assertEquals(f.name(), "aaa");
     assertEquals(f.stringValue(), "a b c");
 
     assertTrue(it.hasNext());
-    f = (Field) it.next();
+    f = it.next();
     assertEquals(f.name(), "zzz");
     assertEquals(f.stringValue(), "1 2 3");
     assertFalse(it.hasNext());
@@ -218,20 +201,17 @@
     for(int i=0;i<50;i++)
       b[i] = (byte) (i+77);
 
-    Document doc = new Document();
-    Field f = new StoredField("binary", b, 10, 17);
-    byte[] bx = f.binaryValue().bytes;
-    assertTrue(bx != null);
-    assertEquals(50, bx.length);
-    assertEquals(10, f.binaryValue().offset);
-    assertEquals(17, f.binaryValue().length);
-    doc.add(f);
+    Document doc = w.newDocument();
+    doc.addBinary("binary", new BytesRef(b, 10, 17));
+    BytesRef binaryValue = doc.getBinary("binary");
+    assertEquals(10, binaryValue.offset);
+    assertEquals(17, binaryValue.length);
     w.addDocument(doc);
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    StoredDocument doc2 = ir.document(0);
-    StorableField f2 = doc2.getField("binary");
+    Document doc2 = ir.document(0);
+    IndexableField f2 = doc2.getField("binary");
     b = f2.binaryValue().bytes;
     assertTrue(b != null);
     assertEquals(17, f2.binaryValue().length);
@@ -240,86 +220,12 @@
     dir.close();
   }
   
-  public void testNumericField() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    final int numDocs = atLeast(500);
-    final Number[] answers = new Number[numDocs];
-    final NumericType[] typeAnswers = new NumericType[numDocs];
-    for(int id=0;id<numDocs;id++) {
-      Document doc = new Document();
-      final Field nf;
-      final Field sf;
-      final Number answer;
-      final NumericType typeAnswer;
-      if (random().nextBoolean()) {
-        // float/double
-        if (random().nextBoolean()) {
-          final float f = random().nextFloat();
-          answer = Float.valueOf(f);
-          nf = new FloatField("nf", f, Field.Store.NO);
-          sf = new StoredField("nf", f);
-          typeAnswer = NumericType.FLOAT;
-        } else {
-          final double d = random().nextDouble();
-          answer = Double.valueOf(d);
-          nf = new DoubleField("nf", d, Field.Store.NO);
-          sf = new StoredField("nf", d);
-          typeAnswer = NumericType.DOUBLE;
-        }
-      } else {
-        // int/long
-        if (random().nextBoolean()) {
-          final int i = random().nextInt();
-          answer = Integer.valueOf(i);
-          nf = new IntField("nf", i, Field.Store.NO);
-          sf = new StoredField("nf", i);
-          typeAnswer = NumericType.INT;
-        } else {
-          final long l = random().nextLong();
-          answer = Long.valueOf(l);
-          nf = new LongField("nf", l, Field.Store.NO);
-          sf = new StoredField("nf", l);
-          typeAnswer = NumericType.LONG;
-        }
-      }
-      doc.add(nf);
-      doc.add(sf);
-      answers[id] = answer;
-      typeAnswers[id] = typeAnswer;
-      FieldType ft = new FieldType(IntField.TYPE_STORED);
-      ft.setNumericPrecisionStep(Integer.MAX_VALUE);
-      doc.add(new IntField("id", id, ft));
-      doc.add(new NumericDocValuesField("id", id));
-      w.addDocument(doc);
-    }
-    final DirectoryReader r = w.getReader();
-    w.close();
-    
-    assertEquals(numDocs, r.numDocs());
-
-    for(LeafReaderContext ctx : r.leaves()) {
-      final LeafReader sub = ctx.reader();
-      final NumericDocValues ids = DocValues.getNumeric(sub, "id");
-      for(int docID=0;docID<sub.numDocs();docID++) {
-        final StoredDocument doc = sub.document(docID);
-        final Field f = (Field) doc.getField("nf");
-        assertTrue("got f=" + f, f instanceof StoredField);
-        assertEquals(answers[(int) ids.get(docID)], f.numericValue());
-      }
-    }
-    r.close();
-    dir.close();
-  }
-
   public void testIndexedBit() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType onlyStored = new FieldType();
-    onlyStored.setStored(true);
-    doc.add(new Field("field", "value", onlyStored));
-    doc.add(new StringField("field2", "value", Field.Store.YES));
+    Document doc = w.newDocument();
+    doc.addStoredString("field", "value");
+    doc.addAtom("field2", "value");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
@@ -335,10 +241,6 @@
     iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
     
-    FieldType ft = new FieldType();
-    ft.setStored(true);
-    ft.freeze();
-
     final String string = TestUtil.randomSimpleString(random(), 50);
     final byte[] bytes = string.getBytes(StandardCharsets.UTF_8);
     final long l = random().nextBoolean() ? random().nextInt(42) : random().nextLong();
@@ -346,35 +248,45 @@
     final float f = random().nextFloat();
     final double d = random().nextDouble();
 
-    List<Field> fields = Arrays.asList(
-        new Field("bytes", bytes, ft),
-        new Field("string", string, ft),
-        new LongField("long", l, Store.YES),
-        new IntField("int", i, Store.YES),
-        new FloatField("float", f, Store.YES),
-        new DoubleField("double", d, Store.YES)
-    );
-
     for (int k = 0; k < 100; ++k) {
-      Document doc = new Document();
-      for (Field fld : fields) {
-        doc.add(fld);
-      }
+      Document doc = iw.newDocument();
+      doc.addStoredBinary("bytes", bytes);
+      doc.addStoredString("string", string);
+      doc.addInt("int", i);
+      doc.addLong("long", l);
+      doc.addFloat("float", f);
+      doc.addDouble("double", d);
       iw.w.addDocument(doc);
     }
     iw.commit();
 
     final DirectoryReader reader = DirectoryReader.open(dir);
     final int docID = random().nextInt(100);
-    for (Field fld : fields) {
-      String fldName = fld.name();
-      final StoredDocument sDoc = reader.document(docID, Collections.singleton(fldName));
-      final StorableField sField = sDoc.getField(fldName);
-      if (Field.class.equals(fld.getClass())) {
-        assertEquals(fld.binaryValue(), sField.binaryValue());
-        assertEquals(fld.stringValue(), sField.stringValue());
-      } else {
-        assertEquals(fld.numericValue(), sField.numericValue());
+    for (String fldName : new String[] {"bytes", "string", "int", "long", "float", "double"}) {
+      final Document sDoc = reader.document(docID, Collections.singleton(fldName));
+
+      final IndexableField sField = sDoc.getField(fldName);
+      switch (fldName) {
+      case "bytes":
+        assertEquals(new BytesRef(bytes), sField.binaryValue());
+        break;
+      case "string":
+        assertEquals(string, sField.stringValue());
+        break;
+      case "int":
+        assertEquals(i, sField.numericValue().intValue());
+        break;
+      case "long":
+        assertEquals(l, sField.numericValue().longValue());
+        break;
+      case "float":
+        assertEquals(f, sField.numericValue().floatValue(), 0.0f);
+        break;
+      case "double":
+        assertEquals(d, sField.numericValue().doubleValue(), 0.0);
+        break;
+      default:
+        assert false;
       }
     }
     reader.close();
@@ -389,7 +301,7 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
     
     // make sure that empty documents are not a problem
-    final Document emptyDoc = new Document();
+    final Document emptyDoc = iw.newDocument();
     final int numDocs = random().nextBoolean() ? 1 : atLeast(1000);
     for (int i = 0; i < numDocs; ++i) {
       iw.addDocument(emptyDoc);
@@ -397,7 +309,7 @@
     iw.commit();
     final DirectoryReader rd = DirectoryReader.open(dir);
     for (int i = 0; i < numDocs; ++i) {
-      final StoredDocument doc = rd.document(i);
+      final Document doc = rd.document(i);
       assertNotNull(doc);
       assertTrue(doc.getFields().isEmpty());
     }
@@ -414,12 +326,10 @@
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
     
     // make sure the readers are properly cloned
-    final Document doc = new Document();
-    final Field field = new StringField("fld", "", Store.YES);
-    doc.add(field);
     final int numDocs = atLeast(1000);
     for (int i = 0; i < numDocs; ++i) {
-      field.setStringValue("" + i);
+      final Document doc = iw.newDocument();
+      doc.addAtom("fld", "" + i);
       iw.addDocument(doc);
     }
     iw.commit();
@@ -451,7 +361,7 @@
               if (topDocs.totalHits != 1) {
                 throw new IllegalStateException("Expected 1 hit, got " + topDocs.totalHits);
               }
-              final StoredDocument sdoc = rd.document(topDocs.scoreDocs[0].doc);
+              final Document sdoc = rd.document(topDocs.scoreDocs[0].doc);
               if (sdoc == null || sdoc.get("fld") == null) {
                 throw new IllegalStateException("Could not find document " + q);
               }
@@ -500,7 +410,7 @@
     IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
     iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
-    
+    FieldTypes fieldTypes = iw.getFieldTypes();
     final int docCount = atLeast(200);
     final byte[][][] data = new byte [docCount][][];
     for (int i = 0; i < docCount; ++i) {
@@ -517,17 +427,11 @@
       }
     }
 
-    final FieldType type = new FieldType(StringField.TYPE_STORED);
-    type.setIndexOptions(IndexOptions.NONE);
-    type.freeze();
-    IntField id = new IntField("id", 0, Store.YES);
     for (int i = 0; i < data.length; ++i) {
-      Document doc = new Document();
-      doc.add(id);
-      id.setIntValue(i);
+      Document doc = iw.newDocument();
+      doc.addInt("id", i);
       for (int j = 0; j < data[i].length; ++j) {
-        Field f = new Field("bytes" + j, data[i][j], type);
-        doc.add(f);
+        doc.addStoredBinary("bytes" + j, new BytesRef(data[i][j]));
       }
       iw.w.addDocument(doc);
       if (random().nextBoolean() && (i % (data.length / 10) == 0)) {
@@ -547,7 +451,7 @@
     for (int i = 0; i < 10; ++i) {
       final int min = random().nextInt(data.length);
       final int max = min + random().nextInt(20);
-      iw.deleteDocuments(NumericRangeQuery.newIntRange("id", min, max, true, false));
+      iw.deleteDocuments(new ConstantScoreQuery(fieldTypes.newIntRangeFilter("id", min, true, max, false)));
     }
 
     iw.forceMerge(2); // force merges with deletions
@@ -558,7 +462,7 @@
     assertTrue(ir.numDocs() > 0);
     int numDocs = 0;
     for (int i = 0; i < ir.maxDoc(); ++i) {
-      final StoredDocument doc = ir.document(i);
+      final Document doc = ir.document(i);
       if (doc == null) {
         continue;
       }
@@ -567,7 +471,7 @@
       assertEquals(data[docId].length + 1, doc.getFields().size());
       for (int j = 0; j < data[docId].length; ++j) {
         final byte[] arr = data[docId][j];
-        final BytesRef arr2Ref = doc.getBinaryValue("bytes" + j);
+        final BytesRef arr2Ref = doc.getBinary("bytes" + j);
         final byte[] arr2 = Arrays.copyOfRange(arr2Ref.bytes, arr2Ref.offset, arr2Ref.offset + arr2Ref.length);
         assertArrayEquals(arr, arr2);
       }
@@ -625,15 +529,15 @@
     }
     Document[] docs = new Document[numDocs];
     for (int i = 0; i < numDocs; ++i) {
-      Document doc = new Document();
-      doc.add(new StringField("to_delete", random().nextBoolean() ? "yes" : "no", Store.NO));
-      doc.add(new StoredField("id", i));
-      doc.add(new StoredField("i", random().nextInt(50)));
-      doc.add(new StoredField("l", random().nextLong()));
-      doc.add(new StoredField("d", random().nextDouble()));
-      doc.add(new StoredField("f", random().nextFloat()));
-      doc.add(new StoredField("s", RandomPicks.randomFrom(random(), stringValues)));
-      doc.add(new StoredField("b", new BytesRef(RandomPicks.randomFrom(random(), stringValues))));
+      Document doc = w.newDocument();
+      doc.addAtom("to_delete", random().nextBoolean() ? "yes" : "no");
+      doc.addStoredInt("id", i);
+      doc.addStoredInt("i", random().nextInt(50));
+      doc.addStoredLong("l", random().nextLong());
+      doc.addStoredDouble("d", random().nextDouble());
+      doc.addStoredFloat("f", random().nextFloat());
+      doc.addStoredString("s", RandomPicks.randomFrom(random(), stringValues));
+      doc.addStoredBinary("b", new BytesRef(RandomPicks.randomFrom(random(), stringValues)));
       docs[i] = doc;
       w.addDocument(doc);
     }
@@ -653,7 +557,7 @@
 
     reader = w.getReader();
     for (int i = 0; i < reader.maxDoc(); ++i) {
-      final StoredDocument doc = reader.document(i);
+      final Document doc = reader.document(i);
       final int id = doc.getField("id").numericValue().intValue();
       final Document expected = docs[id];
       assertEquals(expected.get("s"), doc.get("s"));
@@ -680,43 +584,31 @@
     IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
     iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("fld");
 
     if (dir instanceof MockDirectoryWrapper) {
       ((MockDirectoryWrapper) dir).setThrottling(Throttling.NEVER);
     }
-
-    final Document emptyDoc = new Document(); // emptyDoc
-    final Document bigDoc1 = new Document(); // lot of small fields
-    final Document bigDoc2 = new Document(); // 1 very big field
-
-    final Field idField = new StringField("id", "", Store.NO);
-    emptyDoc.add(idField);
-    bigDoc1.add(idField);
-    bigDoc2.add(idField);
-
-    final FieldType onlyStored = new FieldType(StringField.TYPE_STORED);
-    onlyStored.setIndexOptions(IndexOptions.NONE);
-
-    final Field smallField = new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored);
     final int numFields = RandomInts.randomIntBetween(random(), 500000, 1000000);
-    for (int i = 0; i < numFields; ++i) {
-      bigDoc1.add(smallField);
-    }
-
-    final Field bigField = new Field("fld", randomByteArray(RandomInts.randomIntBetween(random(), 1000000, 5000000), 2), onlyStored);
-    bigDoc2.add(bigField);
-
     final int numDocs = atLeast(5);
-    final Document[] docs = new Document[numDocs];
+    Document[] docs = new Document[numDocs];
     for (int i = 0; i < numDocs; ++i) {
-      docs[i] = RandomPicks.randomFrom(random(), Arrays.asList(emptyDoc, bigDoc1, bigDoc2));
-    }
-    for (int i = 0; i < numDocs; ++i) {
-      idField.setStringValue("" + i);
-      iw.addDocument(docs[i]);
+      Document doc = iw.newDocument();
+      int x = random().nextInt(3);
+      doc.addAtom("id", "" + i);
+      if (x == 1) {
+        for (int j = 0; j < numFields; ++j) {
+          doc.addStoredBinary("fld", randomByteArray(random().nextInt(10), 256));
+        }
+      } else {
+        doc.addStoredBinary("fld", randomByteArray(RandomInts.randomIntBetween(random(), 1000000, 5000000), 2));
+      }
+      iw.addDocument(doc);
       if (random().nextInt(numDocs) == 0) {
         iw.commit();
       }
+      docs[i] = doc;
     }
     iw.commit();
     iw.forceMerge(1); // look at what happens when big docs are merged
@@ -726,12 +618,12 @@
       final Query query = new TermQuery(new Term("id", "" + i));
       final TopDocs topDocs = searcher.search(query, 1);
       assertEquals("" + i, 1, topDocs.totalHits);
-      final StoredDocument doc = rd.document(topDocs.scoreDocs[0].doc);
+      final Document doc = rd.document(topDocs.scoreDocs[0].doc);
       assertNotNull(doc);
-      final StorableField[] fieldValues = doc.getFields("fld");
-      assertEquals(docs[i].getFields("fld").length, fieldValues.length);
-      if (fieldValues.length > 0) {
-        assertEquals(docs[i].getFields("fld")[0].binaryValue(), fieldValues[0].binaryValue());
+      final List<IndexableField> fieldValues = doc.getFields("fld");
+      assertEquals(docs[i].getFields("fld").size(), fieldValues.size());
+      if (fieldValues.size() > 0) {
+        assertEquals(docs[i].getFields("fld").get(0).binaryValue(), fieldValues.get(0).binaryValue());
       }
     }
     rd.close();
@@ -744,9 +636,9 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));
     for (int i = 0; i < numDocs; ++i) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Store.YES));
-      doc.add(new StoredField("f", TestUtil.randomSimpleString(random())));
+      Document doc = w.newDocument();
+      doc.addAtom("id", Integer.toString(i));
+      doc.addAtom("f", TestUtil.randomSimpleString(random()));
       w.addDocument(doc);
     }
     final int deleteCount = TestUtil.nextInt(random(), 5, numDocs);
@@ -771,10 +663,10 @@
       Directory dir = newDirectory();
       IndexWriterConfig iwc = new IndexWriterConfig(null);
       IndexWriter iw = new IndexWriter(dir, iwc);
-      Document doc = new Document();
+      Document doc = iw.newDocument();
       for (int j = 0; j < 10; j++) {
        // add fields where name=value (e.g. 3=3) so we can detect if field values get mixed up.
-        doc.add(new StringField(Integer.toString(j), Integer.toString(j), Field.Store.YES));
+        doc.addAtom(Integer.toString(j), Integer.toString(j));
       }
       for (int j = 0; j < 10; j++) {
         iw.addDocument(doc);
@@ -801,10 +693,10 @@
     
     LeafReader ir = getOnlySegmentReader(DirectoryReader.open(iw, true));
     for (int i = 0; i < ir.maxDoc(); i++) {
-      StoredDocument doc = ir.document(i);
+      Document doc = ir.document(i);
       assertEquals(10, doc.getFields().size());
       for (int j = 0; j < 10; j++) {
-        assertEquals(Integer.toString(j), doc.get(Integer.toString(j)));
+        assertEquals(Integer.toString(j), doc.getString(Integer.toString(j)));
       }
     }
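
The deletions hunk above replaces NumericRangeQuery with a range filter built by the schema itself, wrapped in a ConstantScoreQuery so it can be passed to deleteDocuments. A hedged sketch, assuming newIntRangeFilter as it appears in the hunk (branch-only; trunk would use NumericRangeQuery.newIntRange), where iw is the RandomIndexWriter:

    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;

    FieldTypes fieldTypes = iw.getFieldTypes();
    // delete documents whose int field "id" falls in [min, max)
    Query q = new ConstantScoreQuery(fieldTypes.newIntRangeFilter("id", min, true, max, false));
    iw.deleteDocuments(q);
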
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
index 4f9f958..2d8e937 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
@@ -23,6 +23,7 @@
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
@@ -35,11 +36,7 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
@@ -48,7 +45,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.TestUtil;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 /**
@@ -85,23 +81,15 @@
     return RandomPicks.randomFrom(random(), new ArrayList<>(validOptions()));
   }
 
-  protected FieldType fieldType(Options options) {
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(options.positions);
-    ft.setStoreTermVectorOffsets(options.offsets);
-    ft.setStoreTermVectorPayloads(options.payloads);
-    ft.freeze();
-    return ft;
-  }
-
   @Override
   protected void addRandomFields(Document doc) {
     for (Options opts : validOptions()) {
-      FieldType ft = fieldType(opts);
       final int numFields = random().nextInt(5);
       for (int j = 0; j < numFields; ++j) {
-        doc.add(new Field("f_" + opts, TestUtil.randomSimpleString(random(), 2), ft));
+        LowSchemaField field = new LowSchemaField(doc.getFieldTypes().getIndexAnalyzer(),
+                                                  "f_" + opts, TestUtil.randomSimpleString(random(), 2), IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        field.enableTermVectors(opts.positions, opts.offsets, opts.payloads);
+        doc.add(field);
       }
     }
   }
@@ -327,7 +315,7 @@
   protected class RandomDocument {
 
     private final String[] fieldNames;
-    private final FieldType[] fieldTypes;
+    private final Options options;
     private final RandomTokenStream[] tokenStreams;
 
     protected RandomDocument(int fieldCount, int maxTermCount, Options options, String[] fieldNames, String[] sampleTerms, BytesRef[] sampleTermBytes) {
@@ -335,9 +323,8 @@
         throw new IllegalArgumentException();
       }
       this.fieldNames = new String[fieldCount];
-      fieldTypes = new FieldType[fieldCount];
       tokenStreams = new RandomTokenStream[fieldCount];
-      Arrays.fill(fieldTypes, fieldType(options));
+      this.options = options;
       final Set<String> usedFileNames = new HashSet<>();
       for (int i = 0; i < fieldCount; ++i) {
         do {
@@ -348,10 +335,15 @@
       }
     }
 
-    public Document toDocument() {
-      final Document doc = new Document();
+    public Document toDocument(IndexWriter w) {
+      final Document doc = w.newDocument();
       for (int i = 0; i < fieldNames.length; ++i) {
-        doc.add(new Field(fieldNames[i], tokenStreams[i], fieldTypes[i]));
+        LowSchemaField field = new LowSchemaField(doc.getFieldTypes().getIndexAnalyzer(),
+                                                  fieldNames[i], null, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        field.doNotStore();
+        field.setTokenStream(tokenStreams[i]);
+        field.enableTermVectors(options.positions, options.offsets, options.payloads);
+        doc.add(field);
       }
       return doc;
     }
@@ -400,7 +392,7 @@
     assertEquals(fields1, fields2);
 
     for (int i = 0; i < doc.fieldNames.length; ++i) {
-      assertEquals(doc.tokenStreams[i], doc.fieldTypes[i], fields.terms(doc.fieldNames[i]));
+      assertEquals(doc.tokenStreams[i], doc.options, fields.terms(doc.fieldNames[i]));
     }
   }
 
@@ -417,14 +409,14 @@
   private final ThreadLocal<DocsEnum> docsEnum = new ThreadLocal<>();
   private final ThreadLocal<DocsAndPositionsEnum> docsAndPositionsEnum = new ThreadLocal<>();
 
-  protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
+  protected void assertEquals(RandomTokenStream tk, Options options, Terms terms) throws IOException {
     assertEquals(1, terms.getDocCount());
     final int termCount = new HashSet<>(Arrays.asList(tk.terms)).size();
     assertEquals(termCount, terms.size());
     assertEquals(termCount, terms.getSumDocFreq());
-    assertEquals(ft.storeTermVectorPositions(), terms.hasPositions());
-    assertEquals(ft.storeTermVectorOffsets(), terms.hasOffsets());
-    assertEquals(ft.storeTermVectorPayloads() && tk.hasPayloads(), terms.hasPayloads());
+    assertEquals(options.positions, terms.hasPositions());
+    assertEquals(options.offsets, terms.hasOffsets());
+    assertEquals(options.payloads && tk.hasPayloads(), terms.hasPayloads());
     final Set<BytesRef> uniqueTerms = new HashSet<>();
     for (String term : tk.freqs.keySet()) {
       uniqueTerms.add(new BytesRef(term));
@@ -454,14 +446,14 @@
 
       bits.clear(0);
       DocsAndPositionsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsAndPositionsEnum.get());
-      assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
+      assertEquals(options.offsets || options.positions, docsAndPositionsEnum != null);
       if (docsAndPositionsEnum != null) {
         assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
       }
       bits.set(0);
 
       docsAndPositionsEnum = termsEnum.docsAndPositions(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum);
-      assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
+      assertEquals(options.offsets || options.positions, docsAndPositionsEnum != null);
       if (terms.hasPositions() || terms.hasOffsets()) {
         assertEquals(0, docsAndPositionsEnum.nextDoc());
         final int freq = docsAndPositionsEnum.freq();
@@ -530,7 +522,7 @@
   }
 
   protected Document addId(Document doc, String id) {
-    doc.add(new StringField("id", id, Store.NO));
+    doc.addAtom("id", id);
     return doc;
   }
 
@@ -544,13 +536,13 @@
     for (Options options : validOptions()) {
       final int numDocs = atLeast(200);
       final int docWithVectors = random().nextInt(numDocs);
-      final Document emptyDoc = new Document();
       final Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+      final Document emptyDoc = writer.newDocument();
       final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), 20, options);
       for (int i = 0; i < numDocs; ++i) {
         if (i == docWithVectors) {
-          writer.addDocument(addId(doc.toDocument(), "42"));
+          writer.addDocument(addId(doc.toDocument(writer.w), "42"));
         } else {
           writer.addDocument(emptyDoc);
         }
@@ -583,7 +575,7 @@
       final Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
       final RandomDocument doc = docFactory.newDocument(TestUtil.nextInt(random(), 1, 2), atLeast(20000), options);
-      writer.addDocument(doc.toDocument());
+      writer.addDocument(doc.toDocument(writer.w));
       final IndexReader reader = writer.getReader();
       assertEquals(doc, reader.getTermVectors(0));
       reader.close();
@@ -598,7 +590,7 @@
       final Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
       final RandomDocument doc = docFactory.newDocument(atLeast(100), 5, options);
-      writer.addDocument(doc.toDocument());
+      writer.addDocument(doc.toDocument(writer.w));
       final IndexReader reader = writer.getReader();
       assertEquals(doc, reader.getTermVectors(0));
       reader.close();
@@ -619,9 +611,9 @@
         final Directory dir = newDirectory();
         final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
         final RandomDocument doc1 = docFactory.newDocument(numFields, 20, options1);
-        final RandomDocument doc2 = docFactory.newDocument(numFields, 20,  options2);
-        writer.addDocument(addId(doc1.toDocument(), "1"));
-        writer.addDocument(addId(doc2.toDocument(), "2"));
+        final RandomDocument doc2 = docFactory.newDocument(numFields, 20, options2);
+        writer.addDocument(addId(doc1.toDocument(writer.w), "1"));
+        writer.addDocument(addId(doc2.toDocument(writer.w), "2"));
         final IndexReader reader = writer.getReader();
         final int doc1ID = docID(reader, "1");
         assertEquals(doc1, reader.getTermVectors(doc1ID));
@@ -644,7 +636,7 @@
     final Directory dir = newDirectory();
     final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < numDocs; ++i) {
-      writer.addDocument(addId(docs[i].toDocument(), ""+i));
+      writer.addDocument(addId(docs[i].toDocument(writer.w), ""+i));
     }
     final IndexReader reader = writer.getReader();
     for (int i = 0; i < numDocs; ++i) {
@@ -672,7 +664,7 @@
       final Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
       for (int i = 0; i < numDocs; ++i) {
-        writer.addDocument(addId(docs[i].toDocument(), ""+i));
+        writer.addDocument(addId(docs[i].toDocument(writer.w), ""+i));
         if (rarely()) {
           writer.commit();
         }
@@ -708,7 +700,7 @@
       final Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
       for (int i = 0; i < numDocs; ++i) {
-        writer.addDocument(addId(docs[i].toDocument(), ""+i));
+        writer.addDocument(addId(docs[i].toDocument(writer.w), ""+i));
       }
       final IndexReader reader = writer.getReader();
       for (int i = 0; i < numDocs; ++i) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
index 39ef70f..560c9c7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
@@ -19,211 +19,86 @@
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 
 class DocHelper {
-  
-  public static final FieldType customType;
+
   public static final String FIELD_1_TEXT = "field one text";
   public static final String TEXT_FIELD_1_KEY = "textField1";
-  public static Field textField1;
-  static {
-    customType = new FieldType(TextField.TYPE_STORED);
-    textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT, customType);
-  }
 
-  public static final FieldType customType2;
   public static final String FIELD_2_TEXT = "field field field two text";
   //Fields will be lexicographically sorted.  So, the order is: field, text, two
   public static final int [] FIELD_2_FREQS = {3, 1, 1}; 
   public static final String TEXT_FIELD_2_KEY = "textField2";
-  public static Field textField2;
-  static {
-    customType2 = new FieldType(TextField.TYPE_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, customType2);
-  }
   
-  public static final FieldType customType3;
   public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms";
   public static final String TEXT_FIELD_3_KEY = "textField3";
-  public static Field textField3;
   
-  static {
-    customType3 = new FieldType(TextField.TYPE_STORED);
-    customType3.setOmitNorms(true);
-    textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, customType3);
-  }
-
   public static final String KEYWORD_TEXT = "Keyword";
   public static final String KEYWORD_FIELD_KEY = "keyField";
-  public static Field keyField;
-  static {
-    keyField = new StringField(KEYWORD_FIELD_KEY, KEYWORD_TEXT, Field.Store.YES);
-  }
 
-  public static final FieldType customType5;
   public static final String NO_NORMS_TEXT = "omitNormsText";
   public static final String NO_NORMS_KEY = "omitNorms";
-  public static Field noNormsField;
-  static {
-    customType5 = new FieldType(TextField.TYPE_STORED);
-    customType5.setOmitNorms(true);
-    customType5.setTokenized(false);
-    noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT, customType5);
-  }
 
-  public static final FieldType customType6;
   public static final String NO_TF_TEXT = "analyzed with no tf and positions";
   public static final String NO_TF_KEY = "omitTermFreqAndPositions";
-  public static Field noTFField;
-  static {
-    customType6 = new FieldType(TextField.TYPE_STORED);
-    customType6.setIndexOptions(IndexOptions.DOCS);
-    noTFField = new Field(NO_TF_KEY, NO_TF_TEXT, customType6);
-  }
 
-  public static final FieldType customType7;
   public static final String UNINDEXED_FIELD_TEXT = "unindexed field text";
   public static final String UNINDEXED_FIELD_KEY = "unIndField";
-  public static Field unIndField;
-  static {
-    customType7 = new FieldType();
-    customType7.setStored(true);
-    unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT, customType7);
-  }
-
 
   public static final String UNSTORED_1_FIELD_TEXT = "unstored field text";
   public static final String UNSTORED_FIELD_1_KEY = "unStoredField1";
-  public static Field unStoredField1 = new TextField(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, Field.Store.NO);
 
-  public static final FieldType customType8;
   public static final String UNSTORED_2_FIELD_TEXT = "unstored field text";
   public static final String UNSTORED_FIELD_2_KEY = "unStoredField2";
-  public static Field unStoredField2;
-  static {
-    customType8 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType8.setStoreTermVectors(true);
-    unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT, customType8);
-  }
 
   public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary";
-  public static byte [] LAZY_FIELD_BINARY_BYTES;
-  public static Field lazyFieldBinary;
+  public static final BytesRef LAZY_FIELD_BINARY_BYTES = new BytesRef("These are some binary field bytes");
 
   public static final String LAZY_FIELD_KEY = "lazyField";
   public static final String LAZY_FIELD_TEXT = "These are some field bytes";
-  public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, customType);
   
   public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField";
   public static String LARGE_LAZY_FIELD_TEXT;
-  public static Field largeLazyField;
   
   //From Issue 509
   public static final String FIELD_UTF1_TEXT = "field one \u4e00text";
   public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8";
-  public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, customType);
 
   public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text";
   //Fields will be lexicographically sorted.  So, the order is: field, text, two
   public static final int [] FIELD_UTF2_FREQS = {3, 1, 1};
   public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8";
-  public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, customType2);
  
-  
-  
-  
   public static Map<String,Object> nameValues = null;
 
-  // ordered list of all the fields...
-  // could use LinkedHashMap for this purpose if Java1.4 is OK
-  public static Field[] fields = new Field[] {
-    textField1,
-    textField2,
-    textField3,
-    keyField,
-    noNormsField,
-    noTFField,
-    unIndField,
-    unStoredField1,
-    unStoredField2,
-    textUtfField1,
-    textUtfField2,
-    lazyField,
-    lazyFieldBinary,//placeholder for binary field, since this is null.  It must be second to last.
-    largeLazyField//placeholder for large field, since this is null.  It must always be last
-  };
-
-  public static Map<String,IndexableField> all     =new HashMap<>();
-  public static Map<String,IndexableField> indexed =new HashMap<>();
-  public static Map<String,IndexableField> stored  =new HashMap<>();
-  public static Map<String,IndexableField> unstored=new HashMap<>();
-  public static Map<String,IndexableField> unindexed=new HashMap<>();
-  public static Map<String,IndexableField> termvector=new HashMap<>();
-  public static Map<String,IndexableField> notermvector=new HashMap<>();
-  public static Map<String,IndexableField> lazy= new HashMap<>();
-  public static Map<String,IndexableField> noNorms=new HashMap<>();
-  public static Map<String,IndexableField> noTf=new HashMap<>();
-
   static {
     //Initialize the large Lazy Field
     StringBuilder buffer = new StringBuilder();
-    for (int i = 0; i < 10000; i++)
-    {
+    for (int i = 0; i < 10000; i++) {
       buffer.append("Lazily loading lengths of language in lieu of laughing ");
     }
     
-    try {
-      LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8");
-    } catch (UnsupportedEncodingException e) {
-    }
-    lazyFieldBinary = new StoredField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
-    fields[fields.length - 2] = lazyFieldBinary;
     LARGE_LAZY_FIELD_TEXT = buffer.toString();
-    largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, customType);
-    fields[fields.length - 1] = largeLazyField;
-    for (int i=0; i<fields.length; i++) {
-      IndexableField f = fields[i];
-      add(all,f);
-      if (f.fieldType().indexOptions() != IndexOptions.NONE) add(indexed,f);
-      else add(unindexed,f);
-      if (f.fieldType().storeTermVectors()) add(termvector,f);
-      if (f.fieldType().indexOptions() != IndexOptions.NONE && !f.fieldType().storeTermVectors()) add(notermvector,f);
-      if (f.fieldType().stored()) add(stored,f);
-      else add(unstored,f);
-      if (f.fieldType().indexOptions() == IndexOptions.DOCS) add(noTf,f);
-      if (f.fieldType().omitNorms()) add(noNorms,f);
-      if (f.fieldType().indexOptions() == IndexOptions.DOCS) add(noTf,f);
-      //if (f.isLazy()) add(lazy, f);
-    }
   }
 
-
-  private static void add(Map<String,IndexableField> map, IndexableField field) {
-    map.put(field.name(), field);
-  }
-
-
-  static
-  {
+  static {
     nameValues = new HashMap<>();
     nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
     nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
@@ -239,26 +114,132 @@
     nameValues.put(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT);
     nameValues.put(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT);
     nameValues.put(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT);
-  }   
-  
+  }
+
   /**
    * Adds the fields above to a document.
    * @param fieldTypes the schema settings to configure
    * @param doc the document to populate
    */ 
-  public static void setupDoc(Document doc) {
-    for (int i=0; i<fields.length; i++) {
-      doc.add(fields[i]);
-    }
+  private static void setupDoc(FieldTypes fieldTypes, Document doc) {
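+    // Field settings now live in the writer-level FieldTypes schema instead
+    // of per-field FieldType instances: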
+
+    fieldTypes.enableTermVectors(TEXT_FIELD_2_KEY);
+    fieldTypes.disableHighlighting(TEXT_FIELD_2_KEY);
+    fieldTypes.enableTermVectorPositions(TEXT_FIELD_2_KEY);
+    fieldTypes.enableTermVectorOffsets(TEXT_FIELD_2_KEY);
+
+    fieldTypes.enableTermVectors(TEXT_FIELD_UTF2_KEY);
+    fieldTypes.disableHighlighting(TEXT_FIELD_UTF2_KEY);
+    fieldTypes.enableTermVectorPositions(TEXT_FIELD_UTF2_KEY);
+    fieldTypes.enableTermVectorOffsets(TEXT_FIELD_UTF2_KEY);
+
+    fieldTypes.disableHighlighting(TEXT_FIELD_3_KEY);
+    fieldTypes.disableNorms(TEXT_FIELD_3_KEY);
+
+    fieldTypes.disableHighlighting(NO_NORMS_KEY);
+    fieldTypes.setIndexOptions(NO_NORMS_KEY, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.disableNorms(NO_NORMS_KEY);
+
+    // Otherwise large text:
+    fieldTypes.disableHighlighting(NO_TF_KEY);
+    fieldTypes.setIndexOptions(NO_TF_KEY, IndexOptions.DOCS);
+
+    // Otherwise large text:
+    fieldTypes.disableStored(UNSTORED_FIELD_1_KEY);
+
+    // Otherwise large text:
+    fieldTypes.enableTermVectors(UNSTORED_FIELD_2_KEY);
+    fieldTypes.disableStored(UNSTORED_FIELD_2_KEY);
+
+    doc.addLargeText(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
+    doc.addLargeText(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
+    doc.addLargeText(TEXT_FIELD_3_KEY, FIELD_3_TEXT);
+    doc.addAtom(KEYWORD_FIELD_KEY, KEYWORD_TEXT);
+    doc.addLargeText(NO_NORMS_KEY, NO_NORMS_TEXT);
+    doc.addLargeText(NO_TF_KEY, NO_TF_TEXT);
+    doc.addStoredString(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT);
+    doc.addLargeText(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT);
+    doc.addLargeText(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT);
+    doc.addLargeText(LAZY_FIELD_KEY, LAZY_FIELD_TEXT);
+    doc.addStoredBinary(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
+    doc.addLargeText(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT);
+    doc.addLargeText(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT);
+    doc.addLargeText(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT);
   }                         
 
+  public static Set<String> getUnstored(FieldTypes fieldTypes) {
+    Set<String> unstored = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getStored(fieldName) == false) {
+        unstored.add(fieldName);
+      }
+    }
+    return unstored;
+  }
+
+  public static Set<String> getIndexed(FieldTypes fieldTypes) {
+    Set<String> indexed = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getIndexOptions(fieldName) != IndexOptions.NONE) {
+        indexed.add(fieldName);
+      }
+    }
+    return indexed;
+  }
+
+  public static Set<String> getNotIndexed(FieldTypes fieldTypes) {
+    Set<String> notIndexed = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getIndexOptions(fieldName) == IndexOptions.NONE) {
+        notIndexed.add(fieldName);
+      }
+    }
+    return notIndexed;
+  }
+
+  public static Set<String> getTermVectorFields(FieldTypes fieldTypes) {
+    Set<String> tvFields = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getTermVectors(fieldName)) {
+        tvFields.add(fieldName);
+      }
+    }
+    return tvFields;
+  }
+
+  public static Set<String> getNoTermVectorFields(FieldTypes fieldTypes) {
+    Set<String> noTVFields = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getIndexOptions(fieldName) != IndexOptions.NONE && fieldTypes.getTermVectors(fieldName) == false) {
+        noTVFields.add(fieldName);
+      }
+    }
+    return noTVFields;
+  }
+
+  public static Set<String> getNoNorms(FieldTypes fieldTypes) {
+    Set<String> noNorms = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      if (fieldTypes.getNorms(fieldName) == false) {
+        noNorms.add(fieldName);
+      }
+    }
+    return noNorms;
+  }
+
+  public static Set<String> getAll(FieldTypes fieldTypes) {
+    Set<String> all = new HashSet<>();
+    for(String fieldName : fieldTypes.getFieldNames()) {
+      all.add(fieldName);
+    }
+    return all;
+  }
+
   /**
-   * Writes the document to the directory using a segment
-   * named "test"; returns the SegmentInfo describing the new
-   * segment 
+   * Writes our test document to the directory and returns the SegmentCommitInfo
+   * describing the new segment 
    */ 
-  public static SegmentCommitInfo writeDoc(Random random, Directory dir, Document doc) throws IOException
-  {
-    return writeDoc(random, dir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), null, doc);
+  public static SegmentCommitInfo writeDoc(Random random, Directory dir) throws IOException {
+    return writeDoc(random, dir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), null);
   }
 
   /**
@@ -266,10 +247,11 @@
    * and the similarity score; returns the SegmentInfo
    * describing the new segment
    */ 
-  public static SegmentCommitInfo writeDoc(Random random, Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException {
+  public static SegmentCommitInfo writeDoc(Random random, Directory dir, Analyzer analyzer, Similarity similarity) throws IOException {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( /* LuceneTestCase.newIndexWriterConfig(random, */ 
         analyzer).setSimilarity(similarity == null ? IndexSearcher.getDefaultSimilarity() : similarity));
-    //writer.setNoCFSRatio(0.0);
+    Document doc = writer.newDocument();
+    setupDoc(writer.getFieldTypes(), doc);
     writer.addDocument(doc);
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
@@ -277,36 +259,37 @@
     return info;
   }
 
+  public static int numFields() {
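+    // setupDoc adds 14 fields to its document; keep this count in sync.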
+    return 14;
+  }
+
   public static int numFields(Document doc) {
     return doc.getFields().size();
   }
-
-  public static int numFields(StoredDocument doc) {
-    return doc.getFields().size();
-  }
   
-  public static Document createDocument(int n, String indexName, int numFields) {
+  public static Document createDocument(IndexWriter writer, int n, String indexName, int numFields) {
     StringBuilder sb = new StringBuilder();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
 
-    FieldType customType1 = new FieldType(StringField.TYPE_STORED);
-    customType1.setStoreTermVectors(true);
-    customType1.setStoreTermVectorPositions(true);
-    customType1.setStoreTermVectorOffsets(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(int i=0;i<numFields+1;i++) {
+      fieldTypes.enableTermVectors("field" + (i+1));
+      fieldTypes.enableTermVectorPositions("field" + (i+1));
+      fieldTypes.enableTermVectorOffsets("field" + (i+1));
+    }
+    fieldTypes.enableTermVectors("id");
+    fieldTypes.enableTermVectorPositions("id");
+    fieldTypes.enableTermVectorOffsets("id");
 
-    final Document doc = new Document();
-    doc.add(new Field("id", Integer.toString(n), customType1));
-    doc.add(new Field("indexname", indexName, customType1));
+    Document doc = writer.newDocument();
+    doc.addAtom("id", Integer.toString(n));
+    doc.addAtom("indexname", indexName);
     sb.append("a");
     sb.append(n);
-    doc.add(new Field("field1", sb.toString(), customType));
+    doc.addLargeText("field1", sb.toString());
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(new Field("field" + (i + 1), sb.toString(), customType));
+      doc.addLargeText("field" + (i+1), sb.toString());
     }
     return doc;
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
index 74f2936..dd9e994 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
@@ -19,9 +19,11 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FilterIterator;
 
@@ -34,18 +36,22 @@
   private final Set<String> fields;
   private final boolean negate;
   private final FieldInfos fieldInfos;
+  private final FieldTypes fieldTypes;
 
   public FieldFilterLeafReader(LeafReader in, Set<String> fields, boolean negate) {
     super(in);
     this.fields = fields;
     this.negate = negate;
     ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
+    Set<String> actualFields = new HashSet<>();
     for (FieldInfo fi : in.getFieldInfos()) {
       if (hasField(fi.name)) {
-        filteredInfos.add(fi);
+        filteredInfos.add(fi);  
+        actualFields.add(fi.name);
       }
     }
     fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
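+    // expose schema entries only for the fields that survive the filter: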
+    fieldTypes = new FieldTypes(in.getFieldTypes(), actualFields);
   }
   
   boolean hasField(String field) {
@@ -58,6 +64,11 @@
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return fieldTypes;
+  }
+
+  @Override
   public Fields getTermVectors(int docID) throws IOException {
     Fields f = super.getTermVectors(docID);
     if (f == null) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index 19c3fc4..fa0f466 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -25,8 +25,10 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.InfoStream;
@@ -97,23 +99,31 @@
     // any forced merges:
     doRandomForceMerge = !(c.getMergePolicy() instanceof NoMergePolicy) && r.nextBoolean();
   } 
-  
+
+  public FieldTypes getFieldTypes() {
+    return w.getFieldTypes();
+  }
+
+  public Document newDocument() {
+    return w.newDocument();
+  }
+
   /**
    * Adds a Document.
    * @see IndexWriter#addDocument(Iterable)
    */
-  public <T extends IndexableField> void addDocument(final IndexDocument doc) throws IOException {
+  public <T extends IndexableField> void addDocument(final Iterable<T> doc) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     if (r.nextInt(5) == 3) {
       // TODO: maybe, we should simply buffer up added docs
       // (but we need to clone them), and only when
       // getReader, commit, etc. are called, we do an
       // addDocuments?  Would be better testing.
-      w.addDocuments(new Iterable<IndexDocument>() {
+      w.addDocuments(new Iterable<Iterable<T>>() {
 
         @Override
-        public Iterator<IndexDocument> iterator() {
-          return new Iterator<IndexDocument>() {
+        public Iterator<Iterable<T>> iterator() {
+          return new Iterator<Iterable<T>>() {
             boolean done;
             
             @Override
@@ -127,7 +137,7 @@
             }
 
             @Override
-            public IndexDocument next() {
+            public Iterable<T> next() {
               if (done) {
                 throw new IllegalStateException();
               }
@@ -159,13 +169,13 @@
     }
   }
   
-  public void addDocuments(Iterable<? extends IndexDocument> docs) throws IOException {
+  public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     w.addDocuments(docs);
     maybeCommit();
   }
 
-  public void updateDocuments(Term delTerm, Iterable<? extends IndexDocument> docs) throws IOException {
+  public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     w.updateDocuments(delTerm, docs);
     maybeCommit();
@@ -175,14 +185,14 @@
    * Updates a document.
    * @see IndexWriter#updateDocument(Term, Iterable)
    */
-  public <T extends IndexableField> void updateDocument(Term t, final IndexDocument doc) throws IOException {
+  public <T extends IndexableField> void updateDocument(Term t, final Iterable<T> doc) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     if (r.nextInt(5) == 3) {
-      w.updateDocuments(t, new Iterable<IndexDocument>() {
+      w.updateDocuments(t, new Iterable<Iterable<T>>() {
 
         @Override
-        public Iterator<IndexDocument> iterator() {
-          return new Iterator<IndexDocument>() {
+        public Iterator<Iterable<T>> iterator() {
+          return new Iterator<Iterable<T>>() {
             boolean done;
             
             @Override
@@ -196,7 +206,7 @@
             }
 
             @Override
-            public IndexDocument next() {
+            public Iterable<T> next() {
               if (done) {
                 throw new IllegalStateException();
               }
@@ -232,7 +242,7 @@
     w.updateBinaryDocValue(term, field, value);
   }
   
-  public void updateDocValues(Term term, Field... updates) throws IOException {
+  public void updateDocValues(Term term, Iterable<? extends IndexableField> updates) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     w.updateDocValues(term, updates);
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
index ef87bb7..322c5fb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
@@ -28,7 +28,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -91,19 +90,19 @@
     return in;
   }
 
-  protected void updateDocuments(Term id, List<? extends IndexDocument> docs) throws Exception {
+  protected void updateDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
     writer.updateDocuments(id, docs);
   }
 
-  protected void addDocuments(Term id, List<? extends IndexDocument> docs) throws Exception {
+  protected void addDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs) throws Exception {
     writer.addDocuments(docs);
   }
 
-  protected void addDocument(Term id, IndexDocument doc) throws Exception {
+  protected void addDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
     writer.addDocument(doc);
   }
 
-  protected void updateDocument(Term term, IndexDocument doc) throws Exception {
+  protected void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws Exception {
     writer.updateDocument(term, doc);
   }
 
@@ -149,17 +148,10 @@
                 }
 
                 Document doc = docs.nextDoc();
-                if (doc == null) {
-                  break;
-                }
-
+                
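+                // docs.nextDoc() never returns null: LineFileDocs rewinds at EOF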
                 // Maybe add randomly named field
-                final String addedField;
                 if (random().nextBoolean()) {
-                  addedField = "extra" + random().nextInt(40);
-                  doc.add(newTextField(addedField, "a random field", Field.Store.YES));
-                } else {
-                  addedField = null;
+                  doc.addLargeText("extra" + random().nextInt(40), "a random field");
                 }
 
                 if (random().nextBoolean()) {
@@ -180,24 +172,21 @@
                       packID = packCount.getAndIncrement() + "";
                     }
 
-                    final Field packIDField = newStringField("packID", packID, Field.Store.YES);
                     final List<String> docIDs = new ArrayList<>();
                     final SubDocs subDocs = new SubDocs(packID, docIDs);
                     final List<Document> docsList = new ArrayList<>();
 
                     allSubDocs.add(subDocs);
-                    doc.add(packIDField);
-                    docsList.add(TestUtil.cloneDocument(doc));
-                    docIDs.add(doc.get("docid"));
+                    doc.addAtom("packID", packID);
+                    docsList.add(doc);
+                    docIDs.add(doc.getString("docid"));
 
                     final int maxDocCount = TestUtil.nextInt(random(), 1, 10);
                     while(docsList.size() < maxDocCount) {
                       doc = docs.nextDoc();
-                      if (doc == null) {
-                        break;
-                      }
-                      docsList.add(TestUtil.cloneDocument(doc));
-                      docIDs.add(doc.get("docid"));
+                      doc.addAtom("packID", packID);
+                      docsList.add(doc);
+                      docIDs.add(doc.getString("docid"));
                     }
                     addCount.addAndGet(docsList.size());
 
@@ -217,7 +206,6 @@
                       }
                       addDocuments(packIDTerm, docsList);
                     }
-                    doc.removeField("packID");
 
                     if (random().nextInt(5) == 2) {
                       if (VERBOSE) {
@@ -228,7 +216,7 @@
 
                   } else {
                     // Add single doc
-                    final String docid = doc.get("docid");
+                    final String docid = doc.getString("docid");
                     if (VERBOSE) {
                       System.out.println(Thread.currentThread().getName() + ": add doc docid:" + docid);
                     }
@@ -250,7 +238,7 @@
                   if (VERBOSE) {
                     System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("docid"));
                   }
-                  final String docid = doc.get("docid");
+                  final String docid = doc.getString("docid");
                   updateDocument(new Term("docid", docid), doc);
                   addCount.getAndIncrement();
 
@@ -292,9 +280,6 @@
                   }
                   toDeleteSubDocs.clear();
                 }
-                if (addedField != null) {
-                  doc.removeField(addedField);
-                }
               } catch (Throwable t) {
                 System.out.println(Thread.currentThread().getName() + ": hit exc");
                 t.printStackTrace();
@@ -432,7 +417,6 @@
     final long t0 = System.currentTimeMillis();
 
     Random random = new Random(random().nextLong());
-    final LineFileDocs docs = new LineFileDocs(random, true);
     final Path tempDir = createTempDir(testName);
     dir = getDirectory(newMockFSDirectory(tempDir)); // some subclasses rely on this being MDW
     if (dir instanceof BaseDirectoryWrapper) {
@@ -478,7 +462,7 @@
         final int inc = Math.max(1, maxDoc/50);
         for(int docID=0;docID<maxDoc;docID += inc) {
           if (liveDocs == null || liveDocs.get(docID)) {
-            final StoredDocument doc = reader.document(docID);
+            final Document doc = reader.document(docID);
             sum += doc.getFields().size();
           }
         }
@@ -504,6 +488,7 @@
         });
     }
     writer = new IndexWriter(dir, conf);
+    final LineFileDocs docs = new LineFileDocs(writer, random);
     TestUtil.reduceOpenFiles(writer);
 
     final ExecutorService es = random().nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName));
@@ -589,7 +574,7 @@
               startDocID = docID;
             }
             lastDocID = docID;
-            final StoredDocument doc = s.doc(docID);
+            final Document doc = s.doc(docID);
             assertEquals(subDocs.packID, doc.get("packID"));
           }
 
@@ -616,7 +601,7 @@
 
     // Verify: make sure all not-deleted docs are in fact
     // not deleted:
-    final int endID = Integer.parseInt(docs.nextDoc().get("docid"));
+    final int endID = Integer.parseInt(docs.nextDoc().getString("docid"));
     docs.close();
 
     for(int id=0;id<endID;id++) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java
index 45b8a7d..0466dc0 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/BaseExplanationTestCase.java
@@ -19,8 +19,6 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -31,7 +29,6 @@
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -70,15 +67,12 @@
   @BeforeClass
   public static void beforeClassTestExplanations() throws Exception {
     directory = newDirectory();
-    RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     for (int i = 0; i < docFields.length; i++) {
-      Document doc = new Document();
-      doc.add(newStringField(KEY, ""+i, Field.Store.NO));
-      doc.add(new SortedDocValuesField(KEY, new BytesRef(""+i)));
-      Field f = newTextField(FIELD, docFields[i], Field.Store.NO);
-      f.setBoost(i);
-      doc.add(f);
-      doc.add(newTextField(ALTFIELD, docFields[i], Field.Store.NO));
+      Document doc = writer.newDocument();
+      doc.addAtom(KEY, ""+i);
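+      // the float argument is the field boost, replacing the old setBoost(i):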
+      doc.addLargeText(FIELD, docFields[i], (float) i);
+      doc.addLargeText(ALTFIELD, docFields[i]);
       writer.addDocument(doc);
     }
     reader = writer.getReader();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index c1b02ae..55bfa8c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -24,7 +24,6 @@
 import junit.framework.Assert;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.AllDeletedFilterReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -137,7 +136,7 @@
     public FCInvisibleMultiReader(IndexReader... readers) {
       super(readers);
     }
-    
+
     @Override
     public Object getCoreCacheKey() {
       return cacheKey;
@@ -203,7 +202,7 @@
     }
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random)));
     for (int i = 0; i < numDocs; i++) {
-      w.addDocument(new Document());
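+      // documents must now be created through the writer; an empty one suffices here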
+      w.addDocument(w.newDocument());
     }
     w.forceMerge(1);
     w.close();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java
index 8025f52..ad42928 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java
@@ -24,9 +24,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -60,17 +57,13 @@
     CharacterRunAutomaton stopset = new CharacterRunAutomaton(Automata.makeString(stopword));
     analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopset);
     RandomIndexWriter iw = new RandomIndexWriter(random, directory, analyzer);
-    Document doc = new Document();
-    Field id = new StringField("id", "", Field.Store.NO);
-    Field field = new TextField("field", "", Field.Store.NO);
-    doc.add(id);
-    doc.add(field);
     
     // index some docs
     int numDocs = atLeast(1000);
     for (int i = 0; i < numDocs; i++) {
-      id.setStringValue(Integer.toString(i));
-      field.setStringValue(randomFieldContents());
+      Document doc = iw.newDocument();
+      doc.addUniqueAtom("id", Integer.toString(i));
+      doc.addLargeText("field", randomFieldContents());
       iw.addDocument(doc);
     }
     
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
index 9bb3cd8..1db0cb8 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
@@ -197,6 +197,7 @@
   protected final class NodeState implements Closeable {
     public final Directory dir;
     public final IndexWriter writer;
+    public final LineFileDocs docs;
     public final SearcherLifetimeManager searchers;
     public final SearcherManager mgr;
     public final int myNodeID;
@@ -457,6 +458,7 @@
         iwc.setInfoStream(new PrintStreamInfoStream(System.out));
       }
       writer = new IndexWriter(dir, iwc);
+      docs = new LineFileDocs(writer, random());
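+      // each node needs its own LineFileDocs now that docs are created through the node's writer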
       mgr = new SearcherManager(writer, true, null);
       searchers = new SearcherLifetimeManager();
 
@@ -549,17 +551,16 @@
     @Override
     public void run() {
       try {
-        final LineFileDocs docs = new LineFileDocs(random(), true);
         int numDocs = 0;
         while (System.nanoTime() < endTimeNanos) {
           final int what = random().nextInt(3);
           final NodeState node = nodes[random().nextInt(nodes.length)];
           if (numDocs == 0 || what == 0) {
-            node.writer.addDocument(docs.nextDoc());
+            node.writer.addDocument(node.docs.nextDoc());
             numDocs++;
           } else if (what == 1) {
             node.writer.updateDocument(new Term("docid", ""+random().nextInt(numDocs)),
-                                        docs.nextDoc());
+                                       node.docs.nextDoc());
             numDocs++;
           } else {
             node.writer.deleteDocuments(new Term("docid", ""+random().nextInt(numDocs)));
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
index e0d06dc..36771f2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
@@ -35,14 +35,8 @@
 import java.util.zip.GZIPInputStream;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.IndexWriter;
 
 /** Minimal port of benchmark's LineDocSource +
  * DocMaker, so tests can enum docs from a line file created
@@ -53,22 +47,29 @@
   private final static int BUFFER_SIZE = 1 << 16;     // 64K
   private final AtomicInteger id = new AtomicInteger();
   private final String path;
-  private final boolean useDocValues;
+  private IndexWriter w;
 
   /** The file is rewound at EOF, so the docs repeat
    * over and over. */
-  public LineFileDocs(Random random, String path, boolean useDocValues) throws IOException {
+  public LineFileDocs(IndexWriter w, Random random, String path) throws IOException {
+    this.w = w;
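+    // Register the schema once up front, replacing the old per-thread DocState: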
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("body");
+    fieldTypes.enableTermVectorPositions("body");
+    fieldTypes.enableTermVectorOffsets("body");
+    fieldTypes.disableHighlighting("body");
+    fieldTypes.disableSorting("docid_int");
+
     this.path = path;
-    this.useDocValues = useDocValues;
     open(random);
   }
 
-  public LineFileDocs(Random random) throws IOException {
-    this(random, LuceneTestCase.TEST_LINE_DOCS_FILE, true);
+  public LineFileDocs(IndexWriter w, Random random) throws IOException {
+    this(w, random, LuceneTestCase.TEST_LINE_DOCS_FILE);
   }
 
-  public LineFileDocs(Random random, boolean useDocValues) throws IOException {
-    this(random, LuceneTestCase.TEST_LINE_DOCS_FILE, useDocValues);
+  public void setIndexWriter(IndexWriter w) {
+    this.w = w;
   }
 
   @Override
@@ -155,58 +156,6 @@
 
   private final static char SEP = '\t';
 
-  private static final class DocState {
-    final Document doc;
-    final Field titleTokenized;
-    final Field title;
-    final Field titleDV;
-    final Field body;
-    final Field id;
-    final Field idNum;
-    final Field idNumDV;
-    final Field date;
-
-    public DocState(boolean useDocValues) {
-      doc = new Document();
-      
-      title = new StringField("title", "", Field.Store.NO);
-      doc.add(title);
-
-      FieldType ft = new FieldType(TextField.TYPE_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(true);
-      ft.setStoreTermVectorPositions(true);
-      
-      titleTokenized = new Field("titleTokenized", "", ft);
-      doc.add(titleTokenized);
-
-      body = new Field("body", "", ft);
-      doc.add(body);
-
-      id = new StringField("docid", "", Field.Store.YES);
-      doc.add(id);
-
-      idNum = new IntField("docid_int", 0, Field.Store.NO);
-      doc.add(idNum);
-
-      date = new StringField("date", "", Field.Store.YES);
-      doc.add(date);
-
-      if (useDocValues) {
-        titleDV = new SortedDocValuesField("titleDV", new BytesRef());
-        idNumDV = new NumericDocValuesField("docid_intDV", 0);
-        doc.add(titleDV);
-        doc.add(idNumDV);
-      } else {
-        titleDV = null;
-        idNumDV = null;
-      }
-    }
-  }
-
-  private final ThreadLocal<DocState> threadDocs = new ThreadLocal<>();
-
   /** Note: a new Document instance is created per call */
   public Document nextDoc() throws IOException {
     String line;
@@ -223,12 +172,6 @@
       }
     }
 
-    DocState docState = threadDocs.get();
-    if (docState == null) {
-      docState = new DocState(useDocValues);
-      threadDocs.set(docState);
-    }
-
     int spot = line.indexOf(SEP);
     if (spot == -1) {
       throw new RuntimeException("line: [" + line + "] is in an invalid format !");
@@ -238,20 +181,18 @@
       throw new RuntimeException("line: [" + line + "] is in an invalid format !");
     }
 
-    docState.body.setStringValue(line.substring(1+spot2, line.length()));
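+    // A fresh Document per call, created through the writer so its FieldTypes schema applies: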
+    Document doc = w.newDocument();
+    doc.addLargeText("body", line.substring(1+spot2, line.length()));
+
     final String title = line.substring(0, spot);
-    docState.title.setStringValue(title);
-    if (docState.titleDV != null) {
-      docState.titleDV.setBytesValue(new BytesRef(title));
-    }
-    docState.titleTokenized.setStringValue(title);
-    docState.date.setStringValue(line.substring(1+spot, spot2));
-    final int i = id.getAndIncrement();
-    docState.id.setStringValue(Integer.toString(i));
-    docState.idNum.setIntValue(i);
-    if (docState.idNumDV != null) {
-      docState.idNumDV.setLongValue(i);
-    }
-    return docState.doc;
+    doc.addLargeText("titleTokenized", title);
+    doc.addAtom("title", title);
+    doc.addShortText("titleDV", title);
+
+    doc.addAtom("date", line.substring(1+spot, spot2));
+    int i = id.getAndIncrement();
+    doc.addAtom("docid", Integer.toString(i));
+    doc.addInt("docid_int", i);
+    return doc;
   }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocsText.java b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocsText.java
new file mode 100644
index 0000000..49fe94b
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocsText.java
@@ -0,0 +1,184 @@
+package org.apache.lucene.util;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedReader;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.channels.Channels;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CodingErrorAction;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.zip.GZIPInputStream;
+
+/** Just provides the parsed body, title, date, id from the line file docs. */
+public class LineFileDocsText implements Closeable {
+
+  private BufferedReader reader;
+  private final static int BUFFER_SIZE = 1 << 16;     // 64K
+  private final AtomicInteger id = new AtomicInteger();
+  private final String path;
+  
+  public static class DocText {
+    public final String body;
+    public final String title;
+    public final String date;
+    public final String docid;
+
+    DocText(String body, String title, String date, String docid) {
+      this.body = body;
+      this.title = title;
+      this.date = date;
+      this.docid = docid;
+    }
+  }
+
+  /** The file is rewound at EOF, so the docs repeat
+   * over and over. */
+  public LineFileDocsText(Random random, String path) throws IOException {
+    this.path = path;
+    open(random);
+  }
+
+  public LineFileDocsText(Random random) throws IOException {
+    this(random, LuceneTestCase.TEST_LINE_DOCS_FILE);
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (reader != null) {
+      reader.close();
+      reader = null;
+    }
+  }
+  
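+  /** Picks a random position in roughly the first third of the file, so plenty of docs remain after seeking. */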
+  private long randomSeekPos(Random random, long size) {
+    if (random == null || size <= 3L) {
+      return 0L;
+    }
+    return (random.nextLong() & Long.MAX_VALUE) % (size / 3);
+  }
+
+  private synchronized void open(Random random) throws IOException {
+    InputStream is = getClass().getResourceAsStream(path);
+    boolean needSkip = true;
+    long size = 0L, seekTo = 0L;
+    if (is == null) {
+      // if it's not in the classpath, we load it as an absolute filesystem path (e.g. Hudson's home dir)
+      Path file = Paths.get(path);
+      size = Files.size(file);
+      if (path.endsWith(".gz")) {
+        // if it is a gzip file, we need to use InputStream and slowly skipTo:
+        is = Files.newInputStream(file);
+      } else {
+        // optimized seek using SeekableByteChannel
+        seekTo = randomSeekPos(random, size);
+        final SeekableByteChannel channel = Files.newByteChannel(file);
+        if (LuceneTestCase.VERBOSE) {
+          System.out.println("TEST: LineFileDocs: file seek to fp=" + seekTo + " on open");
+        }
+        channel.position(seekTo);
+        is = Channels.newInputStream(channel);
+        needSkip = false;
+      }
+    } else {
+      // if the file comes from Classpath:
+      size = is.available();
+    }
+    
+    if (path.endsWith(".gz")) {
+      is = new GZIPInputStream(is);
+      // guesstimate:
+      size *= 2.8;
+    }
+    
+    // If we only have an InputStream, we need to seek now,
+    // but this seek is a scan, so very inefficient!!!
+    if (needSkip) {
+      seekTo = randomSeekPos(random, size);
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("TEST: LineFileDocs: stream skip to fp=" + seekTo + " on open");
+      }
+      is.skip(seekTo);
+    }
+    
+    // if we seeked somewhere, read until newline char
+    if (seekTo > 0L) {
+      int b;
+      do {
+        b = is.read();
+      } while (b >= 0 && b != 13 && b != 10);
+    }
+    
+    CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder()
+        .onMalformedInput(CodingErrorAction.REPORT)
+        .onUnmappableCharacter(CodingErrorAction.REPORT);
+    reader = new BufferedReader(new InputStreamReader(is, decoder), BUFFER_SIZE);
+    
+    if (seekTo > 0L) {
+      // read one more line, to make sure we are not inside a Windows linebreak (\r\n):
+      reader.readLine();
+    }
+  }
+
+  public synchronized void reset(Random random) throws IOException {
+    close();
+    open(random);
+    id.set(0);
+  }
+
+  private final static char SEP = '\t';
+
+  /** Returns the next doc's parsed fields; a new DocText instance is created per call. */
+  public DocText nextDoc() throws IOException {
+    String line;
+    synchronized(this) {
+      line = reader.readLine();
+      if (line == null) {
+        // Always rewind at end:
+        if (LuceneTestCase.VERBOSE) {
+          System.out.println("TEST: LineFileDocs: now rewind file...");
+        }
+        close();
+        open(null);
+        line = reader.readLine();
+      }
+    }
+
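+    // Each line is: title <TAB> date <TAB> body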
+    int spot = line.indexOf(SEP);
+    if (spot == -1) {
+      throw new RuntimeException("line: [" + line + "] is in an invalid format !");
+    }
+    int spot2 = line.indexOf(SEP, 1 + spot);
+    if (spot2 == -1) {
+      throw new RuntimeException("line: [" + line + "] is in an invalid format !");
+    }
+
+    return new DocText(line.substring(1+spot2, line.length()),
+                       line.substring(0, spot),
+                       line.substring(1+spot, spot2),
+                       Integer.toString(id.getAndIncrement()));
+  }
+}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index f84a965..8b15876 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -58,11 +58,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.AlcoholicMergePolicy;
 import org.apache.lucene.index.AssertingDirectoryReader;
 import org.apache.lucene.index.AssertingLeafReader;
@@ -77,11 +73,11 @@
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.LiveIndexWriterConfig;
@@ -99,6 +95,7 @@
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ParallelCompositeReader;
 import org.apache.lucene.index.ParallelLeafReader;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SimpleMergedSegmentWarmer;
@@ -106,8 +103,6 @@
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.TermsEnum;
@@ -118,7 +113,9 @@
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilterCachingPolicy;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
+import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
@@ -144,7 +141,6 @@
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
 import org.junit.runner.RunWith;
-
 import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
 import com.carrotsearch.randomizedtesting.LifecycleScope;
 import com.carrotsearch.randomizedtesting.MixWithSuiteName;
@@ -644,8 +640,6 @@
     .around(new TestRuleSetupAndRestoreInstanceEnv())
     .around(parentChainCallRule);
 
-  private static final Map<String,FieldType> fieldToType = new HashMap<String,FieldType>();
-
   enum LiveIWCFlushMode {BY_RAM, BY_DOCS, EITHER};
 
   /** Set by TestRuleSetupAndRestoreClassEnv */
@@ -655,6 +649,8 @@
     liveIWCFlushMode = flushMode;
   }
 
+  protected Directory dir;
+
   // -----------------------------------------------------------------
   // Suite and test case setup/ cleanup.
   // -----------------------------------------------------------------
@@ -665,6 +661,7 @@
   @Before
   public void setUp() throws Exception {
     parentChainCallRule.setupCalled = true;
+    dir = newDirectory();
   }
 
   /**
@@ -673,10 +670,13 @@
   @After
   public void tearDown() throws Exception {
     parentChainCallRule.teardownCalled = true;
-    fieldToType.clear();
 
     // Test is supposed to call this itself, but we do this defensively in case it forgot:
     restoreIndexWriterMaxDocs();
+
+    if (dir != null) {
+      dir.close();
+    }
   }
 
   /** Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in one index; call
@@ -879,9 +879,39 @@
     dumpIterator(label, iter, stream);
   }
 
-  /** create a new index writer config with random defaults */
+  /** create a new index writer config with random defaults, using MockAnalyzer */
   public static IndexWriterConfig newIndexWriterConfig() {
-    return newIndexWriterConfig(new MockAnalyzer(random()));
+    return newIndexWriterConfig(random(), new MockAnalyzer(random()));
+  }
+
+  /** create a new IndexWriter over the given directory, using a randomized config with MockAnalyzer */
+  public static IndexWriter newIndexWriter(Directory dir) throws IOException {
+    return new IndexWriter(dir, newIndexWriterConfig());
+  }
+
+  /** create a new IndexWriter over this test's default directory, using the given config */
+  public IndexWriter newIndexWriter(IndexWriterConfig iwc) throws IOException {
+    return new IndexWriter(dir, iwc);
+  }
+
+  /** create a new IndexWriter over this test's default directory, using a randomized config with MockAnalyzer */
+  public IndexWriter newIndexWriter() throws IOException {
+    return new IndexWriter(dir, newIndexWriterConfig());
+  }
+
+  /** create a new RandomIndexWriter over the given directory, using a randomized config with MockAnalyzer */
+  public static RandomIndexWriter newRandomIndexWriter(Directory dir) throws IOException {
+    return new RandomIndexWriter(random(), dir, newIndexWriterConfig());
+  }
+
+  /** create a new RandomIndexWriter over this test's default directory, using a randomized config with MockAnalyzer */
+  public RandomIndexWriter newRandomIndexWriter() throws IOException {
+    return new RandomIndexWriter(random(), dir, newIndexWriterConfig());
+  }
+
+  /** create a new RandomIndexWriter over the given directory, using a randomized config with the given analyzer */
+  public static RandomIndexWriter newRandomIndexWriter(Directory dir, Analyzer a) throws IOException {
+    return new RandomIndexWriter(random(), dir, newIndexWriterConfig(a));
   }
 
   /** create a new index writer config with random defaults */
@@ -1363,110 +1393,7 @@
     }
   }
   
-  public static Field newStringField(String name, String value, Store stored) {
-    return newField(random(), name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
-  }
-
-  public static Field newTextField(String name, String value, Store stored) {
-    return newField(random(), name, value, stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED);
-  }
-  
-  public static Field newStringField(Random random, String name, String value, Store stored) {
-    return newField(random, name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
-  }
-  
-  public static Field newTextField(Random random, String name, String value, Store stored) {
-    return newField(random, name, value, stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED);
-  }
-  
-  public static Field newField(String name, String value, FieldType type) {
-    return newField(random(), name, value, type);
-  }
-
-  /** Returns a FieldType derived from newType but whose
-   *  term vector options match the old type */
-  private static FieldType mergeTermVectorOptions(FieldType newType, FieldType oldType) {
-    if (newType.indexOptions() != IndexOptions.NONE && oldType.storeTermVectors() == true && newType.storeTermVectors() == false) {
-      newType = new FieldType(newType);
-      newType.setStoreTermVectors(oldType.storeTermVectors());
-      newType.setStoreTermVectorPositions(oldType.storeTermVectorPositions());
-      newType.setStoreTermVectorOffsets(oldType.storeTermVectorOffsets());
-      newType.setStoreTermVectorPayloads(oldType.storeTermVectorPayloads());
-      newType.freeze();
-    }
-
-    return newType;
-  }
-
-  // TODO: if we can pull out the "make term vector options
-  // consistent across all instances of the same field name"
-  // write-once schema sort of helper class then we can
-  // remove the sync here.  We can also fold the random
-  // "enable norms" (now commented out, below) into that:
-  public synchronized static Field newField(Random random, String name, String value, FieldType type) {
-
-    // Defeat any consumers that illegally rely on intern'd
-    // strings (we removed this from Lucene a while back):
-    name = new String(name);
-
-    FieldType prevType = fieldToType.get(name);
-
-    if (usually(random) || type.indexOptions() == IndexOptions.NONE || prevType != null) {
-      // most of the time, don't modify the params
-      if (prevType == null) {
-        fieldToType.put(name, new FieldType(type));
-      } else {
-        type = mergeTermVectorOptions(type, prevType);
-      }
-
-      return new Field(name, value, type);
-    }
-
-    // TODO: once all core & test codecs can index
-    // offsets, sometimes randomly turn on offsets if we are
-    // already indexing positions...
-
-    FieldType newType = new FieldType(type);
-    if (!newType.stored() && random.nextBoolean()) {
-      newType.setStored(true); // randomly store it
-    }
-
-    // Randomly turn on term vector options, but always do
-    // so consistently for the same field name:
-    if (!newType.storeTermVectors() && random.nextBoolean()) {
-      newType.setStoreTermVectors(true);
-      if (!newType.storeTermVectorPositions()) {
-        newType.setStoreTermVectorPositions(random.nextBoolean());
-        
-        if (newType.storeTermVectorPositions()) {
-          if (!newType.storeTermVectorPayloads()) {
-            newType.setStoreTermVectorPayloads(random.nextBoolean());
-          }
-        }
-      }
-      
-      if (!newType.storeTermVectorOffsets()) {
-        newType.setStoreTermVectorOffsets(random.nextBoolean());
-      }
-
-      if (VERBOSE) {
-        System.out.println("NOTE: LuceneTestCase: upgrade name=" + name + " type=" + newType);
-      }
-    }
-    newType.freeze();
-    fieldToType.put(name, newType);
-
-    // TODO: we need to do this, but smarter, ie, most of
-    // the time we set the same value for a given field but
-    // sometimes (rarely) we change it up:
-    /*
-    if (newType.omitNorms()) {
-      newType.setOmitNorms(random.nextBoolean());
-    }
-    */
-    
-    return new Field(name, value, newType);
-  }
+  // TODO: can we randomize how fields are indexed here?
 
   /** 
    * Return a random Locale from the available locales on the system.
@@ -2239,25 +2166,25 @@
   public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
     assert leftReader.maxDoc() == rightReader.maxDoc();
     for (int i = 0; i < leftReader.maxDoc(); i++) {
-      StoredDocument leftDoc = leftReader.document(i);
-      StoredDocument rightDoc = rightReader.document(i);
+      Document leftDoc = leftReader.document(i);
+      Document rightDoc = rightReader.document(i);
       
       // TODO: I think this is bogus because we don't document what the order should be
       // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
       // in whatever way it wants (e.g. maybe it packs related fields together or something)
       // To fix this, we sort the fields in both documents by name, but
       // we still assume that all instances with same name are in order:
-      Comparator<StorableField> comp = new Comparator<StorableField>() {
+      Comparator<IndexableField> comp = new Comparator<IndexableField>() {
         @Override
-        public int compare(StorableField arg0, StorableField arg1) {
+        public int compare(IndexableField arg0, IndexableField arg1) {
           return arg0.name().compareTo(arg1.name());
         }        
       };
       Collections.sort(leftDoc.getFields(), comp);
       Collections.sort(rightDoc.getFields(), comp);
 
-      Iterator<StorableField> leftIterator = leftDoc.iterator();
-      Iterator<StorableField> rightIterator = rightDoc.iterator();
+      Iterator<IndexableField> leftIterator = leftDoc.iterator();
+      Iterator<IndexableField> rightIterator = rightDoc.iterator();
       while (leftIterator.hasNext()) {
         assertTrue(info, rightIterator.hasNext());
         assertStoredFieldEquals(info, leftIterator.next(), rightIterator.next());
@@ -2269,7 +2196,7 @@
   /** 
    * checks that two stored fields are equivalent 
    */
-  public void assertStoredFieldEquals(String info, StorableField leftField, StorableField rightField) {
+  public void assertStoredFieldEquals(String info, IndexableField leftField, IndexableField rightField) {
     assertEquals(info, leftField.name(), rightField.name());
     assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
     assertEquals(info, leftField.stringValue(), rightField.stringValue());
@@ -2546,6 +2473,32 @@
     assert enabled = true; // Intentional side-effect!!!
     assertsAreEnabled = enabled;
   }
+
+  protected interface ThrowableRunnable {
+    public void run() throws Exception;
+  }
+
+  /** Runs the given code and asserts it throws an {@code IllegalStateException} or {@code IllegalArgumentException} whose message starts with {@code message}; fails if nothing is thrown, and lets any other exception propagate. */
+  protected static void shouldFail(ThrowableRunnable x, String message) throws Exception {
+    try {
+      x.run();
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      assertTrue("wrong message: " + ise.getMessage(), ise.getMessage().startsWith(message));
+    } catch (IllegalArgumentException iae) {
+      assertTrue("wrong message: " + iae.getMessage(), iae.getMessage().startsWith(message));
+    }
+  }
+
+  public static int hitCount(IndexSearcher s, Query q) throws IOException {
+    if (random().nextBoolean()) {
+      return s.search(q, 1).totalHits;
+    } else {
+      TotalHitCountCollector c = new TotalHitCountCollector();
+      s.search(q, c);
+      return c.getTotalHits();
+    }
+  }
   
   /** 
   * Compares two strings with a collator, also looking to see if the strings
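
A hedged usage sketch of the new LuceneTestCase helpers added above (the field name, query and exception message are illustrative, not part of the patch):

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.LuceneTestCase;

public class HelperUsageSketch extends LuceneTestCase {
  public void testSketch() throws Exception {
    IndexWriter w = newIndexWriter();   // writes to this test's default dir
    // ... add documents here ...
    w.close();
    IndexSearcher s = newSearcher(DirectoryReader.open(dir));
    // hitCount randomly uses search(q, 1).totalHits or a TotalHitCountCollector:
    assertEquals(0, hitCount(s, new TermQuery(new Term("body", "missing"))));
    s.getIndexReader().close();
    // shouldFail asserts an ISE/IAE whose message starts with the given prefix:
    shouldFail(new ThrowableRunnable() {
      @Override
      public void run() throws Exception {
        throw new IllegalArgumentException("bad value: 42");
      }
    }, "bad value");
    // dir itself is closed by tearDown().
  }
}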
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index aa1f18a..de674db 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -57,16 +57,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.NumericType;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.DirectoryReader;
@@ -753,6 +743,14 @@
     return new String(buffer, 0, i);
   }
 
+  /** Returns a random binary term. */
+  public static BytesRef randomBinaryTerm(Random r) {
+    int length = r.nextInt(15);
+    BytesRef b = new BytesRef(length);
+    r.nextBytes(b.bytes);
+    b.length = length;
+    return b;
+  }
   
   /** Return a Codec that can read any of the
    *  default codecs and formats, but always writes in the specified
@@ -943,57 +941,6 @@
     }
   }
 
-  // NOTE: this is likely buggy, and cannot clone fields
-  // with tokenStreamValues, etc.  Use at your own risk!!
-
-  // TODO: is there a pre-existing way to do this!!!
-  public static Document cloneDocument(Document doc1) {
-    final Document doc2 = new Document();
-    for(IndexableField f : doc1.getFields()) {
-      final Field field1 = (Field) f;
-      final Field field2;
-      final DocValuesType dvType = field1.fieldType().docValuesType();
-      final NumericType numType = field1.fieldType().numericType();
-      if (dvType != DocValuesType.NONE) {
-        switch(dvType) {
-          case NUMERIC:
-            field2 = new NumericDocValuesField(field1.name(), field1.numericValue().longValue());
-            break;
-          case BINARY:
-            field2 = new BinaryDocValuesField(field1.name(), field1.binaryValue());
-            break;
-          case SORTED:
-            field2 = new SortedDocValuesField(field1.name(), field1.binaryValue());
-            break;
-          default:
-            throw new IllegalStateException("unknown Type: " + dvType);
-        }
-      } else if (numType != null) {
-        switch (numType) {
-          case INT:
-            field2 = new IntField(field1.name(), field1.numericValue().intValue(), field1.fieldType());
-            break;
-          case FLOAT:
-            field2 = new FloatField(field1.name(), field1.numericValue().intValue(), field1.fieldType());
-            break;
-          case LONG:
-            field2 = new LongField(field1.name(), field1.numericValue().intValue(), field1.fieldType());
-            break;
-          case DOUBLE:
-            field2 = new DoubleField(field1.name(), field1.numericValue().intValue(), field1.fieldType());
-            break;
-          default:
-            throw new IllegalStateException("unknown Type: " + numType);
-        }
-      } else {
-        field2 = new Field(field1.name(), field1.stringValue(), field1.fieldType());
-      }
-      doc2.add(field2);
-    }
-
-    return doc2;
-  }
-
   // Returns a DocsEnum, but randomly sometimes uses a
   // DocsAndFreqsEnum, DocsAndPositionsEnum.  Returns null
   // if field/term doesn't exist:
@@ -1208,6 +1155,24 @@
     }
   }
 
+  /** For debugging: tries to include br.utf8ToString(), but if that
+   *  fails (because it's not valid UTF-8, which is fine!), falls back
+   *  to the ordinary toString. */
+  public static String brToString(BytesRef br) {
+    if (br == null) {
+      return "(null)";
+    } else {
+      try {
+        return br.utf8ToString() + " " + br.toString();
+      } catch (Throwable t) {
+        // If BytesRef isn't actually UTF8, or it's eg a
+        // prefix of UTF8 that ends mid-unicode-char, we
+        // fallback to hex:
+        return br.toString();
+      }
+    }
+  }
+
   /** Returns true if this is an FSDirectory backed by {@link WindowsFS}. */
   public static boolean isWindowsFS(Directory dir) {
     // First unwrap directory to see if there is an FSDir:
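
A hedged sketch of the two new TestUtil helpers in a test (randomBinaryTerm yields 0..14 random bytes that need not be valid UTF-8; brToString prints such terms without throwing):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;

public class BinaryTermSketch extends LuceneTestCase {
  public void testSketch() {
    BytesRef term = TestUtil.randomBinaryTerm(random());
    assertTrue(term.length < 15);
    // Never throws, even when the bytes are not a valid UTF-8 sequence:
    System.out.println("term = " + TestUtil.brToString(term));
    assertEquals("(null)", TestUtil.brToString(null));
  }
}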
diff --git a/solr/example/scripts/cloud-scripts/log4j.properties b/solr/example/scripts/cloud-scripts/log4j.properties
new file mode 100644
index 0000000..c581583
--- /dev/null
+++ b/solr/example/scripts/cloud-scripts/log4j.properties
@@ -0,0 +1,8 @@
+#  Logging level
+log4j.rootLogger=INFO, stderr
+
+# log to stderr
+log4j.appender.stderr = org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.Target = System.err
+log4j.appender.stderr.layout = org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
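
With this ConversionPattern, a ZkCLI log line lands on stderr roughly as (message text illustrative): INFO  - 2015-02-17 12:34:56.789; org.apache.solr.cloud.ZkCLI; put /configs/myconf. Here %-5p is the level left-padded to five characters, %d the timestamp, %C the fully qualified calling class and %m the message.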
diff --git a/solr/example/scripts/cloud-scripts/zkcli.bat b/solr/example/scripts/cloud-scripts/zkcli.bat
new file mode 100644
index 0000000..b7aa695
--- /dev/null
+++ b/solr/example/scripts/cloud-scripts/zkcli.bat
@@ -0,0 +1,21 @@
+@echo off
+REM You can optionally pass the following parameters to this script:
+REM 
+
+set JVM=java
+
+REM Find location of this script
+
+set SDIR=%~dp0
+if "%SDIR:~-1%"=="\" set SDIR=%SDIR:~0,-1%
+
+IF exist %SDIR%\..\..\solr-webapp\webapp\nul (
+  echo %SDIR%\..\..\solr-webapp\webapp exists
+) ELSE (
+  echo -------------------
+  echo Unzip example\webapps\solr.war to example\solr-webapp\. to use this script.
+  echo Starting the Solr example via start.jar will also do this extraction.
+  echo -------------------
+)
+
+"%JVM%" -Dlog4j.configuration="file:%SDIR%\log4j.properties" -classpath "%SDIR%\..\..\solr-webapp\webapp\WEB-INF\lib\*;%SDIR%\..\..\lib\ext\*" org.apache.solr.cloud.ZkCLI %*
diff --git a/solr/example/scripts/cloud-scripts/zkcli.sh b/solr/example/scripts/cloud-scripts/zkcli.sh
new file mode 100755
index 0000000..3110898
--- /dev/null
+++ b/solr/example/scripts/cloud-scripts/zkcli.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# You can optionally pass the following parameters to this script:
+# 
+
+JVM="java"
+
+# Find location of this script
+
+sdir="`dirname \"$0\"`"
+
+if [ ! -d "$sdir/../../solr-webapp/webapp" ]; then
+  unzip $sdir/../../webapps/solr.war -d $sdir/../../solr-webapp/webapp
+fi
+
+PATH=$JAVA_HOME/bin:$PATH $JVM -Dlog4j.configuration=file:$sdir/log4j.properties -classpath "$sdir/../../solr-webapp/webapp/WEB-INF/lib/*:$sdir/../../lib/ext/*" org.apache.solr.cloud.ZkCLI ${1+"$@"}
+
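
A hedged usage sketch (upconfig, -zkhost, -confdir and -confname are real ZkCLI options; the host, port and paths are illustrative):

cd example/scripts/cloud-scripts
./zkcli.sh -zkhost localhost:9983 \
           -cmd upconfig -confdir ../../solr/collection1/conf -confname myconf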
diff --git a/solr/example/scripts/map-reduce/set-map-reduce-classpath.sh b/solr/example/scripts/map-reduce/set-map-reduce-classpath.sh
new file mode 100755
index 0000000..c430570
--- /dev/null
+++ b/solr/example/scripts/map-reduce/set-map-reduce-classpath.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+######################################################################
+#
+# Running this script will set two environment variables:
+# HADOOP_CLASSPATH
+# HADOOP_LIBJAR: pass this to the -libjars option of MapReduceIndexerTool
+#
+######################################################################
+
+# return absolute path
+function absPath {
+  echo $(cd $(dirname "$1"); pwd)/$(basename "$1")
+}
+
+
+# Find location of this script
+
+sdir="`cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd`"
+
+solr_distrib="$sdir/../../.."
+
+echo `absPath $solr_distrib`
+
+# extract war if necessary
+
+if [ ! -d "$solr_distrib/example/solr-webapp/webapp" ]; then
+   unzip -o $solr_distrib/example/webapps/solr.war -d $solr_distrib/example/solr-webapp/webapp
+fi
+
+# Setup env variables for MapReduceIndexerTool
+
+# Setup HADOOP_CLASSPATH
+
+dir1=`absPath "$solr_distrib/dist"`
+dir2=`absPath "$solr_distrib/dist/solrj-lib"`
+dir3=`absPath "$solr_distrib/contrib/map-reduce/lib"`
+dir4=`absPath "$solr_distrib/contrib/morphlines-core/lib"`
+dir5=`absPath "$solr_distrib/contrib/morphlines-cell/lib"`
+dir6=`absPath "$solr_distrib/contrib/extraction/lib"`
+dir7=`absPath "$solr_distrib/example/solr-webapp/webapp/WEB-INF/lib"`
+
+# Setup -libjars
+
+lib1=`ls -m $dir1/*.jar | tr -d ' \n'`
+lib2=`ls -m $dir2/*.jar | tr -d ' \n' | sed 's/\,[^\,]*\(log4j\|slf4j\)[^\,]*//g'`
+lib3=`ls -m $dir3/*.jar | tr -d ' \n'`
+lib4=`ls -m $dir4/*.jar | tr -d ' \n'`
+lib5=`ls -m $dir5/*.jar | tr -d ' \n'`
+lib6=`ls -m $dir6/*.jar | tr -d ' \n'`
+lib7=`ls -m $dir7/*.jar | tr -d ' \n'`
+
+export HADOOP_CLASSPATH="$dir1/*:$dir2/*:$dir3/*:$dir4/*:$dir5/*:$dir6/*:$dir7/*"
+export HADOOP_LIBJAR="$lib1,$lib2,$lib3,$lib4,$lib5,$lib6,$lib7"
+
+#echo $HADOOP_CLASSPATH
+#echo $HADOOP_LIBJAR
+
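
A hedged usage sketch: source the script so the hadoop launcher inherits HADOOP_CLASSPATH from the environment, then hand HADOOP_LIBJAR to Hadoop's generic -libjars option (the jar glob and the trailing --help argument are illustrative):

source example/scripts/map-reduce/set-map-reduce-classpath.sh
hadoop jar dist/solr-map-reduce-*.jar -libjars "$HADOOP_LIBJAR" --help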