LUCENE-6852: merge trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene6852@1710430 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
index d58f522..faf46d0 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
@@ -21,6 +21,7 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
+import org.apache.lucene.codecs.DimensionalFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.FilterCodec;
@@ -152,6 +153,11 @@
     return docValuesFormat;
   }
 
+  @Override
+  public final DimensionalFormat dimensionalFormat() {
+    return DimensionalFormat.EMPTY;
+  }
+
   private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
   private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene50");
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
new file mode 100644
index 0000000..af60a0d
--- /dev/null
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
@@ -0,0 +1,95 @@
+package org.apache.lucene.codecs.simpletext;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.bkd.BKDReader;
+
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.BLOCK_COUNT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.BLOCK_DOC_ID;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.BLOCK_VALUE;
+
+class SimpleTextBKDReader extends BKDReader {
+
+  public SimpleTextBKDReader(IndexInput datIn, int numDims, int maxPointsInLeafNode, int bytesPerDim, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
+    super(datIn, numDims, maxPointsInLeafNode, bytesPerDim, leafBlockFPs, splitPackedValues);
+  }
+
+  @Override
+  protected void visitDocIDs(IndexInput in, long blockFP, IntersectVisitor visitor) throws IOException {
+    BytesRefBuilder scratch = new BytesRefBuilder();
+    in.seek(blockFP);
+    readLine(in, scratch);
+    int count = parseInt(scratch, BLOCK_COUNT);
+    for(int i=0;i<count;i++) {
+      readLine(in, scratch);
+      visitor.visit(parseInt(scratch, BLOCK_DOC_ID));
+    }
+  }
+
+  @Override
+  protected int readDocIDs(IndexInput in, long blockFP, int[] docIDs) throws IOException {
+    BytesRefBuilder scratch = new BytesRefBuilder();
+    in.seek(blockFP);
+    readLine(in, scratch);
+    int count = parseInt(scratch, BLOCK_COUNT);
+    for(int i=0;i<count;i++) {
+      readLine(in, scratch);
+      docIDs[i] = parseInt(scratch, BLOCK_DOC_ID);
+    }
+    return count;
+  }
+
+  @Override
+  protected void visitDocValues(byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
+    assert scratchPackedValue.length == packedBytesLength;
+    BytesRefBuilder scratch = new BytesRefBuilder();
+    for(int i=0;i<count;i++) {
+      readLine(in, scratch);
+      assert startsWith(scratch, BLOCK_VALUE);
+      BytesRef br = SimpleTextUtil.fromBytesRefString(stripPrefix(scratch, BLOCK_VALUE));
+      assert br.length == packedBytesLength;
+      System.arraycopy(br.bytes, br.offset, scratchPackedValue, 0, packedBytesLength);
+      visitor.visit(docIDs[i], scratchPackedValue);
+    }
+  }
+
+  private int parseInt(BytesRefBuilder scratch, BytesRef prefix) {
+    assert startsWith(scratch, prefix);
+    return Integer.parseInt(stripPrefix(scratch, prefix));
+  }
+
+  private String stripPrefix(BytesRefBuilder scratch, BytesRef prefix) {
+    return new String(scratch.bytes(), prefix.length, scratch.length() - prefix.length, StandardCharsets.UTF_8);
+  }
+
+  private boolean startsWith(BytesRefBuilder scratch, BytesRef prefix) {
+    return StringHelper.startsWith(scratch.get(), prefix);
+  }
+
+  private void readLine(IndexInput in, BytesRefBuilder scratch) throws IOException {
+    SimpleTextUtil.readLine(in, scratch);
+  }
+}
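For orientation, a leaf block in the _N.dim file, as parsed by visitDocIDs and visitDocValues above, looks roughly like this (the doc IDs and values are illustrative; values use the bracketed hex form of BytesRef.toString):

    block count 2
      doc 42
      doc 100
      block value [0 0 0 11]
      block value [0 0 0 2a]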
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java
index 2c3435a..f8285c1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java
@@ -19,12 +19,13 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
+import org.apache.lucene.codecs.DimensionalFormat;
+import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.LiveDocsFormat;
+import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 
@@ -44,11 +45,12 @@
   private final LiveDocsFormat liveDocs = new SimpleTextLiveDocsFormat();
   private final DocValuesFormat dvFormat = new SimpleTextDocValuesFormat();
   private final CompoundFormat compoundFormat = new SimpleTextCompoundFormat();
+  private final DimensionalFormat dimensionalFormat = new SimpleTextDimensionalFormat();
   
   public SimpleTextCodec() {
     super("SimpleText");
   }
-  
+
   @Override
   public PostingsFormat postingsFormat() {
     return postings;
@@ -93,4 +95,9 @@
   public CompoundFormat compoundFormat() {
     return compoundFormat;
   }
+
+  @Override
+  public DimensionalFormat dimensionalFormat() {
+    return dimensionalFormat;
+  }
 }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
index a918709..c994df7 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
@@ -214,7 +214,7 @@
   }
   
  // helper method to strip away 'prefix' from 'scratch' and return as String
-  private String stripPrefix(BytesRefBuilder scratch, BytesRef prefix) throws IOException {
+  private String stripPrefix(BytesRefBuilder scratch, BytesRef prefix) {
     return new String(scratch.bytes(), prefix.length, scratch.length() - prefix.length, StandardCharsets.UTF_8);
   }
   
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalFormat.java
new file mode 100644
index 0000000..56e7579
--- /dev/null
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalFormat.java
@@ -0,0 +1,54 @@
+package org.apache.lucene.codecs.simpletext;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.codecs.DimensionalFormat;
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.codecs.DimensionalWriter;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+
+/** For debugging, curiosity, transparency only!!  Do not
+ *  use this codec in production.
+ *
+ *  <p>This codec stores all dimensional data in a single
+ *  human-readable text file (_N.dim), plus a small index of
+ *  per-field file pointers (_N.dii).  You can view these in
+ *  any text editor, and even edit them to alter your index.
+ *
+ *  @lucene.experimental */
+public final class SimpleTextDimensionalFormat extends DimensionalFormat {
+  
+  @Override
+  public DimensionalWriter fieldsWriter(SegmentWriteState state) throws IOException {
+    return new SimpleTextDimensionalWriter(state);
+  }
+
+  @Override
+  public DimensionalReader fieldsReader(SegmentReadState state) throws IOException {
+    return new SimpleTextDimensionalReader(state);
+  }
+
+  /** Extension of dimensional data file */
+  static final String DIMENSIONAL_EXTENSION = "dim";
+
+  /** Extension of dimensional index file */
+  static final String DIMENSIONAL_INDEX_EXTENSION = "dii";
+}
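The companion _N.dii index file, written by SimpleTextDimensionalWriter.close() further below, holds one file-pointer entry per field. An illustrative single-field example (the fp and checksum values are made up):

    field count 1
      field fp name point
      field fp 411
    checksum 00000000000000123456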
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalReader.java
new file mode 100644
index 0000000..a453ed9
--- /dev/null
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalReader.java
@@ -0,0 +1,176 @@
+package org.apache.lucene.codecs.simpletext;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.store.BufferedChecksumIndexInput;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.bkd.BKDReader;
+
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.BLOCK_FP;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.BYTES_PER_DIM;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.FIELD_COUNT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.FIELD_FP;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.FIELD_FP_NAME;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.INDEX_COUNT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.MAX_LEAF_POINTS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.NUM_DIMS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.SPLIT_COUNT;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.SPLIT_DIM;
+import static org.apache.lucene.codecs.simpletext.SimpleTextDimensionalWriter.SPLIT_VALUE;
+
+class SimpleTextDimensionalReader extends DimensionalReader {
+
+  private final IndexInput dataIn;
+  final SegmentReadState readState;
+  final Map<String,BKDReader> readers = new HashMap<>();
+  final BytesRefBuilder scratch = new BytesRefBuilder();
+
+  public SimpleTextDimensionalReader(SegmentReadState readState) throws IOException {
+    // Initialize readers now:
+    String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextDimensionalFormat.DIMENSIONAL_EXTENSION);
+    dataIn = readState.directory.openInput(fileName, IOContext.DEFAULT);
+    String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextDimensionalFormat.DIMENSIONAL_INDEX_EXTENSION);
+    try (ChecksumIndexInput in = new BufferedChecksumIndexInput(readState.directory.openInput(indexFileName, IOContext.DEFAULT))) {
+      readLine(in);
+      int count = parseInt(FIELD_COUNT);
+      for(int i=0;i<count;i++) {
+        readLine(in);
+        String fieldName = stripPrefix(FIELD_FP_NAME);
+        readLine(in);
+        long fp = parseLong(FIELD_FP);
+        readers.put(fieldName, initReader(fp));
+      }
+      SimpleTextUtil.checkFooter(in);
+    }
+    this.readState = readState;
+  }
+
+  private BKDReader initReader(long fp) throws IOException {
+    // NOTE: matches what writeIndex does in SimpleTextDimensionalWriter
+    dataIn.seek(fp);
+    readLine(dataIn);
+    int numDims = parseInt(NUM_DIMS);
+
+    readLine(dataIn);
+    int bytesPerDim = parseInt(BYTES_PER_DIM);
+
+    readLine(dataIn);
+    int maxPointsInLeafNode = parseInt(MAX_LEAF_POINTS);
+
+    readLine(dataIn);
+    int count = parseInt(INDEX_COUNT);
+    long[] leafBlockFPs = new long[count];
+    for(int i=0;i<count;i++) {
+      readLine(dataIn);
+      leafBlockFPs[i] = parseLong(BLOCK_FP);
+    }
+    readLine(dataIn);
+    count = parseInt(SPLIT_COUNT);
+
+    byte[] splitPackedValues = new byte[count * (1 + bytesPerDim)];
+    for(int i=0;i<count;i++) {
+      readLine(dataIn);
+      splitPackedValues[(1 + bytesPerDim) * i] = (byte) parseInt(SPLIT_DIM);
+      readLine(dataIn);
+      assert startsWith(SPLIT_VALUE);
+      BytesRef br = SimpleTextUtil.fromBytesRefString(stripPrefix(SPLIT_VALUE));
+      assert br.length == bytesPerDim;
+      System.arraycopy(br.bytes, br.offset, splitPackedValues, (1 + bytesPerDim) * i + 1, bytesPerDim);
+    }
+
+    return new SimpleTextBKDReader(dataIn, numDims, maxPointsInLeafNode, bytesPerDim, leafBlockFPs, splitPackedValues);
+  }
+
+  private void readLine(IndexInput in) throws IOException {
+    SimpleTextUtil.readLine(in, scratch);
+  }
+
+  private boolean startsWith(BytesRef prefix) {
+    return StringHelper.startsWith(scratch.get(), prefix);
+  }
+
+  private int parseInt(BytesRef prefix) {
+    assert startsWith(prefix);
+    return Integer.parseInt(stripPrefix(prefix));
+  }
+
+  private long parseLong(BytesRef prefix) {
+    assert startsWith(prefix);
+    return Long.parseLong(stripPrefix(prefix));
+  }
+
+  private String stripPrefix(BytesRef prefix) {
+    return new String(scratch.bytes(), prefix.length, scratch.length() - prefix.length, StandardCharsets.UTF_8);
+  }
+
+  /** Finds all documents and points matching the provided visitor */
+  @Override
+  public void intersect(String field, IntersectVisitor visitor) throws IOException {
+    BKDReader bkdReader = readers.get(field);
+    if (bkdReader == null) {
+      throw new IllegalArgumentException("field=\"" + field + "\" was not indexed with dimensional values");
+    }
+    bkdReader.intersect(visitor);
+  }
+
+  @Override
+  public void checkIntegrity() throws IOException {
+    BytesRefBuilder scratch = new BytesRefBuilder();
+    IndexInput clone = dataIn.clone();
+    clone.seek(0);
+
+    // checksum is fixed-width encoded with 20 bytes, plus 1 byte for newline (the space is included in SimpleTextUtil.CHECKSUM):
+    long footerStartPos = dataIn.length() - (SimpleTextUtil.CHECKSUM.length + 21);
+    ChecksumIndexInput input = new BufferedChecksumIndexInput(clone);
+    while (true) {
+      SimpleTextUtil.readLine(input, scratch);
+      if (input.getFilePointer() >= footerStartPos) {
+        // Make sure we landed at precisely the right location:
+        if (input.getFilePointer() != footerStartPos) {
+          throw new CorruptIndexException("SimpleText failure: footer does not start at expected position current=" + input.getFilePointer() + " vs expected=" + footerStartPos, input);
+        }
+        SimpleTextUtil.checkFooter(input);
+        break;
+      }
+    }
+  }
+
+  @Override
+  public long ramBytesUsed() {
+    return 0L;
+  }
+
+  @Override
+  public void close() throws IOException {
+    dataIn.close();
+  }
+}
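initReader above parses the per-field index header that the writer's writeIndex appends to the .dim file. For a single-leaf field with one dimension of 4-byte values it would look roughly like this (values illustrative):

    num dims 1
    bytes per dim 4
    max leaf points 1024
    index count 1
      block fp 0
    split count 1
      split dim 0
      split value [0 0 0 0]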
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalWriter.java
new file mode 100644
index 0000000..31d807c
--- /dev/null
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDimensionalWriter.java
@@ -0,0 +1,204 @@
+package org.apache.lucene.codecs.simpletext;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.codecs.DimensionalWriter;
+import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
+import org.apache.lucene.index.DimensionalValues.Relation;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.bkd.BKDWriter;
+
+class SimpleTextDimensionalWriter extends DimensionalWriter {
+
+  final static BytesRef NUM_DIMS      = new BytesRef("num dims ");
+  final static BytesRef BYTES_PER_DIM = new BytesRef("bytes per dim ");
+  final static BytesRef MAX_LEAF_POINTS = new BytesRef("max leaf points ");
+  final static BytesRef INDEX_COUNT = new BytesRef("index count ");
+  final static BytesRef BLOCK_COUNT   = new BytesRef("block count ");
+  final static BytesRef BLOCK_DOC_ID  = new BytesRef("  doc ");
+  final static BytesRef BLOCK_FP      = new BytesRef("  block fp ");
+  final static BytesRef BLOCK_VALUE   = new BytesRef("  block value ");
+  final static BytesRef SPLIT_COUNT   = new BytesRef("split count ");
+  final static BytesRef SPLIT_DIM     = new BytesRef("  split dim ");
+  final static BytesRef SPLIT_VALUE   = new BytesRef("  split value ");
+  final static BytesRef FIELD_COUNT   = new BytesRef("field count ");
+  final static BytesRef FIELD_FP_NAME = new BytesRef("  field fp name ");
+  final static BytesRef FIELD_FP      = new BytesRef("  field fp ");
+
+  private IndexOutput dataOut;
+  final BytesRefBuilder scratch = new BytesRefBuilder();
+  final SegmentWriteState writeState;
+  final Map<String,Long> indexFPs = new HashMap<>();
+
+  public SimpleTextDimensionalWriter(SegmentWriteState writeState) throws IOException {
+    String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextDimensionalFormat.DIMENSIONAL_EXTENSION);
+    dataOut = writeState.directory.createOutput(fileName, writeState.context);
+    this.writeState = writeState;
+  }
+
+  @Override
+  public void writeField(FieldInfo fieldInfo, DimensionalReader values) throws IOException {
+
+    // We use the normal BKDWriter, but subclass to customize how it writes the index and blocks to disk:
+    BKDWriter writer = new BKDWriter(writeState.directory,
+                                     writeState.segmentInfo.name,
+                                     fieldInfo.getDimensionCount(),
+                                     fieldInfo.getDimensionNumBytes(),
+                                     BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE,
+                                     BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP) {
+
+        @Override
+        protected void writeIndex(IndexOutput out, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
+          write(out, NUM_DIMS);
+          writeInt(out, numDims);
+          newline(out);
+
+          write(out, BYTES_PER_DIM);
+          writeInt(out, bytesPerDim);
+          newline(out);
+
+          write(out, MAX_LEAF_POINTS);
+          writeInt(out, maxPointsInLeafNode);
+          newline(out);
+
+          write(out, INDEX_COUNT);
+          writeInt(out, leafBlockFPs.length);
+          newline(out);
+
+          for(int i=0;i<leafBlockFPs.length;i++) {
+            write(out, BLOCK_FP);
+            writeLong(out, leafBlockFPs[i]);
+            newline(out);
+          }
+
+          assert (splitPackedValues.length % (1 + fieldInfo.getDimensionNumBytes())) == 0;
+          int count = splitPackedValues.length / (1 + fieldInfo.getDimensionNumBytes());
+          assert count == leafBlockFPs.length;
+
+          write(out, SPLIT_COUNT);
+          writeInt(out, count);
+          newline(out);
+
+          for(int i=0;i<count;i++) {
+            write(out, SPLIT_DIM);
+            writeInt(out, splitPackedValues[i * (1 + fieldInfo.getDimensionNumBytes())] & 0xff);
+            newline(out);
+            write(out, SPLIT_VALUE);
+            BytesRef br = new BytesRef(splitPackedValues, 1+(i * (1+fieldInfo.getDimensionNumBytes())), fieldInfo.getDimensionNumBytes());
+            write(out, br.toString());
+            newline(out);
+          }
+        }
+
+        @Override
+        protected void writeLeafBlockDocs(IndexOutput out, int[] docIDs, int start, int count) throws IOException {
+          write(out, BLOCK_COUNT);
+          writeInt(out, count);
+          newline(out);
+          for(int i=0;i<count;i++) {
+            write(out, BLOCK_DOC_ID);
+            writeInt(out, docIDs[start+i]);
+            newline(out);
+          }
+        }
+
+        @Override
+        protected void writeLeafBlockPackedValue(IndexOutput out, byte[] bytes, int offset, int length) throws IOException {
+          assert length == packedBytesLength;
+          write(out, BLOCK_VALUE);
+          write(out, new BytesRef(bytes, offset, length).toString());
+          newline(out);
+        }
+      };
+
+    values.intersect(fieldInfo.name, new IntersectVisitor() {
+        @Override
+        public void visit(int docID) {
+          throw new IllegalStateException();
+        }
+
+        @Override
+        public void visit(int docID, byte[] packedValue) throws IOException {
+          writer.add(packedValue, docID);
+        }
+
+        @Override
+        public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+          return Relation.QUERY_CROSSES_CELL;
+        }
+      });
+    indexFPs.put(fieldInfo.name, writer.finish(dataOut));
+  }
+
+  private void write(IndexOutput out, String s) throws IOException {
+    SimpleTextUtil.write(out, s, scratch);
+  }
+
+  private void writeInt(IndexOutput out, int x) throws IOException {
+    SimpleTextUtil.write(out, Integer.toString(x), scratch);
+  }
+
+  private void writeLong(IndexOutput out, long x) throws IOException {
+    SimpleTextUtil.write(out, Long.toString(x), scratch);
+  }
+
+  private void write(IndexOutput out, BytesRef b) throws IOException {
+    SimpleTextUtil.write(out, b);
+  }
+
+  private void newline(IndexOutput out) throws IOException {
+    SimpleTextUtil.writeNewline(out);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (dataOut != null) {
+      SimpleTextUtil.writeChecksum(dataOut, scratch);
+      dataOut.close();
+      dataOut = null;
+
+      // Write index file
+      String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextDimensionalFormat.DIMENSIONAL_INDEX_EXTENSION);
+      try (IndexOutput indexOut = writeState.directory.createOutput(fileName, writeState.context)) {
+        int count = indexFPs.size();
+        write(indexOut, FIELD_COUNT);
+        write(indexOut, Integer.toString(count));
+        newline(indexOut);
+        for(Map.Entry<String,Long> ent : indexFPs.entrySet()) {
+          write(indexOut, FIELD_FP_NAME);
+          write(indexOut, ent.getKey());
+          newline(indexOut);
+          write(indexOut, FIELD_FP);
+          write(indexOut, Long.toString(ent.getValue()));
+          newline(indexOut);
+        }
+        SimpleTextUtil.writeChecksum(indexOut, scratch);
+      }
+    }
+  }
+}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
index db2ce9a..72600e4 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
@@ -24,7 +24,6 @@
 import java.text.DecimalFormat;
 import java.text.DecimalFormatSymbols;
 import java.text.ParseException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
@@ -499,7 +498,7 @@
   }
 
   /** Used only in ctor: */
-  private String stripPrefix(BytesRef prefix) throws IOException {
+  private String stripPrefix(BytesRef prefix) {
     return new String(scratch.bytes(), prefix.length, scratch.length() - prefix.length, StandardCharsets.UTF_8);
   }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
index c50152c..dc68f72 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
@@ -64,6 +64,8 @@
   static final BytesRef NUM_ATTS        =  new BytesRef("  attributes ");
   static final BytesRef ATT_KEY         =  new BytesRef("    key ");
   static final BytesRef ATT_VALUE       =  new BytesRef("    value ");
+  static final BytesRef DIM_COUNT       =  new BytesRef("  dimensional count ");
+  static final BytesRef DIM_NUM_BYTES   =  new BytesRef("  dimensional num bytes ");
   
   @Override
   public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext iocontext) throws IOException {
@@ -130,8 +132,17 @@
           atts.put(key, value);
         }
 
+        SimpleTextUtil.readLine(input, scratch);
+        assert StringHelper.startsWith(scratch.get(), DIM_COUNT);
+        int dimensionalCount = Integer.parseInt(readString(DIM_COUNT.length, scratch));
+
+        SimpleTextUtil.readLine(input, scratch);
+        assert StringHelper.startsWith(scratch.get(), DIM_NUM_BYTES);
+        int dimensionalNumBytes = Integer.parseInt(readString(DIM_NUM_BYTES.length, scratch));
+
         infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, 
-          omitNorms, storePayloads, indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(atts));
+                                 omitNorms, storePayloads, indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(atts),
+                                 dimensionalCount, dimensionalNumBytes);
       }
 
       SimpleTextUtil.checkFooter(input);
@@ -219,6 +230,14 @@
             SimpleTextUtil.writeNewline(out);
           }
         }
+
+        SimpleTextUtil.write(out, DIM_COUNT);
+        SimpleTextUtil.write(out, Integer.toString(fi.getDimensionCount()), scratch);
+        SimpleTextUtil.writeNewline(out);
+        
+        SimpleTextUtil.write(out, DIM_NUM_BYTES);
+        SimpleTextUtil.write(out, Integer.toString(fi.getDimensionNumBytes()), scratch);
+        SimpleTextUtil.writeNewline(out);
       }
       SimpleTextUtil.writeChecksum(out, scratch);
       success = true;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextUtil.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextUtil.java
index b559cc7..f09fa85 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextUtil.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextUtil.java
@@ -28,7 +28,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.UnicodeUtil;
 
 class SimpleTextUtil {
   public final static byte NEWLINE = 10;
@@ -77,15 +76,15 @@
     // same number of bytes
     // (BaseIndexFileFormatTestCase.testMergeStability cares):
     String checksum = String.format(Locale.ROOT, "%020d", out.getChecksum());
-    SimpleTextUtil.write(out, CHECKSUM);
-    SimpleTextUtil.write(out, checksum, scratch);
-    SimpleTextUtil.writeNewline(out);
+    write(out, CHECKSUM);
+    write(out, checksum, scratch);
+    writeNewline(out);
   }
   
   public static void checkFooter(ChecksumIndexInput input) throws IOException {
     BytesRefBuilder scratch = new BytesRefBuilder();
     String expectedChecksum = String.format(Locale.ROOT, "%020d", input.getChecksum());
-    SimpleTextUtil.readLine(input, scratch);
+    readLine(input, scratch);
     if (StringHelper.startsWith(scratch.get(), CHECKSUM) == false) {
       throw new CorruptIndexException("SimpleText failure: expected checksum line but got " + scratch.get().utf8ToString(), input);
     }
@@ -97,4 +96,21 @@
       throw new CorruptIndexException("Unexpected stuff at the end of file, please be careful with your text editor!", input);
     }
   }
+
+  /** Inverse of {@link BytesRef#toString}. */
+  public static BytesRef fromBytesRefString(String s) {
+    if (s.length() < 2) {
+      throw new IllegalArgumentException("string " + s + " was not created from BytesRef.toString?");
+    }
+    if (s.charAt(0) != '[' || s.charAt(s.length()-1) != ']') {
+      throw new IllegalArgumentException("string " + s + " was not created from BytesRef.toString?");
+    }
+    String[] parts = s.substring(1, s.length()-1).split(" ");
+    byte[] bytes = new byte[parts.length];
+    for(int i=0;i<parts.length;i++) {
+      bytes[i] = (byte) Integer.parseInt(parts[i], 16);
+    }
+
+    return new BytesRef(bytes);
+  }
 }
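fromBytesRefString round-trips the bracketed hex form that BytesRef.toString produces. A minimal sketch (from within this package, since SimpleTextUtil is package-private):

    BytesRef original = new BytesRef(new byte[] {0x12, (byte) 0xff});
    String s = original.toString();                       // "[12 ff]"
    BytesRef copy = SimpleTextUtil.fromBytesRefString(s); // same bytes back
    assert copy.equals(original);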
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
index 7ae80fe..9ded04a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
@@ -18,8 +18,8 @@
  */
 
 import java.util.Objects;
-import java.util.Set;
 import java.util.ServiceLoader; // javadocs
+import java.util.Set;
 
 import org.apache.lucene.index.IndexWriterConfig; // javadocs
 import org.apache.lucene.util.NamedSPILoader;
@@ -107,6 +107,9 @@
   
   /** Encodes/decodes compound files */
   public abstract CompoundFormat compoundFormat();
+
+  /** Encodes/decodes dimensional index */
+  public abstract DimensionalFormat dimensionalFormat();
   
   /** looks up a codec by name */
   public static Codec forName(String name) {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DimensionalFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalFormat.java
new file mode 100644
index 0000000..9b4dd31
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalFormat.java
@@ -0,0 +1,81 @@
+package org.apache.lucene.codecs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+
+/** 
+ * Encodes/decodes indexed dimensional data.
+ *
+ * @lucene.experimental */
+public abstract class DimensionalFormat {
+
+  /**
+   * Creates a new dimensional format.
+   */
+  protected DimensionalFormat() {
+  }
+
+  /** Writes a new segment */
+  public abstract DimensionalWriter fieldsWriter(SegmentWriteState state) throws IOException;
+
+  /** Reads a segment.  NOTE: by the time this call
+   *  returns, it must hold open any files it will need to
+   *  use; else, those files may be deleted. 
+   *  Additionally, required files may be deleted during the execution of 
+   *  this call before there is a chance to open them. Under these 
+   *  circumstances an IOException should be thrown by the implementation. 
+   *  IOExceptions are expected and will automatically cause a retry of the 
+   *  segment opening logic with the newly revised segments.
+   *  */
+  public abstract DimensionalReader fieldsReader(SegmentReadState state) throws IOException;
+
+  /** A {@code DimensionalFormat} that has nothing indexed */
+  public static final DimensionalFormat EMPTY = new DimensionalFormat() {
+      @Override
+      public DimensionalWriter fieldsWriter(SegmentWriteState state) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public DimensionalReader fieldsReader(SegmentReadState state) {
+        return new DimensionalReader() {
+          @Override
+          public void close() {
+          }
+
+          @Override
+          public long ramBytesUsed() {
+            return 0L;
+          }
+
+          @Override
+          public void checkIntegrity() {
+          }
+
+          @Override
+          public void intersect(String fieldName, IntersectVisitor visitor) {
+            throw new IllegalArgumentException("field=\"" + fieldName + "\" was not indexed with dimensional values");
+          }
+        };
+      }
+    };
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DimensionalReader.java b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalReader.java
new file mode 100644
index 0000000..7d6eb3c
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalReader.java
@@ -0,0 +1,51 @@
+package org.apache.lucene.codecs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.lucene.index.DimensionalValues;
+import org.apache.lucene.util.Accountable;
+
+/** Abstract API to visit dimensional values.
+ *
+ * @lucene.experimental
+ */
+public abstract class DimensionalReader extends DimensionalValues implements Closeable, Accountable {
+
+  /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
+  protected DimensionalReader() {}
+
+  /** 
+   * Checks consistency of this reader.
+   * <p>
+   * Note that this may be costly in terms of I/O, e.g. 
+   * may involve computing a checksum value against large data files.
+   * @lucene.internal
+   */
+  public abstract void checkIntegrity() throws IOException;
+
+  /** 
+   * Returns an instance optimized for merging.
+   * <p>
+   * The default implementation returns {@code this} */
+  public DimensionalReader getMergeInstance() throws IOException {
+    return this;
+  }
+}
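Consumers drive a DimensionalReader through DimensionalValues.IntersectVisitor. A minimal sketch that collects every document for one field, assuming a FixedBitSet named hits and an open reader named reader (both hypothetical here):

    // assumes: import org.apache.lucene.index.DimensionalValues;
    //          import org.apache.lucene.index.DimensionalValues.Relation;
    reader.intersect("point", new DimensionalValues.IntersectVisitor() {
        @Override
        public void visit(int docID) {
          // Whole cell matched (compare returned Relation.CELL_INSIDE_QUERY):
          hits.set(docID);
        }

        @Override
        public void visit(int docID, byte[] packedValue) {
          // Cell only crosses the query; a real query would test packedValue here:
          hits.set(docID);
        }

        @Override
        public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
          // Visit every value, as DimensionalWriter.merge below also does:
          return Relation.QUERY_CROSSES_CELL;
        }
      });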
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DimensionalWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalWriter.java
new file mode 100644
index 0000000..b236b51
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DimensionalWriter.java
@@ -0,0 +1,99 @@
+package org.apache.lucene.codecs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.MergeState;
+
+/** Abstract API to write dimensional values.
+ *
+ * @lucene.experimental
+ */
+
+public abstract class DimensionalWriter implements Closeable {
+  /** Sole constructor. (For invocation by subclass 
+   *  constructors, typically implicit.) */
+  protected DimensionalWriter() {
+  }
+
+  /** Write all values contained in the provided reader */
+  public abstract void writeField(FieldInfo fieldInfo, DimensionalReader values) throws IOException;
+
+  /** Default merge implementation to merge incoming dimensional readers by visiting all their points and
+   *  adding to this writer */
+  public void merge(MergeState mergeState) throws IOException {
+    for (FieldInfo fieldInfo : mergeState.mergeFieldInfos) {
+      if (fieldInfo.getDimensionCount() != 0) {
+        writeField(fieldInfo,
+                   new DimensionalReader() {
+                     @Override
+                     public void intersect(String fieldName, IntersectVisitor mergedVisitor) throws IOException {
+                       if (fieldName.equals(fieldInfo.name) == false) {
+                         throw new IllegalArgumentException("field name must match the field being merged");
+                       }
+                       for (int i=0;i<mergeState.dimensionalReaders.length;i++) {
+                         DimensionalReader dimensionalReader = mergeState.dimensionalReaders[i];
+                         MergeState.DocMap docMap = mergeState.docMaps[i];
+                         int docBase = mergeState.docBase[i];
+                         dimensionalReader.intersect(fieldInfo.name,
+                                                     new IntersectVisitor() {
+                                                       @Override
+                                                       public void visit(int docID) {
+                                                         // Should never be called because our compare method never returns Relation.CELL_INSIDE_QUERY
+                                                         throw new IllegalStateException();
+                                                       }
+
+                                                       @Override
+                                                       public void visit(int docID, byte[] packedValue) throws IOException {
+                                                         int newDocID = docMap.get(docID);
+                                                         if (newDocID != -1) {
+                                                           // Not deleted:
+                                                           mergedVisitor.visit(docBase + newDocID, packedValue);
+                                                         }
+                                                       }
+
+                                                       @Override
+                                                       public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+                                                         // Forces this segment's DimensionalReader to always visit all docs + values:
+                                                         return Relation.QUERY_CROSSES_CELL;
+                                                       }
+                                                     });
+                       }
+                     }
+
+                     @Override
+                     public void checkIntegrity() {
+                       throw new UnsupportedOperationException();
+                     }
+
+                     @Override
+                     public long ramBytesUsed() {
+                       return 0L;
+                     }
+
+                     @Override
+                     public void close() {
+                     }
+                   });
+      }
+    }
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java
index 11ba4ec..3465450 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java
@@ -103,4 +103,9 @@
   public CompoundFormat compoundFormat() {
     return delegate.compoundFormat();
   }
+
+  @Override
+  public DimensionalFormat dimensionalFormat() {
+    return delegate.dimensionalFormat();
+  }
 }
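A codec can likewise swap in a different dimensional format without touching any other format. A sketch using the hypothetical name "DimDebug" and the SimpleText format, for debugging only:

    Codec debugCodec = new FilterCodec("DimDebug", Codec.forName("Lucene53")) {
        @Override
        public DimensionalFormat dimensionalFormat() {
          return new SimpleTextDimensionalFormat();
        }
      };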
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
index 7c9fc0d..35edb06 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
@@ -152,7 +152,7 @@
           lastAttributes = attributes;
           try {
             infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, 
-                                     indexOptions, docValuesType, dvGen, attributes);
+                                     indexOptions, docValuesType, dvGen, attributes, 0, 0);
             infos[i].checkConsistency();
           } catch (IllegalStateException e) {
             throw new CorruptIndexException("invalid fieldinfo for field: " + name + ", fieldNumber=" + fieldNumber, input, e);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
index d5c49fb..0df82c7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java
@@ -21,6 +21,9 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
+import org.apache.lucene.codecs.DimensionalFormat;
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.codecs.DimensionalWriter;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.FilterCodec;
@@ -39,6 +42,8 @@
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
 
 /**
  * Implements the Lucene 5.3 index format, with configurable per-field postings
@@ -157,6 +162,11 @@
     return docValuesFormat;
   }
 
+  @Override
+  public final DimensionalFormat dimensionalFormat() {
+    return DimensionalFormat.EMPTY;
+  }
+
   private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
   private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene50");
 
diff --git a/lucene/core/src/java/org/apache/lucene/document/DimensionalField.java b/lucene/core/src/java/org/apache/lucene/document/DimensionalField.java
new file mode 100644
index 0000000..52bb3b5
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/DimensionalField.java
@@ -0,0 +1,122 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.BytesRef;
+
+/** A field that is indexed dimensionally such that finding
+ *  all documents within an N-dimensional shape or range at
+ *  search time is efficient.  Multiple values for the same
+ *  field in one document are allowed. */
+
+public final class DimensionalField extends Field {
+
+  private static FieldType getType(byte[][] point) {
+    if (point == null) {
+      throw new IllegalArgumentException("point cannot be null");
+    }
+    if (point.length == 0) {
+      throw new IllegalArgumentException("point cannot be 0 dimensions");
+    }
+    int bytesPerDim = -1;
+    for(int i=0;i<point.length;i++) {
+      byte[] oneDim = point[i];
+      if (oneDim == null) {
+        throw new IllegalArgumentException("point cannot have null values");
+      }
+      if (oneDim.length == 0) {
+        throw new IllegalArgumentException("point cannot have 0-length values");
+      }
+      if (bytesPerDim == -1) {
+        bytesPerDim = oneDim.length;
+      } else if (bytesPerDim != oneDim.length) {
+        throw new IllegalArgumentException("all dimensions must have same bytes length; got " + bytesPerDim + " and " + oneDim.length);
+      }
+    }
+    return getType(point.length, bytesPerDim);
+  }
+
+  private static FieldType getType(int numDims, int bytesPerDim) {
+    FieldType type = new FieldType();
+    type.setDimensions(numDims, bytesPerDim);
+    type.freeze();
+    return type;
+  }
+
+  private static BytesRef pack(byte[]... point) {
+    if (point == null) {
+      throw new IllegalArgumentException("point cannot be null");
+    }
+    if (point.length == 0) {
+      throw new IllegalArgumentException("point cannot be 0 dimensions");
+    }
+    int bytesPerDim = -1;
+    for(byte[] dim : point) {
+      if (dim == null) {
+        throw new IllegalArgumentException("point cannot have null values");
+      }
+      if (bytesPerDim == -1) {
+        if (dim.length == 0) {
+          throw new IllegalArgumentException("point cannot have 0-length values");
+        }
+        bytesPerDim = dim.length;
+      } else if (dim.length != bytesPerDim) {
+        throw new IllegalArgumentException("all dimensions must have same bytes length; got " + bytesPerDim + " and " + dim.length);
+      }
+    }
+    byte[] packed = new byte[bytesPerDim*point.length];
+    for(int i=0;i<point.length;i++) {
+      System.arraycopy(point[i], 0, packed, i*bytesPerDim, bytesPerDim);
+    }
+    return new BytesRef(packed);
+  }
+
+  /** Sugar API: indexes a one-dimensional point */
+  public DimensionalField(String name, byte[] dim1) {
+    super(name, dim1, getType(1, dim1.length));
+  }
+
+  /** Sugar API: indexes a two-dimensional point */
+  public DimensionalField(String name, byte[] dim1, byte[] dim2) {
+    super(name, pack(dim1, dim2), getType(2, dim1.length));
+  }
+
+  /** Sugar API: indexes a three-dimensional point */
+  public DimensionalField(String name, byte[] dim1, byte[] dim2, byte[] dim3) {
+    super(name, pack(dim1, dim2, dim3), getType(3, dim1.length));
+  }
+
+  /** General purpose API: creates a new DimensionalField, indexing the
+   *  provided N-dimensional binary point.
+   *
+   *  @param name field name
+   *  @param point byte[][] value
+   *  @throws IllegalArgumentException if the field name or value is null.
+   */
+  public DimensionalField(String name, byte[][] point) {
+    super(name, pack(point), getType(point));
+  }
+
+  /** Expert API */
+  public DimensionalField(String name, byte[] packedPoint, FieldType type) {
+    super(name, packedPoint, type);
+    if (packedPoint.length != type.dimensionCount() * type.dimensionNumBytes()) {
+      throw new IllegalArgumentException("packedPoint is length=" + packedPoint.length + " but type.dimensionCount()=" + type.dimensionCount() + " and type.dimensionNumBytes()=" + type.dimensionNumBytes());
+    }
+  }
+}
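A sketch of the sugar API, assuming an open IndexWriter named writer; the big-endian int encodings are illustrative:

    Document doc = new Document();
    byte[] x = new byte[] {0, 0, 0, 17};  // the int 17, big-endian
    byte[] y = new byte[] {0, 0, 0, 42};  // the int 42, big-endian
    doc.add(new DimensionalField("point", x, y));  // 2D point, 4 bytes per dim
    writer.addDocument(doc);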
diff --git a/lucene/core/src/java/org/apache/lucene/document/Document.java b/lucene/core/src/java/org/apache/lucene/document/Document.java
index 503363c..71f661c 100644
--- a/lucene/core/src/java/org/apache/lucene/document/Document.java
+++ b/lucene/core/src/java/org/apache/lucene/document/Document.java
@@ -312,7 +312,7 @@
     return new FilterIterator<StorableField, Field>(fields.iterator()) {
       @Override
       protected boolean predicateFunction(Field field) {
-        return field.type.stored() || field.type.docValuesType() != DocValuesType.NONE;
+        return field.type.stored() || field.type.docValuesType() != DocValuesType.NONE || field.type.dimensionCount() != 0;
       }
     };
   }
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
index 8728e5b..ab7416b 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
@@ -55,6 +55,8 @@
   private boolean frozen;
   private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
   private DocValuesType docValuesType = DocValuesType.NONE;
+  private int dimensionCount;
+  private int dimensionNumBytes;
 
   /**
    * Create a new mutable FieldType with all of the properties from <code>ref</code>
@@ -71,6 +73,8 @@
     this.numericType = ref.numericType();
     this.numericPrecisionStep = ref.numericPrecisionStep();
     this.docValuesType = ref.docValuesType();
+    this.dimensionCount = ref.dimensionCount();
+    this.dimensionNumBytes = ref.dimensionNumBytes();
     // Do not copy frozen!
   }
   
@@ -342,6 +346,40 @@
     return numericPrecisionStep;
   }
 
+  /**
+   * Enables dimensional indexing.
+   */
+  public void setDimensions(int dimensionCount, int dimensionNumBytes) {
+    if (dimensionCount < 0) {
+      throw new IllegalArgumentException("dimensionCount must be >= 0; got " + dimensionCount);
+    }
+    if (dimensionNumBytes < 0) {
+      throw new IllegalArgumentException("dimensionNumBytes must be >= 0; got " + dimensionNumBytes);
+    }
+    if (dimensionCount == 0) {
+      if (dimensionNumBytes != 0) {
+        throw new IllegalArgumentException("when dimensionCount is 0 dimensionNumBytes must be 0; got " + dimensionNumBytes);
+      }
+    } else if (dimensionNumBytes == 0) {
+      if (dimensionCount != 0) {
+        throw new IllegalArgumentException("when dimensionNumBytes is 0 dimensionCount must be 0; got " + dimensionCount);
+      }
+    }
+
+    this.dimensionCount = dimensionCount;
+    this.dimensionNumBytes = dimensionNumBytes;
+  }
+
+  @Override
+  public int dimensionCount() {
+    return dimensionCount;
+  }
+
+  @Override
+  public int dimensionNumBytes() {
+    return dimensionNumBytes;
+  }
+
   /** Prints a Field for human consumption. */
   @Override
   public final String toString() {
@@ -381,6 +419,12 @@
         result.append(",numericPrecisionStep=");
         result.append(numericPrecisionStep);
       }
+      if (dimensionCount != 0) {
+        result.append(",dimensionCount=");
+        result.append(dimensionCount);
+        result.append(",dimensionNumBytes=");
+        result.append(dimensionNumBytes);
+      }
     }
     if (docValuesType != DocValuesType.NONE) {
       if (result.length() > 0) {
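A sketch of the expert path through setDimensions, assuming three 8-byte dimensions (e.g. three doubles):

    FieldType type = new FieldType();
    type.setDimensions(3, 8);  // 3 dimensions, 8 bytes per dimension
    type.freeze();

    // packedPoint must be dimensionCount * dimensionNumBytes = 24 bytes:
    Field field = new DimensionalField("vector", new byte[24], type);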
diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
index 701e65f..0e4f0e3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.NormsProducer;
@@ -74,6 +75,12 @@
    * @lucene.internal
    */
   public abstract FieldsProducer getPostingsReader();
+
+  /**
+   * Expert: retrieve underlying DimensionalReader
+   * @lucene.internal
+   */
+  public abstract DimensionalReader getDimensionalReader();
   
   @Override
   public final void document(int docID, StoredFieldVisitor visitor) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index bbef6e8..447ec3d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -23,6 +23,8 @@
 import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.DimensionalFormat;
+import org.apache.lucene.codecs.DimensionalWriter;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.NormsConsumer;
@@ -91,6 +93,7 @@
     int maxDoc = state.segmentInfo.maxDoc();
     writeNorms(state);
     writeDocValues(state);
+    writeDimensionalValues(state);
     
     // it's possible all docs hit non-aborting exceptions...
     initStoredFieldsWriter();
@@ -118,6 +121,44 @@
     docWriter.codec.fieldInfosFormat().write(state.directory, state.segmentInfo, "", state.fieldInfos, IOContext.DEFAULT);
   }
 
+  /** Writes all buffered dimensional values (called from {@link #flush}). */
+  private void writeDimensionalValues(SegmentWriteState state) throws IOException {
+    DimensionalWriter dimensionalWriter = null;
+    boolean success = false;
+    try {
+      for (int i=0;i<fieldHash.length;i++) {
+        PerField perField = fieldHash[i];
+        while (perField != null) {
+          if (perField.dimensionalValuesWriter != null) {
+            if (perField.fieldInfo.getDimensionCount() == 0) {
+              // BUG
+              throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has no dimensional values but wrote them");
+            }
+            if (dimensionalWriter == null) {
+              // lazy init
+              DimensionalFormat fmt = state.segmentInfo.getCodec().dimensionalFormat();
+              dimensionalWriter = fmt.fieldsWriter(state);
+            }
+
+            perField.dimensionalValuesWriter.flush(state, dimensionalWriter);
+            perField.dimensionalValuesWriter = null;
+          } else if (perField.fieldInfo.getDimensionCount() != 0) {
+            // BUG
+            throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has dimensional values but did not write them");
+          }
+          perField = perField.next;
+        }
+      }
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(dimensionalWriter);
+      } else {
+        IOUtils.closeWhileHandlingException(dimensionalWriter);
+      }
+    }
+  }
+
   /** Writes all buffered doc values (called from {@link #flush}). */
   private void writeDocValues(SegmentWriteState state) throws IOException {
     int maxDoc = state.segmentInfo.maxDoc();
@@ -355,6 +396,9 @@
         if (dvType != DocValuesType.NONE) {
           indexDocValue(fp, dvType, field);
         }
+        if (fieldType.dimensionCount() != 0) {
+          indexDimensionalValue(fp, field);
+        }
       }
     } finally {
       if (abort == false) {
@@ -387,8 +431,27 @@
     }
   }
 
-  /** Called from processDocument to index one field's doc
-   *  value */
+  /** Called from processDocument to index one field's dimensional value */
+  private void indexDimensionalValue(PerField fp, StorableField field) throws IOException {
+    int dimensionCount = field.fieldType().dimensionCount();
+
+    int dimensionNumBytes = field.fieldType().dimensionNumBytes();
+
+    // Record dimensions for this field; this setter will throw IllegalArgExc if
+    // the dimensions were already set to something different:
+    if (fp.fieldInfo.getDimensionCount() == 0) {
+      fieldInfos.globalFieldNumbers.setDimensions(fp.fieldInfo.number, fp.fieldInfo.name, dimensionCount, dimensionNumBytes);
+    }
+
+    fp.fieldInfo.setDimensions(dimensionCount, dimensionNumBytes);
+
+    if (fp.dimensionalValuesWriter == null) {
+      fp.dimensionalValuesWriter = new DimensionalValuesWriter(docWriter, fp.fieldInfo);
+    }
+    fp.dimensionalValuesWriter.addPackedValue(docState.docID, field.binaryValue());
+  }
+
+  /** Called from processDocument to index one field's doc value */
   private void indexDocValue(PerField fp, DocValuesType dvType, StorableField field) throws IOException {
 
     if (fp.fieldInfo.getDocValuesType() == DocValuesType.NONE) {
@@ -516,6 +579,9 @@
     // segment:
     DocValuesWriter docValuesWriter;
 
+    // Non-null if this field ever had dimensional values in this segment:
+    DimensionalValuesWriter dimensionalValuesWriter;
+
     /** We use this to know when a PerField is seen for the
      *  first time in the current document. */
     long fieldGen = -1;
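For context, a rough sketch of the write path this chain now implements, assuming a plain binary Field carries the packed value (the document-level field class is outside this hunk), that `writer` is an open IndexWriter, and the usual org.apache.lucene.document/util imports:

    FieldType type = new FieldType();
    type.setTokenized(false);            // binary values are never tokenized
    type.setDimensions(1, 4);            // one dimension of 4 bytes
    byte[] packed = new byte[4];
    BKDUtil.intToBytes(42, packed, 0);   // sortable big-endian int encoding
    Document doc = new Document();
    doc.add(new Field("dim", new BytesRef(packed), type));
    writer.addDocument(doc);             // indexDimensionalValue buffers the packed bytes
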
diff --git a/lucene/core/src/java/org/apache/lucene/index/DimensionalValues.java b/lucene/core/src/java/org/apache/lucene/index/DimensionalValues.java
new file mode 100644
index 0000000..0eb977e
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/DimensionalValues.java
@@ -0,0 +1,59 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Allows recursively visiting indexed dimensional values
+ *
+ *  @lucene.experimental */
+public abstract class DimensionalValues {
+
+  /** Default constructor */
+  protected DimensionalValues() {
+  }
+
+  /** Used by {@link #intersect} to check how each recursive cell corresponds to the query. */
+  public enum Relation {
+    /** Return this if the cell is fully contained by the query */
+    CELL_INSIDE_QUERY,
+    /** Return this if the cell and query do not overlap */
+    QUERY_OUTSIDE_CELL,
+    /** Return this if the cell partially overlaps the query */
+    QUERY_CROSSES_CELL
+  }
+
+  /** We recurse the BKD tree, using a provided instance of this to guide the recursion.
+   *
+   * @lucene.experimental */
+  public interface IntersectVisitor {
+    /** Called for all docs in a leaf cell that's fully contained by the query.  The
+     *  consumer should blindly accept the docID. */
+    void visit(int docID) throws IOException;
+
+    /** Called for all docs in a leaf cell that crosses the query.  The consumer
+     *  should scrutinize the packedValue to decide whether to accept it. */
+    void visit(int docID, byte[] packedValue) throws IOException;
+
+    /** Called for non-leaf cells to test how the cell relates to the query, to
+     *  determine how to further recurse down the tree. */
+    Relation compare(byte[] minPackedValue, byte[] maxPackedValue);
+  }
+
+  /** Finds all documents and points matching the provided visitor */
+  public abstract void intersect(String fieldName, IntersectVisitor visitor) throws IOException;
+}
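To make the visitor contract concrete, a sketch of a 1D integer range filter over [min, max]; it assumes the field was encoded with BKDUtil.intToBytes, that min, max, and the hits list are supplied by the caller, and the usual java.util and org.apache.lucene imports:

    final int min = 10, max = 20;                  // assumed query bounds
    final List<Integer> hits = new ArrayList<>();  // assumed collector
    DimensionalValues.IntersectVisitor visitor = new DimensionalValues.IntersectVisitor() {
      @Override
      public void visit(int docID) {
        hits.add(docID);                           // cell fully inside query: accept blindly
      }
      @Override
      public void visit(int docID, byte[] packedValue) {
        int v = BKDUtil.bytesToInt(packedValue, 0);
        if (v >= min && v <= max) {                // crossing cell: check each value
          hits.add(docID);
        }
      }
      @Override
      public DimensionalValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
        int cellMin = BKDUtil.bytesToInt(minPackedValue, 0);
        int cellMax = BKDUtil.bytesToInt(maxPackedValue, 0);
        if (cellMax < min || cellMin > max) {
          return DimensionalValues.Relation.QUERY_OUTSIDE_CELL;
        } else if (cellMin >= min && cellMax <= max) {
          return DimensionalValues.Relation.CELL_INSIDE_QUERY;
        } else {
          return DimensionalValues.Relation.QUERY_CROSSES_CELL;
        }
      }
    };
    leafReader.getDimensionalValues().intersect("dim", visitor);  // leafReader: any LeafReader
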
diff --git a/lucene/core/src/java/org/apache/lucene/index/DimensionalValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/DimensionalValuesWriter.java
new file mode 100644
index 0000000..5a8d257
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/DimensionalValuesWriter.java
@@ -0,0 +1,96 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.codecs.DimensionalWriter;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.ByteBlockPool;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Counter;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/** Buffers up one pending packed byte[] value per doc, then flushes when segment flushes. */
+class DimensionalValuesWriter {
+  private final FieldInfo fieldInfo;
+  private final ByteBlockPool bytes;
+  private final Counter iwBytesUsed;
+  private int[] docIDs;
+  private int numDocs;
+  private final byte[] packedValue;
+
+  public DimensionalValuesWriter(DocumentsWriterPerThread docWriter, FieldInfo fieldInfo) {
+    this.fieldInfo = fieldInfo;
+    this.iwBytesUsed = docWriter.bytesUsed;
+    this.bytes = new ByteBlockPool(docWriter.byteBlockAllocator);
+    docIDs = new int[16];
+    iwBytesUsed.addAndGet(16 * RamUsageEstimator.NUM_BYTES_INT);
+    packedValue = new byte[fieldInfo.getDimensionCount() * fieldInfo.getDimensionNumBytes()];
+  }
+
+  public void addPackedValue(int docID, BytesRef value) {
+    if (value == null) {
+      throw new IllegalArgumentException("field=" + fieldInfo.name + ": dimensional value cannot be null");
+    }
+    if (value.length != fieldInfo.getDimensionCount() * fieldInfo.getDimensionNumBytes()) {
+      throw new IllegalArgumentException("field=" + fieldInfo.name + ": this field's value has length=" + value.length + " but should be " + (fieldInfo.getDimensionCount() * fieldInfo.getDimensionNumBytes()));
+    }
+    if (docIDs.length == numDocs) {
+      docIDs = ArrayUtil.grow(docIDs, numDocs+1);
+      iwBytesUsed.addAndGet((docIDs.length - numDocs) * RamUsageEstimator.NUM_BYTES_INT);
+    }
+    bytes.append(value);
+    docIDs[numDocs] = docID;
+    numDocs++;
+  }
+
+  public void flush(SegmentWriteState state, DimensionalWriter writer) throws IOException {
+
+    final int maxDoc = state.segmentInfo.maxDoc();
+
+    writer.writeField(fieldInfo,
+                      new DimensionalReader() {
+                        @Override
+                        public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
+                          if (fieldName.equals(fieldInfo.name) == false) {
+                            throw new IllegalArgumentException("fieldName must be \"" + fieldInfo.name + "\" but got \"" + fieldName + "\"");
+                          }
+                          for(int i=0;i<numDocs;i++) {
+                            bytes.readBytes(packedValue.length * i, packedValue, 0, packedValue.length);
+                            visitor.visit(docIDs[i], packedValue);
+                          }
+                        }
+
+                        @Override
+                        public void checkIntegrity() {
+                          throw new UnsupportedOperationException();
+                        }
+
+                        @Override
+                        public long ramBytesUsed() {
+                          return 0L;
+                        }
+
+                        @Override
+                        public void close() {
+                        }
+                      });
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
index c6c34e3..015dc51 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
@@ -20,6 +20,8 @@
 import java.util.Map;
 import java.util.Objects;
 
+import org.apache.lucene.codecs.DimensionalFormat;
+
 /**
  *  Access to the Field Info file that describes document fields and whether or
  *  not they are indexed. Each segment has a separate Field Info file. Objects
@@ -47,14 +49,20 @@
   private final Map<String,String> attributes;
 
   private long dvGen;
+
+  /** If both of these are positive it means this is a dimensionally indexed
+   *  field (see {@link DimensionalFormat}). */
+  private int dimensionCount;
+  private int dimensionNumBytes;
+
   /**
    * Sole constructor.
    *
    * @lucene.experimental
    */
   public FieldInfo(String name, int number, boolean storeTermVector, boolean omitNorms, 
-      boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues,
-      long dvGen, Map<String,String> attributes) {
+                   boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues,
+                   long dvGen, Map<String,String> attributes, int dimensionCount, int dimensionNumBytes) {
     this.name = Objects.requireNonNull(name);
     this.number = number;
     this.docValuesType = Objects.requireNonNull(docValues, "DocValuesType cannot be null (field: \"" + name + "\")");
@@ -70,6 +78,8 @@
     }
     this.dvGen = dvGen;
     this.attributes = Objects.requireNonNull(attributes);
+    this.dimensionCount = dimensionCount;
+    this.dimensionNumBytes = dimensionNumBytes;
     assert checkConsistency();
   }
 
@@ -94,6 +104,22 @@
         throw new IllegalStateException("non-indexed field '" + name + "' cannot omit norms");
       }
     }
+
+    if (dimensionCount < 0) {
+      throw new IllegalStateException("dimensionCount must be >= 0; got " + dimensionCount);
+    }
+
+    if (dimensionNumBytes < 0) {
+      throw new IllegalStateException("dimensionNumBytes must be >= 0; got " + dimensionNumBytes);
+    }
+
+    if (dimensionCount != 0 && dimensionNumBytes == 0) {
+      throw new IllegalStateException("dimensionNumBytes must be > 0 when dimensionCount=" + dimensionCount);
+    }
+
+    if (dimensionNumBytes != 0 && dimensionCount == 0) {
+      throw new IllegalStateException("dimensionCount must be > 0 when dimensionNumBytes=" + dimensionNumBytes);
+    }
     
     if (dvGen != -1 && docValuesType == DocValuesType.NONE) {
       throw new IllegalStateException("field '" + name + "' cannot have a docvalues update generation without having docvalues");
@@ -103,7 +129,8 @@
   }
 
   // should only be called by FieldInfos#addOrUpdate
-  void update(boolean storeTermVector, boolean omitNorms, boolean storePayloads, IndexOptions indexOptions) {
+  void update(boolean storeTermVector, boolean omitNorms, boolean storePayloads, IndexOptions indexOptions,
+              int dimensionCount, int dimensionNumBytes) {
     if (indexOptions == null) {
       throw new NullPointerException("IndexOptions cannot be null (field: \"" + name + "\")");
     }
@@ -117,6 +144,11 @@
       }
     }
 
+    if (this.dimensionCount == 0 && dimensionCount != 0) {
+      this.dimensionCount = dimensionCount;
+      this.dimensionNumBytes = dimensionNumBytes;
+    }
+
     if (this.indexOptions != IndexOptions.NONE) { // if updated field data is not for indexing, leave the updates out
       this.storeTermVector |= storeTermVector;                // once vector, always vector
       this.storePayloads |= storePayloads;
@@ -133,6 +165,36 @@
     assert checkConsistency();
   }
 
+  /** Record that this field is indexed dimensionally, with the
+   *  specified number of dimensions and bytes per dimension. */
+  public void setDimensions(int count, int numBytes) {
+    if (count <= 0) {
+      throw new IllegalArgumentException("dimension count must be > 0; got " + count + " for field=\"" + name + "\"");
+    }
+    if (numBytes <= 0) {
+      throw new IllegalArgumentException("dimension numBytes must be > 0; got " + numBytes + " for field=\"" + name + "\"");
+    }
+    if (dimensionCount != 0 && dimensionCount != count) {
+      throw new IllegalArgumentException("cannot change dimension count from " + dimensionCount + " to " + count + " for field=\"" + name + "\"");
+    }
+    if (dimensionNumBytes != 0 && dimensionNumBytes != numBytes) {
+      throw new IllegalArgumentException("cannot change dimension numBytes from " + dimensionNumBytes + " to " + numBytes + " for field=\"" + name + "\"");
+    }
+
+    dimensionCount = count;
+    dimensionNumBytes = numBytes;
+  }
+
+  /** Return dimension count */
+  public int getDimensionCount() {
+    return dimensionCount;
+  }
+
+  /** Return number of bytes per dimension */
+  public int getDimensionNumBytes() {
+    return dimensionNumBytes;
+  }
+
   void setDocValuesType(DocValuesType type) {
     if (type == null) {
       throw new NullPointerException("DocValuesType cannot be null (field: \"" + name + "\")");
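The setter above makes dimensions write-once per field; a sketch of the contract using the new constructor signature (field name and number are illustrative):

    FieldInfo fi = new FieldInfo("point", 0, false, false, false,
                                 IndexOptions.NONE, DocValuesType.NONE,
                                 -1, new HashMap<>(), 2, 4);
    fi.setDimensions(2, 4);   // OK: same dimension count and width
    fi.setDimensions(3, 4);   // throws IllegalArgumentException: cannot change dimension count
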
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
index 0023f28..2ae94be 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
@@ -39,6 +39,7 @@
   private final boolean hasVectors;
   private final boolean hasNorms;
   private final boolean hasDocValues;
+  private final boolean hasDimensionalValues;
   
   // used only by fieldInfo(int)
   private final FieldInfo[] byNumberTable; // contiguous
@@ -58,6 +59,7 @@
     boolean hasFreq = false;
     boolean hasNorms = false;
     boolean hasDocValues = false;
+    boolean hasDimensionalValues = false;
     
     TreeMap<Integer, FieldInfo> byNumber = new TreeMap<>();
     for (FieldInfo info : infos) {
@@ -80,6 +82,7 @@
       hasNorms |= info.hasNorms();
       hasDocValues |= info.getDocValuesType() != DocValuesType.NONE;
       hasPayloads |= info.hasPayloads();
+      hasDimensionalValues |= info.getDimensionCount() != 0;
     }
     
     this.hasVectors = hasVectors;
@@ -89,6 +92,7 @@
     this.hasFreq = hasFreq;
     this.hasNorms = hasNorms;
     this.hasDocValues = hasDocValues;
+    this.hasDimensionalValues = hasDimensionalValues;
     this.values = Collections.unmodifiableCollection(byNumber.values());
     Integer max = byNumber.isEmpty() ? null : Collections.max(byNumber.keySet());
     
@@ -142,6 +146,11 @@
   public boolean hasDocValues() {
     return hasDocValues;
   }
+
+  /** Returns true if any fields have DimensionalValues */
+  public boolean hasDimensionalValues() {
+    return hasDimensionalValues;
+  }
   
   /** Returns the number of fields */
   public int size() {
@@ -187,6 +196,16 @@
       return byNumberMap.get(fieldNumber);
     }
   }
+
+  static final class FieldDimensions {
+    public final int dimensionCount;
+    public final int dimensionNumBytes;
+
+    public FieldDimensions(int dimensionCount, int dimensionNumBytes) {
+      this.dimensionCount = dimensionCount;
+      this.dimensionNumBytes = dimensionNumBytes;
+    }
+  }
   
   static final class FieldNumbers {
     
@@ -197,6 +216,8 @@
     // sessions:
     private final Map<String,DocValuesType> docValuesType;
 
+    private final Map<String,FieldDimensions> dimensions;
+
     // TODO: we should similarly catch an attempt to turn
     // norms back on after they were already ommitted; today
     // we silently discard the norm but this is badly trappy
@@ -206,6 +227,7 @@
       this.nameToNumber = new HashMap<>();
       this.numberToName = new HashMap<>();
       this.docValuesType = new HashMap<>();
+      this.dimensions = new HashMap<>();
     }
     
     /**
@@ -214,7 +236,7 @@
      * number assigned if possible otherwise the first unassigned field number
      * is used as the field number.
      */
-    synchronized int addOrGet(String fieldName, int preferredFieldNumber, DocValuesType dvType) {
+    synchronized int addOrGet(String fieldName, int preferredFieldNumber, DocValuesType dvType, int dimensionCount, int dimensionNumBytes) {
       if (dvType != DocValuesType.NONE) {
         DocValuesType currentDVType = docValuesType.get(fieldName);
         if (currentDVType == null) {
@@ -223,6 +245,19 @@
           throw new IllegalArgumentException("cannot change DocValues type from " + currentDVType + " to " + dvType + " for field \"" + fieldName + "\"");
         }
       }
+      if (dimensionCount != 0) {
+        FieldDimensions dims = dimensions.get(fieldName);
+        if (dims != null) {
+          if (dims.dimensionCount != dimensionCount) {
+            throw new IllegalArgumentException("cannot change dimension count from " + dims.dimensionCount + " to " + dimensionCount + " for field=\"" + fieldName + "\"");
+          }
+          if (dims.dimensionNumBytes != dimensionNumBytes) {
+            throw new IllegalArgumentException("cannot change dimension numBytes from " + dims.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + fieldName + "\"");
+          }
+        } else {
+          dimensions.put(fieldName, new FieldDimensions(dimensionCount, dimensionNumBytes));
+        }
+      }
       Integer fieldNumber = nameToNumber.get(fieldName);
       if (fieldNumber == null) {
         final Integer preferredBoxed = Integer.valueOf(preferredFieldNumber);
@@ -257,6 +292,24 @@
       }
     }
 
+    synchronized void verifyConsistentDimensions(Integer number, String name, int dimensionCount, int dimensionNumBytes) {
+      if (name.equals(numberToName.get(number)) == false) {
+        throw new IllegalArgumentException("field number " + number + " is already mapped to field name \"" + numberToName.get(number) + "\", not \"" + name + "\"");
+      }
+      if (number.equals(nameToNumber.get(name)) == false) {
+        throw new IllegalArgumentException("field name \"" + name + "\" is already mapped to field number \"" + nameToNumber.get(name) + "\", not \"" + number + "\"");
+      }
+      FieldDimensions dim = dimensions.get(name);
+      if (dim != null) {
+        if (dim.dimensionCount != dimensionCount) {
+          throw new IllegalArgumentException("cannot change dimension count from " + dim.dimensionCount + " to " + dimensionCount + " for field=\"" + name + "\"");
+        }
+        if (dim.dimensionNumBytes != dimensionNumBytes) {
+          throw new IllegalArgumentException("cannot change dimension numBytes from " + dim.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + name + "\"");
+        }
+      }
+    }
+
     /**
      * Returns true if the {@code fieldName} exists in the map and is of the
      * same {@code dvType}.
@@ -275,12 +328,18 @@
       numberToName.clear();
       nameToNumber.clear();
       docValuesType.clear();
+      dimensions.clear();
     }
 
     synchronized void setDocValuesType(int number, String name, DocValuesType dvType) {
       verifyConsistent(number, name, dvType);
       docValuesType.put(name, dvType);
     }
+
+    synchronized void setDimensions(int number, String name, int dimensionCount, int dimensionNumBytes) {
+      verifyConsistentDimensions(number, name, dimensionCount, dimensionNumBytes);
+      dimensions.put(name, new FieldDimensions(dimensionCount, dimensionNumBytes));
+    }
   }
   
   static final class Builder {
@@ -314,8 +373,8 @@
         // number for this field.  If the field was seen
         // before then we'll get the same name and number,
         // else we'll allocate a new one:
-        final int fieldNumber = globalFieldNumbers.addOrGet(name, -1, DocValuesType.NONE);
-        fi = new FieldInfo(name, fieldNumber, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, new HashMap<>());
+        final int fieldNumber = globalFieldNumbers.addOrGet(name, -1, DocValuesType.NONE, 0, 0);
+        fi = new FieldInfo(name, fieldNumber, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, new HashMap<>(), 0, 0);
         assert !byName.containsKey(fi.name);
         globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, DocValuesType.NONE);
         byName.put(fi.name, fi);
@@ -325,8 +384,9 @@
     }
    
     private FieldInfo addOrUpdateInternal(String name, int preferredFieldNumber,
-        boolean storeTermVector,
-        boolean omitNorms, boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues) {
+                                          boolean storeTermVector,
+                                          boolean omitNorms, boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues,
+                                          int dimensionCount, int dimensionNumBytes) {
       if (docValues == null) {
         throw new NullPointerException("DocValuesType cannot be null");
       }
@@ -337,13 +397,13 @@
         // number for this field.  If the field was seen
         // before then we'll get the same name and number,
         // else we'll allocate a new one:
-        final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, docValues);
-        fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, -1, new HashMap<>());
+        final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, docValues, dimensionCount, dimensionNumBytes);
+        fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, -1, new HashMap<>(), dimensionCount, dimensionNumBytes);
         assert !byName.containsKey(fi.name);
         globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, fi.getDocValuesType());
         byName.put(fi.name, fi);
       } else {
-        fi.update(storeTermVector, omitNorms, storePayloads, indexOptions);
+        fi.update(storeTermVector, omitNorms, storePayloads, indexOptions, dimensionCount, dimensionNumBytes);
 
         if (docValues != DocValuesType.NONE) {
           // Only pay the synchronization cost if fi does not already have a DVType
@@ -364,8 +424,9 @@
     public FieldInfo add(FieldInfo fi) {
       // IMPORTANT - reuse the field number if possible for consistent field numbers across segments
       return addOrUpdateInternal(fi.name, fi.number, fi.hasVectors(),
-                 fi.omitsNorms(), fi.hasPayloads(),
-                 fi.getIndexOptions(), fi.getDocValuesType());
+                                 fi.omitsNorms(), fi.hasPayloads(),
+                                 fi.getIndexOptions(), fi.getDocValuesType(),
+                                 fi.getDimensionCount(), fi.getDimensionNumBytes());
     }
     
     public FieldInfo fieldInfo(String fieldName) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
index b2a8649..b55bb88 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterCodecReader.java
@@ -19,6 +19,7 @@
 
 import java.util.Objects;
 
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.NormsProducer;
@@ -81,6 +82,16 @@
   }
 
   @Override
+  public DimensionalReader getDimensionalReader() {
+    return in.getDimensionalReader();
+  }
+
+  @Override
+  public DimensionalValues getDimensionalValues() {
+    return in.getDimensionalValues();
+  }
+
+  @Override
   public int numDocs() {
     return in.numDocs();
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 6e896af..524fc38 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -381,6 +381,11 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    return in.getDimensionalValues();
+  }
+
+  @Override
   public Fields getTermVectors(int docID)
           throws IOException {
     ensureOpen();
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 642f9af..cf5b554 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -1017,7 +1017,7 @@
     for(SegmentCommitInfo info : segmentInfos) {
       FieldInfos fis = readFieldInfos(info);
       for(FieldInfo fi : fis) {
-        map.addOrGet(fi.name, fi.number, fi.getDocValuesType());
+        map.addOrGet(fi.name, fi.number, fi.getDocValuesType(), fi.getDimensionCount(), fi.getDimensionNumBytes());
       }
     }
 
@@ -2492,7 +2492,8 @@
 
             FieldInfos fis = readFieldInfos(info);
             for(FieldInfo fi : fis) {
-              globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getDocValuesType());
+              // This will throw exceptions if any of the incoming fields have an illegal schema change:
+              globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getDocValuesType(), fi.getDimensionCount(), fi.getDimensionNumBytes());
             }
             infos.add(copySegmentAsIs(info, newSegName, context));
           }
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
index ee24158..34b86e3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
@@ -95,5 +95,15 @@
    * DocValues {@link DocValuesType}: how the field's value will be indexed
    * into docValues.
    */
-  public DocValuesType docValuesType();  
+  public DocValuesType docValuesType();
+
+  /**
+   * If this is positive, the field is indexed dimensionally.
+   */
+  public int dimensionCount();
+
+  /**
+   * The number of bytes in each dimension's values.
+   */
+  public int dimensionNumBytes();
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
index 8b27b15..523ee4e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
@@ -17,11 +17,12 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
 import org.apache.lucene.util.Bits;
 
-import java.io.IOException;
-
 /** {@code LeafReader} is an abstract class, providing an interface for accessing an
  index.  Search of an index is done entirely through this abstract interface,
  so that any subclass which implements it is searchable. IndexReaders implemented
@@ -300,6 +301,10 @@
    */
   public abstract Bits getLiveDocs();
 
+  /** Returns the {@link DimensionalValues} used for numeric or
+   *  spatial searches, or null if there are no dimensional fields. */
+  public abstract DimensionalValues getDimensionalValues();
+
   /**
    * Checks consistency of this reader.
    * <p>
diff --git a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
index 59dfb85..5102e2b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -24,7 +24,6 @@
 import java.util.Locale;
 import java.util.Map;
 
-
 /**
  * <p>This class implements a {@link MergePolicy} that tries
  * to merge segments into levels of exponentially
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeState.java b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
index 1144753..a325b3f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergeState.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.NormsProducer;
@@ -65,6 +66,9 @@
   /** Postings to merge */
   public final FieldsProducer[] fieldsProducers;
 
+  /** Dimensional readers to merge */
+  public final DimensionalReader[] dimensionalReaders;
+
   /** New docID base per reader. */
   public final int[] docBase;
 
@@ -86,6 +90,7 @@
     storedFieldsReaders = new StoredFieldsReader[numReaders];
     termVectorsReaders = new TermVectorsReader[numReaders];
     docValuesProducers = new DocValuesProducer[numReaders];
+    dimensionalReaders = new DimensionalReader[numReaders];
     fieldInfos = new FieldInfos[numReaders];
     liveDocs = new Bits[numReaders];
 
@@ -117,6 +122,10 @@
       }
       
       fieldsProducers[i] = reader.getPostingsReader().getMergeInstance();
+      dimensionalReaders[i] = reader.getDimensionalReader();
+      if (dimensionalReaders[i] != null) {
+        dimensionalReaders[i] = dimensionalReaders[i].getMergeInstance();
+      }
     }
 
     this.segmentInfo = segmentInfo;
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDimensionalValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDimensionalValues.java
new file mode 100644
index 0000000..875d67d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDimensionalValues.java
@@ -0,0 +1,79 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+class MultiDimensionalValues extends DimensionalValues {
+
+  private final List<DimensionalValues> subs;
+  private final List<Integer> docBases;
+
+  private MultiDimensionalValues(List<DimensionalValues> subs, List<Integer> docBases) {
+    this.subs = subs;
+    this.docBases = docBases;
+  }
+
+  public static DimensionalValues get(IndexReader r) {
+    final List<LeafReaderContext> leaves = r.leaves();
+    final int size = leaves.size();
+    if (size == 0) {
+      return null;
+    } else if (size == 1) {
+      return leaves.get(0).reader().getDimensionalValues();
+    }
+
+    List<DimensionalValues> values = new ArrayList<>();
+    List<Integer> docBases = new ArrayList<>();
+    for (int i = 0; i < size; i++) {
+      LeafReaderContext context = leaves.get(i);
+      DimensionalValues v = context.reader().getDimensionalValues();
+      if (v != null) {
+        values.add(v);
+        docBases.add(context.docBase);
+      }
+    }
+
+    if (values.isEmpty()) {
+      // No leaf has dimensional values:
+      return null;
+    }
+
+    return new MultiDimensionalValues(values, docBases);
+  }
+
+  /** Finds all documents and points matching the provided visitor */
+  @Override
+  public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
+    for(int i=0;i<subs.size();i++) {
+      int docBase = docBases.get(i);
+      subs.get(i).intersect(fieldName,
+                        new IntersectVisitor() {
+                          @Override
+                          public void visit(int docID) throws IOException {
+                            visitor.visit(docBase+docID);
+                          }
+                          @Override
+                          public void visit(int docID, byte[] packedValue) throws IOException {
+                            visitor.visit(docBase+docID, packedValue);
+                          }
+                          @Override
+                          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+                            return visitor.compare(minPackedValue, maxPackedValue);
+                          }
+                        });
+    }
+  }
+}
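A short sketch of the rebasing behavior above; MultiDimensionalValues is package-private, so external callers typically reach it through SlowCompositeReaderWrapper (changed below). DocIDs delivered to the visitor are already shifted into the top-level docID space:

    DimensionalValues all = MultiDimensionalValues.get(indexReader);  // indexReader: a composite reader
    if (all != null) {
      all.intersect("dim", visitor);   // visitor sees docBase + segment-local docID
    }
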
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
index 9a78fe1..3f268c7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
@@ -315,6 +315,24 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    return new DimensionalValues() {
+      @Override
+      public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
+        LeafReader reader = fieldToReader.get(fieldName);
+        if (reader == null) {
+          throw new IllegalArgumentException("field=\"" + fieldName + "\" is unknown to this reader");
+        }
+        DimensionalValues dimValues = reader.getDimensionalValues();
+        if (dimValues == null) {
+          throw new IllegalArgumentException("field=\"" + fieldName + "\" did not index dimensional values");
+        }
+        dimValues.intersect(fieldName, visitor);
+      }
+    };
+  }
+
+  @Override
   public void checkIntegrity() throws IOException {
     ensureOpen();
     for (LeafReader reader : completeReaderSet) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
index 712f4a9..1b3a12e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
@@ -24,6 +24,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
@@ -53,6 +54,7 @@
 
   final StoredFieldsReader fieldsReaderOrig;
   final TermVectorsReader termVectorsReaderOrig;
+  final DimensionalReader dimensionalReader;
   final Directory cfsReader;
   /** 
    * fieldinfos for this core: means gen=-1.
@@ -81,7 +83,7 @@
   private final Set<CoreClosedListener> coreClosedListeners = 
       Collections.synchronizedSet(new LinkedHashSet<CoreClosedListener>());
   
-  SegmentCoreReaders(SegmentReader owner, Directory dir, SegmentCommitInfo si, IOContext context) throws IOException {
+  SegmentCoreReaders(Directory dir, SegmentCommitInfo si, IOContext context) throws IOException {
 
     final Codec codec = si.info.getCodec();
     final Directory cfsDir; // confusing name: if (cfs) it's the cfsdir, otherwise it's the segment's directory.
@@ -122,6 +124,11 @@
         termVectorsReaderOrig = null;
       }
 
+      if (coreFieldInfos.hasDimensionalValues()) {
+        dimensionalReader = codec.dimensionalFormat().fieldsReader(segmentReadState);
+      } else {
+        dimensionalReader = null;
+      }
       success = true;
     } finally {
       if (!success) {
@@ -150,7 +157,7 @@
       Throwable th = null;
       try {
         IOUtils.close(termVectorsLocal, fieldsReaderLocal, fields, termVectorsReaderOrig, fieldsReaderOrig,
-            cfsReader, normsProducer);
+                      cfsReader, normsProducer, dimensionalReader);
       } catch (Throwable throwable) {
         th = throwable;
       } finally {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
index b31a0dd..1b7829e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -21,6 +21,7 @@
 import java.util.List;
 
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DimensionalWriter;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.NormsConsumer;
@@ -108,6 +109,17 @@
       long t1 = System.nanoTime();
       mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge doc values [" + numMerged + " docs]");
     }
+
+    if (mergeState.infoStream.isEnabled("SM")) {
+      t0 = System.nanoTime();
+    }
+    if (mergeState.mergeFieldInfos.hasDimensionalValues()) {
+      mergeDimensionalValues(segmentWriteState);
+    }
+    if (mergeState.infoStream.isEnabled("SM")) {
+      long t1 = System.nanoTime();
+      mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge dimensional values [" + numMerged + " docs]");
+    }
     
     if (mergeState.mergeFieldInfos.hasNorms()) {
       if (mergeState.infoStream.isEnabled("SM")) {
@@ -151,6 +163,12 @@
     }
   }
 
+  private void mergeDimensionalValues(SegmentWriteState segmentWriteState) throws IOException {
+    try (DimensionalWriter writer = codec.dimensionalFormat().fieldsWriter(segmentWriteState)) {
+      writer.merge(mergeState);
+    }
+  }
+
   private void mergeNorms(SegmentWriteState segmentWriteState) throws IOException {
     try (NormsConsumer consumer = codec.normsFormat().normsConsumer(segmentWriteState)) {
       consumer.merge(mergeState);
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
index ed7d57f..57a4b9a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
@@ -21,6 +21,7 @@
 import java.util.Collections;
 
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.FieldsProducer;
@@ -62,7 +63,7 @@
   // TODO: why is this public?
   public SegmentReader(SegmentCommitInfo si, IOContext context) throws IOException {
     this.si = si;
-    core = new SegmentCoreReaders(this, si.info.dir, si, context);
+    core = new SegmentCoreReaders(si.info.dir, si, context);
     segDocValues = new SegmentDocValues();
     
     boolean success = false;
@@ -217,6 +218,12 @@
   }
   
   @Override
+  public DimensionalValues getDimensionalValues() {
+    ensureOpen();
+    return core.dimensionalReader;
+  }
+
+  @Override
   public NormsProducer getNormsReader() {
     ensureOpen();
     return core.normsProducer;
@@ -235,6 +242,12 @@
   }
 
   @Override
+  public DimensionalReader getDimensionalReader() {
+    ensureOpen();
+    return core.dimensionalReader;
+  }
+
+  @Override
   public String toString() {
     // SegmentInfo.toString takes dir and number of
     // *pending* deletions; so we reverse compute that here:
diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
index 9ee7b89..d07897e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.Iterator;
 
+import org.apache.lucene.codecs.DimensionalReader;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.NormsProducer;
@@ -91,6 +92,16 @@
         }
 
         @Override
+        public DimensionalValues getDimensionalValues() {
+          return reader.getDimensionalValues();
+        }
+
+        @Override
+        public DimensionalReader getDimensionalReader() {
+          return dimensionalValuesToReader(reader.getDimensionalValues());
+        }
+
+        @Override
         public Bits getLiveDocs() {
           return reader.getLiveDocs();
         }
@@ -117,6 +128,29 @@
       };
     }
   }
+
+  private static DimensionalReader dimensionalValuesToReader(DimensionalValues values) {
+    return new DimensionalReader() {
+      @Override
+      public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
+        values.intersect(fieldName, visitor);
+      }
+
+      @Override
+      public void checkIntegrity() throws IOException {
+        // We already checkIntegrity the entire reader up front
+      }
+
+      @Override
+      public void close() {
+      }
+
+      @Override
+      public long ramBytesUsed() {
+        return 0;
+      }
+    };
+  }
   
   private static NormsProducer readerToNormsProducer(final LeafReader reader) {
     return new NormsProducer() {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
index fc2d48b..ee00482 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
@@ -21,10 +21,10 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.index.MultiDocValues.MultiSortedDocValues;
 import org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues;
 import org.apache.lucene.index.MultiDocValues.OrdinalMap;
+import org.apache.lucene.util.Bits;
 
 /**
  * This class forces a composite reader (eg a {@link
@@ -234,6 +234,12 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    ensureOpen();
+    return MultiDimensionalValues.get(in);
+  }
+
+  @Override
   public FieldInfos getFieldInfos() {
     ensureOpen();
     return MultiFields.getMergedFieldInfos(in);
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index d58fa7e..b250288 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -21,6 +21,8 @@
 import java.util.Arrays;
 
 import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
+import org.apache.lucene.index.DimensionalValues.Relation;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.RamUsageEstimator;
@@ -29,7 +31,7 @@
  *
  * @lucene.experimental */
 
-public final class BKDReader implements Accountable {
+public class BKDReader implements Accountable {
   // Packed array of byte[] holding all split values in the full binary tree:
   final private byte[] splitPackedValues; 
   final private long[] leafBlockFPs;
@@ -37,31 +39,11 @@
   final int numDims;
   final int bytesPerDim;
   final IndexInput in;
-  final int packedBytesLength;
   final int maxPointsInLeafNode;
-
-  enum Relation {CELL_INSIDE_QUERY, QUERY_CROSSES_CELL, QUERY_OUTSIDE_CELL};
-
-  /** We recurse the BKD tree, using a provided instance of this to guide the recursion.
-   *
-   * @lucene.experimental */
-  public interface IntersectVisitor {
-    /** Called for all docs in a leaf cell that's fully contained by the query.  The
-     *  consumer should blindly accept the docID. */
-    void visit(int docID);
-
-    /** Called for all docs in a leaf cell that crosses the query.  The consumer
-     *  should scrutinize the packedValue to decide whether to accept it. */
-    void visit(int docID, byte[] packedValue);
-
-    /** Called for non-leaf cells to test how the cell relates to the query, to
-     *  determine how to further recurse down the treer. */
-    Relation compare(byte[] minPackedValue, byte[] maxPackedValue);
-  }
+  protected final int packedBytesLength;
 
   /** Caller must pre-seek the provided {@link IndexInput} to the index location that {@link BKDWriter#finish} returned */
   public BKDReader(IndexInput in) throws IOException {
-
     CodecUtil.checkHeader(in, BKDWriter.CODEC_NAME, BKDWriter.VERSION_START, BKDWriter.VERSION_START);
     numDims = in.readVInt();
     maxPointsInLeafNode = in.readVInt();
@@ -84,23 +66,28 @@
     this.in = in;
   }
 
+  protected BKDReader(IndexInput in, int numDims, int maxPointsInLeafNode, int bytesPerDim, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
+    this.in = in;
+    this.numDims = numDims;
+    this.maxPointsInLeafNode = maxPointsInLeafNode;
+    this.bytesPerDim = bytesPerDim;
+    packedBytesLength = numDims * bytesPerDim;
+    this.leafNodeOffset = leafBlockFPs.length;
+    this.leafBlockFPs = leafBlockFPs;
+    this.splitPackedValues = splitPackedValues;
+  }
+
   private static final class IntersectState {
     final IndexInput in;
     final int[] scratchDocIDs;
     final byte[] scratchPackedValue;
 
-    // Minimum point of the N-dim rect containing the query shape:
-    final byte[] minPacked;
-    // Maximum point of the N-dim rect containing the query shape:
-    final byte[] maxPacked;
     final IntersectVisitor visitor;
 
     public IntersectState(IndexInput in, int packedBytesLength,
-                          int maxPointsInLeafNode, byte[] minPacked, byte[] maxPacked,
+                          int maxPointsInLeafNode,
                           IntersectVisitor visitor) {
       this.in = in;
-      this.minPacked = minPacked;
-      this.maxPacked = maxPacked;
       this.visitor = visitor;
       this.scratchDocIDs = new int[maxPointsInLeafNode];
       this.scratchPackedValue = new byte[packedBytesLength];
@@ -108,15 +95,8 @@
   }
 
   public void intersect(IntersectVisitor visitor) throws IOException {
-    byte[] minPacked = new byte[packedBytesLength];
-    byte[] maxPacked = new byte[packedBytesLength];
-    Arrays.fill(maxPacked, (byte) 0xff);
-    intersect(minPacked, maxPacked, visitor);
-  }
-
-  public void intersect(byte[] minPacked, byte[] maxPacked, IntersectVisitor visitor) throws IOException {
     IntersectState state = new IntersectState(in.clone(), packedBytesLength,
-                                              maxPointsInLeafNode, minPacked, maxPacked,
+                                              maxPointsInLeafNode,
                                               visitor);
     byte[] rootMinPacked = new byte[packedBytesLength];
     byte[] rootMaxPacked = new byte[packedBytesLength];
@@ -129,83 +109,85 @@
     //System.out.println("R: addAll nodeID=" + nodeID);
 
     if (nodeID >= leafNodeOffset) {
-      //System.out.println("R:   leaf");
-
-      // Leaf node
-      state.in.seek(leafBlockFPs[nodeID-leafNodeOffset]);
-      
-      // How many points are stored in this leaf cell:
-      int count = state.in.readVInt();
-
-      // TODO: especially for the 1D case, this was a decent speedup, because caller could know it should budget for around XXX docs:
-      //state.docs.grow(count);
-      int docID = 0;
-      for(int i=0;i<count;i++) {
-        docID += state.in.readVInt();
-        state.visitor.visit(docID);
-      }
+      visitDocIDs(state.in, leafBlockFPs[nodeID-leafNodeOffset], state.visitor);
     } else {
       addAll(state, 2*nodeID);
       addAll(state, 2*nodeID+1);
     }
   }
 
+  protected void visitDocIDs(IndexInput in, long blockFP, IntersectVisitor visitor) throws IOException {
+    // Leaf node
+    in.seek(blockFP);
+      
+    // How many points are stored in this leaf cell:
+    int count = in.readVInt();
+
+    // TODO: especially for the 1D case, this was a decent speedup, because caller could know it should budget for around XXX docs:
+    //state.docs.grow(count);
+    int docID = 0;
+    for(int i=0;i<count;i++) {
+      docID += in.readVInt();
+      visitor.visit(docID);
+    }
+  }
+
+  protected int readDocIDs(IndexInput in, long blockFP, int[] docIDs) throws IOException {
+    in.seek(blockFP);
+
+    // How many points are stored in this leaf cell:
+    int count = in.readVInt();
+
+    // TODO: we could maybe pollute the IntersectVisitor API with a "grow" method if this maybe helps perf
+    // enough (it did before, esp. for the 1D case):
+    //state.docs.grow(count);
+    int docID = 0;
+    for(int i=0;i<count;i++) {
+      docID += in.readVInt();
+      docIDs[i] = docID;
+    }
+
+    return count;
+  }
+
+  protected void visitDocValues(byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
+    for(int i=0;i<count;i++) {
+      in.readBytes(scratchPackedValue, 0, scratchPackedValue.length);
+      visitor.visit(docIDs[i], scratchPackedValue);
+    }
+  }
+
   private void intersect(IntersectState state,
-                        int nodeID,
-                        byte[] cellMinPacked, byte[] cellMaxPacked)
+                         int nodeID,
+                         byte[] cellMinPacked, byte[] cellMaxPacked)
     throws IOException {
 
-    //System.out.println("\nR: intersect nodeID=" + nodeID + " cellMin=" + BKDUtil.bytesToInt(cellMinPacked, 0) + " cellMax=" + BKDUtil.bytesToInt(cellMaxPacked, 0));
+    /*
+    System.out.println("\nR: intersect nodeID=" + nodeID);
+    for(int dim=0;dim<numDims;dim++) {
+      System.out.println("  dim=" + dim + "\n    cellMin=" + new BytesRef(cellMinPacked, dim*bytesPerDim, bytesPerDim) + "\n    cellMax=" + new BytesRef(cellMaxPacked, dim*bytesPerDim, bytesPerDim));
+    }
+    */
 
-    // Optimization: only check the visitor when the current cell does not fully contain the bbox.  E.g. if the
-    // query is a small area around London, UK, most of the high nodes in the BKD tree as we recurse will fully
-    // contain the query, so we quickly recurse down until the nodes cross the query:
-    boolean cellContainsQuery = BKDUtil.contains(bytesPerDim,
-                                                 cellMinPacked, cellMaxPacked,
-                                                 state.minPacked, state.maxPacked);
+    Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked);
 
-    //System.out.println("R: cellContainsQuery=" + cellContainsQuery);
-
-    if (cellContainsQuery == false) {
-
-      Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked);
-      //System.out.println("R: relation=" + r);
-
-      if (r == Relation.QUERY_OUTSIDE_CELL) {
-        // This cell is fully outside of the query shape: stop recursing
-        return;
-      } else if (r == Relation.CELL_INSIDE_QUERY) {
-        // This cell is fully inside of the query shape: recursively add all points in this cell without filtering
-        addAll(state, nodeID);
-        return;
-      } else {
-        // The cell crosses the shape boundary, so we fall through and do full filtering
-      }
+    if (r == Relation.QUERY_OUTSIDE_CELL) {
+      // This cell is fully outside of the query shape: stop recursing
+      return;
+    } else if (r == Relation.CELL_INSIDE_QUERY) {
+      // This cell is fully inside of the query shape: recursively add all points in this cell without filtering
+      addAll(state, nodeID);
+      return;
+    } else {
+      // The cell crosses the shape boundary, or the cell fully contains the query, so we fall through and do full filtering
     }
 
     if (nodeID >= leafNodeOffset) {
       // Leaf node; scan and filter all points in this block:
-      //System.out.println("    intersect leaf nodeID=" + nodeID + " vs leafNodeOffset=" + leafNodeOffset + " fp=" + leafBlockFPs[nodeID-leafNodeOffset]);
-
-      state.in.seek(leafBlockFPs[nodeID-leafNodeOffset]);
-
-      // How many points are stored in this leaf cell:
-      int count = state.in.readVInt();
-
-      // TODO: we could maybe pollute the IntersectVisitor API with a "grow" method if this maybe helps perf
-      // enough (it did before, esp. for the 1D case):
-      //state.docs.grow(count);
-      int docID = 0;
-      for(int i=0;i<count;i++) {
-        docID += state.in.readVInt();
-        state.scratchDocIDs[i] = docID;
-      }
+      int count = readDocIDs(state.in, leafBlockFPs[nodeID-leafNodeOffset], state.scratchDocIDs);
 
       // Again, this time reading values and checking with the visitor
-      for(int i=0;i<count;i++) {
-        state.in.readBytes(state.scratchPackedValue, 0, state.scratchPackedValue.length);
-        state.visitor.visit(state.scratchDocIDs[i], state.scratchPackedValue);
-      }
+      visitDocValues(state.scratchPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
 
     } else {
       
@@ -222,23 +204,19 @@
       // TODO: can we alloc & reuse this up front?
       byte[] splitPackedValue = new byte[packedBytesLength];
 
-      if (BKDUtil.compare(bytesPerDim, state.minPacked, splitDim, splitValue, 0) <= 0) {
-        // The query bbox overlaps our left cell, so we must recurse:
-        System.arraycopy(state.maxPacked, 0, splitPackedValue, 0, packedBytesLength);
-        System.arraycopy(splitValue, 0, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
-        intersect(state,
-                  2*nodeID,
-                  cellMinPacked, splitPackedValue);
-      }
+      // Recurse on left sub-tree:
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitValue, 0, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      intersect(state,
+                2*nodeID,
+                cellMinPacked, splitPackedValue);
 
-      if (BKDUtil.compare(bytesPerDim, state.maxPacked, splitDim, splitValue, 0) >= 0) {
-        // The query bbox overlaps our left cell, so we must recurse:
-        System.arraycopy(state.minPacked, 0, splitPackedValue, 0, packedBytesLength);
-        System.arraycopy(splitValue, 0, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
-        intersect(state,
-                  2*nodeID+1,
-                  splitPackedValue, cellMaxPacked);
-      }
+      // Recurse on right sub-tree:
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitValue, 0, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      intersect(state,
+                2*nodeID+1,
+                splitPackedValue, cellMaxPacked);
     }
   }
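
For illustration (a sketch, not part of this patch): with the containment shortcut gone, the visitor's compare result alone drives the recursion above. QUERY_OUTSIDE_CELL prunes a subtree, CELL_INSIDE_QUERY bulk-collects docIDs via addAll without reading values, and QUERY_CROSSES_CELL falls through to per-value filtering at the leaves. A minimal 1D range visitor over the 4-byte sortable int encoding, assuming the IntersectVisitor interface and the BKDUtil helpers made public below, could look like:

    import java.util.BitSet;

    import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
    import org.apache.lucene.index.DimensionalValues.Relation;
    import org.apache.lucene.util.bkd.BKDUtil;

    // Collects all docs whose single int dimension falls in [min, max]:
    final class IntRangeVisitor implements IntersectVisitor {
      private final int min, max;
      final BitSet hits = new BitSet();

      IntRangeVisitor(int min, int max) {
        this.min = min;
        this.max = max;
      }

      @Override
      public void visit(int docID) {
        // Cell fully inside the query: addAll hands us docIDs with no values loaded
        hits.set(docID);
      }

      @Override
      public void visit(int docID, byte[] packedValue) {
        // Crossing cell: filter each value individually
        int x = BKDUtil.bytesToInt(packedValue, 0);
        if (x >= min && x <= max) {
          hits.set(docID);
        }
      }

      @Override
      public Relation compare(byte[] minPacked, byte[] maxPacked) {
        int cellMin = BKDUtil.bytesToInt(minPacked, 0);
        int cellMax = BKDUtil.bytesToInt(maxPacked, 0);
        if (cellMax < min || cellMin > max) {
          return Relation.QUERY_OUTSIDE_CELL;   // prune this subtree
        } else if (cellMin >= min && cellMax <= max) {
          return Relation.CELL_INSIDE_QUERY;    // bulk-add via addAll
        } else {
          return Relation.QUERY_CROSSES_CELL;   // filter at the leaves
        }
      }
    }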
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDUtil.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDUtil.java
index aef1871..1125168 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDUtil.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDUtil.java
@@ -20,14 +20,14 @@
 import java.math.BigInteger;
 import java.util.Arrays;
 
-/** Utility methods to handle N-dimensional packed byte[] as if they were numbers! */
-final class BKDUtil {
+/** Utility methods to convert numbers to/from N-dimensional packed byte[] values */
+public final class BKDUtil {
 
   private BKDUtil() {
     // No instance
   }
 
-  /** result = a - b, where a >= b */
+  /** result = a - b, where a &gt;= b */
   public static void subtract(int bytesPerDim, int dim, byte[] a, byte[] b, byte[] result) {
     int start = dim * bytesPerDim;
     int end = start + bytesPerDim;
@@ -47,7 +47,7 @@
     }
   }
   
-  /** Returns positive int if a > b, negative int if a < b and 0 if a == b */
+  /** Returns positive int if a &gt; b, negative int if a &lt; b and 0 if a == b */
   public static int compare(int bytesPerDim, byte[] a, int aIndex, byte[] b, int bIndex) {
     for(int i=0;i<bytesPerDim;i++) {
       int cmp = (a[aIndex*bytesPerDim+i]&0xff) - (b[bIndex*bytesPerDim+i]&0xff);
@@ -76,7 +76,7 @@
     return true;
   }
 
-  static void intToBytes(int x, byte[] dest, int index) {
+  public static void intToBytes(int x, byte[] dest, int index) {
     // Flip the sign bit, so negative ints sort before positive ints correctly:
     x ^= 0x80000000;
     for(int i=0;i<4;i++) {
@@ -84,7 +84,7 @@
     }
   }
 
-  static int bytesToInt(byte[] src, int index) {
+  public static int bytesToInt(byte[] src, int index) {
     int x = 0;
     for(int i=0;i<4;i++) {
       x |= (src[4*index+i] & 0xff) << (24-i*8);
@@ -93,14 +93,14 @@
     return x ^ 0x80000000;
   }
 
-  static void sortableBigIntBytes(byte[] bytes) {
+  public static void sortableBigIntBytes(byte[] bytes) {
     bytes[0] ^= 0x80;
     for(int i=1;i<bytes.length;i++)  {
       bytes[i] ^= 0;
     }
   }
 
-  static void bigIntToBytes(BigInteger bigInt, byte[] result, int dim, int numBytesPerDim) {
+  public static void bigIntToBytes(BigInteger bigInt, byte[] result, int dim, int numBytesPerDim) {
     byte[] bigIntBytes = bigInt.toByteArray();
     byte[] fullBigIntBytes;
 
@@ -122,7 +122,7 @@
     assert bytesToBigInt(result, dim, numBytesPerDim).equals(bigInt): "bigInt=" + bigInt + " converted=" + bytesToBigInt(result, dim, numBytesPerDim);
   }
 
-  static BigInteger bytesToBigInt(byte[] bytes, int dim, int numBytesPerDim) {
+  public static BigInteger bytesToBigInt(byte[] bytes, int dim, int numBytesPerDim) {
     byte[] bigIntBytes = new byte[numBytesPerDim];
     System.arraycopy(bytes, dim*numBytesPerDim, bigIntBytes, 0, numBytesPerDim);
     sortableBigIntBytes(bigIntBytes);
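
For illustration (a sketch, not part of this patch): these helpers become public because the tests below call them directly. intToBytes flips the sign bit so the unsigned byte-wise compare above orders negative ints before positive ones, and the transform round-trips exactly:

    import org.apache.lucene.util.bkd.BKDUtil;

    public class BKDUtilDemo {
      public static void main(String[] args) {
        byte[] a = new byte[4];
        byte[] b = new byte[4];
        BKDUtil.intToBytes(-1, a, 0);  // stored as 0x7FFFFFFF after the sign flip
        BKDUtil.intToBytes(1, b, 0);   // stored as 0x80000001
        // Unsigned byte-wise order now matches numeric order:
        System.out.println(BKDUtil.compare(4, a, 0, b, 0) < 0);  // true
        // And decoding restores the original value:
        System.out.println(BKDUtil.bytesToInt(a, 0));            // -1
      }
    }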
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index 38a22a2..3ee8a4b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -26,7 +26,6 @@
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
@@ -36,8 +35,8 @@
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InPlaceMergeSorter;
 import org.apache.lucene.util.LongBitSet;
-import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
 import org.apache.lucene.util.OfflineSorter;
+import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
 import org.apache.lucene.util.RamUsageEstimator;
 
 // TODO
@@ -71,11 +70,11 @@
  *
  * @lucene.experimental */
 
-public final class BKDWriter implements Closeable {
+public class BKDWriter implements Closeable {
 
-  static final String CODEC_NAME = "BKD";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
+  public static final String CODEC_NAME = "BKD";
+  public static final int VERSION_START = 0;
+  public static final int VERSION_CURRENT = VERSION_START;
 
   /** How many bytes each doc takes in the fixed-width offline format */
   private final int bytesPerDoc;
@@ -85,16 +84,16 @@
   public static final float DEFAULT_MAX_MB_SORT_IN_HEAP = 16.0f;
 
   /** Maximum number of dimensions */
-  public static final int MAX_DIMS = 15;
+  public static final int MAX_DIMS = 255;
 
   /** How many dimensions we are indexing */
-  final int numDims;
+  protected final int numDims;
 
   /** How many bytes each value in each dimension takes. */
-  final int bytesPerDim;
+  protected final int bytesPerDim;
 
   /** numDims * bytesPerDim */
-  final int packedBytesLength;
+  protected final int packedBytesLength;
 
   final TrackingDirectoryWrapper tempDir;
   final String tempFileNamePrefix;
@@ -108,7 +107,7 @@
   private HeapPointWriter heapPointWriter;
 
   private IndexOutput tempInput;
-  private final int maxPointsInLeafNode;
+  protected final int maxPointsInLeafNode;
   private final int maxPointsSortInHeap;
 
   private long pointCount;
@@ -452,12 +451,18 @@
 
     // Write index:
     long indexFP = out.getFilePointer();
+    writeIndex(out, leafBlockFPs, splitPackedValues);
+    return indexFP;
+  }
+
+  /** Subclass can change how it writes the index. */
+  protected void writeIndex(IndexOutput out, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
     CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
     out.writeVInt(numDims);
     out.writeVInt(maxPointsInLeafNode);
     out.writeVInt(bytesPerDim);
 
-    out.writeVInt(numLeaves);
+    out.writeVInt(leafBlockFPs.length);
 
     // NOTE: splitPackedValues[0] is unused, because nodeID is 1-based:
     out.writeBytes(splitPackedValues, 0, splitPackedValues.length);
@@ -465,8 +470,21 @@
     for (int i=0;i<leafBlockFPs.length;i++) {
       out.writeVLong(leafBlockFPs[i]);
     }
+  }
 
-    return indexFP;
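+  /** Subclass can change how each leaf block's docIDs are written; the default delta-vInt encodes the sorted docIDs compactly. */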
+  protected void writeLeafBlockDocs(IndexOutput out, int[] docIDs, int start, int count) throws IOException {
+    out.writeVInt(count);
+
+    int lastDocID = 0;
+    for (int i=0;i<count;i++) {
+      int docID = docIDs[start + i];
+      out.writeVInt(docID - lastDocID);
+      lastDocID = docID;
+    }
+  }
+
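+  /** Subclass can change how each packed value is written within a leaf block. */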
+  protected void writeLeafBlockPackedValue(IndexOutput out, byte[] bytes, int offset, int length) throws IOException {
+    out.writeBytes(bytes, offset, length);
   }
 
   @Override
@@ -608,20 +626,12 @@
       // Sort by docID in the leaf so we can delta-vInt encode:
       sortHeapPointWriter(heapSource, Math.toIntExact(source.start), Math.toIntExact(source.count), -1);
 
-      int lastDocID = 0;
-
       // Save the block file pointer:
       leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
 
-      out.writeVInt(Math.toIntExact(source.count));
-
       // Write docIDs first, as their own chunk, so that at intersect time we can add all docIDs w/o
       // loading the values:
-      for (int i=0;i<source.count;i++) {
-        int docID = heapSource.docIDs[Math.toIntExact(source.start + i)];
-        out.writeVInt(docID - lastDocID);
-        lastDocID = docID;
-      }
+      writeLeafBlockDocs(out, heapSource.docIDs, Math.toIntExact(source.start), Math.toIntExact(source.count));
 
       // TODO: we should delta compress / only write suffix bytes, like terms dict (the values will all be "close together" since we are at
       // a leaf cell):
@@ -633,7 +643,7 @@
 
         // Make sure this value does in fact fall within this leaf cell:
         assert valueInBounds(scratchPackedValue, minPackedValue, maxPackedValue);
-        out.writeBytes(scratchPackedValue, 0, scratchPackedValue.length);
+        writeLeafBlockPackedValue(out, scratchPackedValue, 0, scratchPackedValue.length);
       }
 
     } else {
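
For illustration (a sketch, not part of this patch): dropping final and exposing writeIndex, writeLeafBlockDocs and writeLeafBlockPackedValue lets a format change the on-disk encoding while reusing BKDWriter's sorting and tree-building machinery. A hypothetical subclass that tags each docID chunk, with an assumed constructor shape (check BKDWriter on this branch for the real signature), might look like:

    import java.io.IOException;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.util.bkd.BKDWriter;

    // Hypothetical subclass: same tree construction, customized leaf encoding.
    class TaggedBKDWriter extends BKDWriter {

      TaggedBKDWriter(Directory tempDir, String tempFileNamePrefix,
                      int numDims, int bytesPerDim,
                      int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException {
        // Assumed constructor arguments, mirroring the fields above:
        super(tempDir, tempFileNamePrefix, numDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap);
      }

      @Override
      protected void writeLeafBlockDocs(IndexOutput out, int[] docIDs, int start, int count) throws IOException {
        out.writeByte((byte) 'D');  // illustrative marker; a matching reader would skip it
        super.writeLeafBlockDocs(out, docIDs, start, count);  // keep the delta-vInt encoding
      }
    }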
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
index c9f242a..782187f 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
@@ -89,8 +89,13 @@
       if ((method.getModifiers() & Modifier.PUBLIC) != 0 && method.getName().startsWith("set")) {
         final Class<?>[] parameterTypes = method.getParameterTypes();
         final Object[] args = new Object[parameterTypes.length];
-        for (int i = 0; i < args.length; ++i) {
-          args[i] = randomValue(parameterTypes[i]);
+        if (method.getName().equals("setDimensions")) {
+          args[0] = 1 + random().nextInt(15);
+          args[1] = 1 + random().nextInt(100);
+        } else {
+          for (int i = 0; i < args.length; ++i) {
+            args[i] = randomValue(parameterTypes[i]);
+          }
         }
         method.invoke(ft, args);
       }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDimensionalValues.java b/lucene/core/src/test/org/apache/lucene/index/TestDimensionalValues.java
new file mode 100644
index 0000000..58a60eb
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDimensionalValues.java
@@ -0,0 +1,1023 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
+import org.apache.lucene.document.DimensionalField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
+import org.apache.lucene.index.DimensionalValues.Relation;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.bkd.BKDUtil;
+import org.apache.lucene.util.bkd.BKDWriter;
+
+// TODO: randomize the bkd settings w/ Lucene60DimensionalFormat
+
+// TODO: factor out a BaseTestDimensionFormat
+
+public class TestDimensionalValues extends LuceneTestCase {
+  public void testBasic() throws Exception {
+    Directory dir = getDirectory(20);
+    // TODO: randomize codec once others support dimensional format
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    iwc.setMergePolicy(newLogMergePolicy());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    byte[] point = new byte[4];
+    for(int i=0;i<20;i++) {
+      Document doc = new Document();
+      BKDUtil.intToBytes(i, point, 0);
+      doc.add(new DimensionalField("dim", point));
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    w.close();
+
+    DirectoryReader r = DirectoryReader.open(dir);
+    LeafReader sub = getOnlySegmentReader(r);
+    DimensionalValues values = sub.getDimensionalValues();
+
+    // Simple test: make sure intersect can visit every doc:
+    BitSet seen = new BitSet();
+    values.intersect("dim",
+                     new IntersectVisitor() {
+                       @Override
+                       public Relation compare(byte[] minPacked, byte[] maxPacked) {
+                         return Relation.QUERY_CROSSES_CELL;
+                       }
+                       @Override
+                       public void visit(int docID) {
+                         throw new IllegalStateException();
+                       }
+                       @Override
+                       public void visit(int docID, byte[] packedValue) {
+                         seen.set(docID);
+                         assertEquals(docID, BKDUtil.bytesToInt(packedValue, 0));
+                       }
+                     });
+    assertEquals(20, seen.cardinality());
+    IOUtils.close(r, dir);
+  }
+
+  public void testMerge() throws Exception {
+    Directory dir = getDirectory(20);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    iwc.setMergePolicy(newLogMergePolicy());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    byte[] point = new byte[4];
+    for(int i=0;i<20;i++) {
+      Document doc = new Document();
+      BKDUtil.intToBytes(i, point, 0);
+      doc.add(new DimensionalField("dim", point));
+      w.addDocument(doc);
+      if (i == 10) {
+        w.commit();
+      }
+    }
+    w.forceMerge(1);
+    w.close();
+
+    DirectoryReader r = DirectoryReader.open(dir);
+    LeafReader sub = getOnlySegmentReader(r);
+    DimensionalValues values = sub.getDimensionalValues();
+
+    // Simple test: make sure intersect can visit every doc:
+    BitSet seen = new BitSet();
+    values.intersect("dim",
+                     new IntersectVisitor() {
+                       @Override
+                       public Relation compare(byte[] minPacked, byte[] maxPacked) {
+                         return Relation.QUERY_CROSSES_CELL;
+                       }
+                       @Override
+                       public void visit(int docID) {
+                         throw new IllegalStateException();
+                       }
+                       @Override
+                       public void visit(int docID, byte[] packedValue) {
+                         seen.set(docID);
+                         assertEquals(docID, BKDUtil.bytesToInt(packedValue, 0));
+                       }
+                     });
+    assertEquals(20, seen.cardinality());
+    IOUtils.close(r, dir);
+  }
+
+  /** Make sure we close open files, delete temp files, etc., on exception */
+  public void testWithExceptions() throws Exception {
+    int numDocs = atLeast(10000);
+    int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+    int numDims = TestUtil.nextInt(random(), 1, 5);
+
+    byte[][][] docValues = new byte[numDocs][][];
+
+    for(int docID=0;docID<numDocs;docID++) {
+      byte[][] values = new byte[numDims][];
+      for(int dim=0;dim<numDims;dim++) {
+        values[dim] = new byte[numBytesPerDim];
+        random().nextBytes(values[dim]);
+      }
+      docValues[docID] = values;
+    }
+
+    double maxMBHeap = 0.05;
+    // Keep retrying until 1) we allow a big enough heap, and 2) we hit a random IOExc from MDW:
+    boolean done = false;
+    while (done == false) {
+      try (MockDirectoryWrapper dir = newMockFSDirectory(createTempDir())) {
+        try {
+          dir.setRandomIOExceptionRate(0.05);
+          dir.setRandomIOExceptionRateOnOpen(0.05);
+          dir.setEnableVirusScanner(false);
+          verify(dir, docValues, null, numDims, numBytesPerDim, 50, maxMBHeap);
+        } catch (AssertionError ae) {
+          if (ae.getMessage().contains("does not exist; files=")) {
+            // OK: likely we threw the random IOExc when IW was asserting the commit files exist
+            done = true;
+          } else {
+            throw ae;
+          }
+        } catch (IllegalArgumentException iae) {
+          // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/ more heap
+          assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
+          System.out.println("  more heap");
+          maxMBHeap *= 1.25;
+        } catch (IOException ioe) {
+          if (ioe.getMessage().contains("a random IOException")) {
+            // BKDWriter should fully clean up after itself:
+            done = true;
+          } else {
+            throw ioe;
+          }
+        }
+      }
+    }
+  }
+
+  public void testMultiValued() throws Exception {
+    int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+    int numDims = TestUtil.nextInt(random(), 1, 5);
+
+    int numDocs = atLeast(1000);
+    List<byte[][]> docValues = new ArrayList<>();
+    List<Integer> docIDs = new ArrayList<>();
+
+    for(int docID=0;docID<numDocs;docID++) {
+      int numValuesInDoc = TestUtil.nextInt(random(), 1, 5);
+      for(int ord=0;ord<numValuesInDoc;ord++) {
+        docIDs.add(docID);
+        byte[][] values = new byte[numDims][];
+        for(int dim=0;dim<numDims;dim++) {
+          values[dim] = new byte[numBytesPerDim];
+          random().nextBytes(values[dim]);
+        }
+        docValues.add(values);
+      }
+    }
+
+    byte[][][] docValuesArray = docValues.toArray(new byte[docValues.size()][][]);
+    int[] docIDsArray = new int[docIDs.size()];
+    for(int i=0;i<docIDsArray.length;i++) {
+      docIDsArray[i] = docIDs.get(i);
+    }
+
+    verify(docValuesArray, docIDsArray, numDims, numBytesPerDim);
+  }
+
+  public void testAllEqual() throws Exception {
+    int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+    int numDims = TestUtil.nextInt(random(), 1, 5);
+
+    int numDocs = atLeast(1000);
+    byte[][][] docValues = new byte[numDocs][][];
+
+    for(int docID=0;docID<numDocs;docID++) {
+      if (docID == 0) {
+        byte[][] values = new byte[numDims][];
+        for(int dim=0;dim<numDims;dim++) {
+          values[dim] = new byte[numBytesPerDim];
+          random().nextBytes(values[dim]);
+        }
+        docValues[docID] = values;
+      } else {
+        docValues[docID] = docValues[0];
+      }
+    }
+
+    verify(docValues, null, numDims, numBytesPerDim);
+  }
+
+  public void testOneDimEqual() throws Exception {
+    int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+    int numDims = TestUtil.nextInt(random(), 1, 5);
+
+    int numDocs = atLeast(1000);
+    int theEqualDim = random().nextInt(numDims);
+    byte[][][] docValues = new byte[numDocs][][];
+
+    for(int docID=0;docID<numDocs;docID++) {
+      byte[][] values = new byte[numDims][];
+      for(int dim=0;dim<numDims;dim++) {
+        values[dim] = new byte[numBytesPerDim];
+        random().nextBytes(values[dim]);
+      }
+      docValues[docID] = values;
+      if (docID > 0) {
+        docValues[docID][theEqualDim] = docValues[0][theEqualDim];
+      }
+    }
+
+    verify(docValues, null, numDims, numBytesPerDim);
+  }
+
+  // Tests on N-dimensional points where each dimension is a BigInteger
+  public void testBigIntNDims() throws Exception {
+
+    int numDocs = atLeast(1000);
+    try (Directory dir = getDirectory(numDocs)) {
+      int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+      int numDims = TestUtil.nextInt(random(), 1, 5);
+      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+      iwc.setCodec(new SimpleTextCodec());
+      RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+      BigInteger[][] docs = new BigInteger[numDocs][];
+
+      for(int docID=0;docID<numDocs;docID++) {
+        BigInteger[] values = new BigInteger[numDims];
+        if (VERBOSE) {
+          System.out.println("  docID=" + docID);
+        }
+        byte[][] bytes = new byte[numDims][];
+        for(int dim=0;dim<numDims;dim++) {
+          values[dim] = randomBigInt(numBytesPerDim);
+          bytes[dim] = new byte[numBytesPerDim];
+          BKDUtil.bigIntToBytes(values[dim], bytes[dim], 0, numBytesPerDim);
+          if (VERBOSE) {
+            System.out.println("    " + dim + " -> " + values[dim]);
+          }
+        }
+        docs[docID] = values;
+        Document doc = new Document();
+        doc.add(new DimensionalField("field", bytes));
+        w.addDocument(doc);
+      }
+
+      DirectoryReader r = w.getReader();
+      w.close();
+
+      DimensionalValues dimValues = MultiDimensionalValues.get(r);
+
+      int iters = atLeast(100);
+      for(int iter=0;iter<iters;iter++) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: iter=" + iter);
+        }
+
+        // Random N dims rect query:
+        BigInteger[] queryMin = new BigInteger[numDims];
+        BigInteger[] queryMax = new BigInteger[numDims];    
+        for(int dim=0;dim<numDims;dim++) {
+          queryMin[dim] = randomBigInt(numBytesPerDim);
+          queryMax[dim] = randomBigInt(numBytesPerDim);
+          if (queryMin[dim].compareTo(queryMax[dim]) > 0) {
+            BigInteger x = queryMin[dim];
+            queryMin[dim] = queryMax[dim];
+            queryMax[dim] = x;
+          }
+        }
+
+        final BitSet hits = new BitSet();
+        dimValues.intersect("field", new IntersectVisitor() {
+            @Override
+            public void visit(int docID) {
+              hits.set(docID);
+              //System.out.println("visit docID=" + docID);
+            }
+
+            @Override
+            public void visit(int docID, byte[] packedValue) {
+              //System.out.println("visit check docID=" + docID);
+              for(int dim=0;dim<numDims;dim++) {
+                BigInteger x = BKDUtil.bytesToBigInt(packedValue, dim, numBytesPerDim);
+                if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
+                  //System.out.println("  no");
+                  return;
+                }
+              }
+
+              //System.out.println("  yes");
+              hits.set(docID);
+            }
+
+            @Override
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
+              boolean crosses = false;
+              for(int dim=0;dim<numDims;dim++) {
+                BigInteger min = BKDUtil.bytesToBigInt(minPacked, dim, numBytesPerDim);
+                BigInteger max = BKDUtil.bytesToBigInt(maxPacked, dim, numBytesPerDim);
+                assert max.compareTo(min) >= 0;
+
+                if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
+                  return Relation.QUERY_OUTSIDE_CELL;
+                } else if (min.compareTo(queryMin[dim]) < 0 || max.compareTo(queryMax[dim]) > 0) {
+                  crosses = true;
+                }
+              }
+
+              if (crosses) {
+                return Relation.QUERY_CROSSES_CELL;
+              } else {
+                return Relation.CELL_INSIDE_QUERY;
+              }
+            }
+          });
+
+        for(int docID=0;docID<numDocs;docID++) {
+          BigInteger[] docValues = docs[docID];
+          boolean expected = true;
+          for(int dim=0;dim<numDims;dim++) {
+            BigInteger x = docValues[dim];
+            if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
+              expected = false;
+              break;
+            }
+          }
+          boolean actual = hits.get(docID);
+          assertEquals("docID=" + docID, expected, actual);
+        }
+      }
+      r.close();
+    }
+  }
+
+  public void testRandomBinaryTiny() throws Exception {
+    doTestRandomBinary(10);
+  }
+
+  public void testRandomBinaryMedium() throws Exception {
+    doTestRandomBinary(10000);
+  }
+
+  // TODO: enable this, but not using simple text:
+  /*
+  @Nightly
+  public void testRandomBinaryBig() throws Exception {
+    doTestRandomBinary(200000);
+  }
+  */
+
+  // Suddenly add dimensional values to an existing field:
+  public void testUpgradeFieldToDimensional() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(newStringField("dim", "foo", Field.Store.NO));
+    w.addDocument(doc);
+    w.close();
+    
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Illegal schema change tests:
+
+  public void testIllegalDimChangeOneDoc() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension count from 1 to 2 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalDimChangeTwoDocs() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension count from 1 to 2 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalDimChangeTwoSegments() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.commit();
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension count from 1 to 2 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalDimChangeTwoWriters() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension count from 1 to 2 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalDimChangeViaAddIndexesDirectory() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    w.addDocument(doc);
+    try {
+      w.addIndexes(new Directory[] {dir});
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension count from 2 to 1 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(w, dir, dir2);
+  }
+
+  public void testIllegalDimChangeViaAddIndexesCodecReader() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(dir);
+    try {
+      w.addIndexes(new CodecReader[] {getOnlySegmentReader(r)});
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension count from 2 to 1 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(r, w, dir, dir2);
+  }
+
+  public void testIllegalDimChangeViaAddIndexesSlowCodecReader() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(dir);
+    try {
+      TestUtil.addIndexesSlowly(w, r);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension count from 2 to 1 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(r, w, dir, dir2);
+  }
+
+  public void testIllegalTooManyDimensions() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    byte[][] values = new byte[BKDWriter.MAX_DIMS+1][];
+    for(int i=0;i<values.length;i++) {
+      values[i] = new byte[4];
+    }
+    doc.add(new DimensionalField("dim", values));
+    w.addDocument(doc);
+    try {
+      w.close();
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("numDims must be 1 .. 255 (got: 256)", iae.getMessage());
+    }
+    dir.close();
+  }
+
+  public void testIllegalNumBytesChangeOneDoc() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    doc.add(new DimensionalField("dim", new byte[6]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension numBytes from 4 to 6 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalNumBytesChangeTwoDocs() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension numBytes from 4 to 6 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalNumBytesChangeTwoSegments() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.commit();
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension numBytes from 4 to 6 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalNumBytesChangeTwoWriters() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+      assertEquals("cannot change dimension numBytes from 4 to 6 for field=\"dim\"", iae.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIllegalNumBytesChangeViaAddIndexesDirectory() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    w.addDocument(doc);
+    try {
+      w.addIndexes(new Directory[] {dir});
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension numBytes from 6 to 4 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(w, dir, dir2);
+  }
+
+  public void testIllegalNumBytesChangeViaAddIndexesCodecReader() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(dir);
+    try {
+      w.addIndexes(new CodecReader[] {getOnlySegmentReader(r)});
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension numBytes from 6 to 4 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(r, w, dir, dir2);
+  }
+
+  public void testIllegalNumBytesChangeViaAddIndexesSlowCodecReader() throws Exception {
+    Directory dir = getDirectory(1);
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    IndexWriter w = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[4]));
+    w.addDocument(doc);
+    w.close();
+
+    Directory dir2 = getDirectory(1);
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setCodec(new SimpleTextCodec());
+    w = new IndexWriter(dir2, iwc);
+    doc = new Document();
+    doc.add(new DimensionalField("dim", new byte[6]));
+    w.addDocument(doc);
+    DirectoryReader r = DirectoryReader.open(dir);
+    try {
+      TestUtil.addIndexesSlowly(w, r);
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      assertEquals("cannot change dimension numBytes from 6 to 4 for field=\"dim\"", iae.getMessage());
+    }
+    IOUtils.close(r, w, dir, dir2);
+  }
+
+  private void doTestRandomBinary(int count) throws Exception {
+    int numDocs = TestUtil.nextInt(random(), count, count*2);
+    int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
+    int numDims = TestUtil.nextInt(random(), 1, 5);
+
+    byte[][][] docValues = new byte[numDocs][][];
+
+    for(int docID=0;docID<numDocs;docID++) {
+      byte[][] values = new byte[numDims][];
+      for(int dim=0;dim<numDims;dim++) {
+        values[dim] = new byte[numBytesPerDim];
+        // TODO: sometimes test on a "small" volume too, so we test the high density cases, higher chance of boundary, etc. cases:
+        random().nextBytes(values[dim]);
+      }
+      docValues[docID] = values;
+    }
+
+    verify(docValues, null, numDims, numBytesPerDim);
+  }
+
+  /** docIDs can be null for the single-valued case; otherwise docIDs[i] is the docID for docValues[i], and all values for one doc must be adjacent */
+  private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim) throws Exception {
+    try (Directory dir = getDirectory(docValues.length)) {
+      while (true) {
+        int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 100);
+        double maxMB = 0.1 + 3*random().nextDouble();
+        try {
+          verify(dir, docValues, docIDs, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB);
+          return;
+        } catch (IllegalArgumentException iae) {
+          // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry
+          assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
+        }
+      }
+    }
+  }
+
+  private void verify(Directory dir, byte[][][] docValues, int[] ids, int numDims, int numBytesPerDim, int maxPointsInLeafNode, double maxMB) throws Exception {
+    int numValues = docValues.length;
+    if (VERBOSE) {
+      System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim + " maxPointsInLeafNode=" + maxPointsInLeafNode + " maxMB=" + maxMB);
+    }
+    //System.out.println("DIR: " + ((FSDirectory) dir).getDirectory());
+
+    //IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));  
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    //iwc.setUseCompoundFile(false);
+    //iwc.getMergePolicy().setNoCFSRatio(0.0);
+    iwc.setCodec(new SimpleTextCodec());
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    DirectoryReader r = null;
+
+    try {
+
+      Document doc = null;
+      int lastID = -1;
+      for(int ord=0;ord<numValues;ord++) {
+        int id;
+        if (ids == null) {
+          id = ord;
+        } else {
+          id = ids[ord];
+        }
+        if (id != lastID) {
+          if (doc != null) {
+            w.addDocument(doc);
+          }
+          doc = new Document();
+          doc.add(new NumericDocValuesField("id", id));
+        }
+        doc.add(new DimensionalField("field", docValues[ord]));
+        lastID = id;
+
+        if (random().nextInt(30) == 17) {
+          // randomly index some documents without this field
+          w.addDocument(new Document());
+          if (VERBOSE) {
+            System.out.println("add empty doc");
+          }
+        }
+
+        if (random().nextInt(30) == 17) {
+          // randomly index some documents with this field, but we will delete them:
+          Document xdoc = new Document();
+          xdoc.add(new DimensionalField("field", docValues[ord]));
+          xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
+          w.addDocument(xdoc);
+          if (VERBOSE) {
+            System.out.println("add doc doc-to-delete");
+          }
+        }
+
+        if (VERBOSE) {
+          System.out.println("  ord=" + ord + " id=" + id);
+          for(int dim=0;dim<numDims;dim++) {
+            System.out.println("    dim=" + dim + " value=" + new BytesRef(docValues[ord][dim]));
+          }
+        }
+      }
+      w.addDocument(doc);
+      w.deleteDocuments(new Term("nukeme", "yes"));
+
+      if (random().nextBoolean()) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: now force merge");
+        }
+        w.forceMerge(1);
+      }
+
+      r = w.getReader();
+      w.close();
+
+      //System.out.println("TEST: r=" + r);
+
+      DimensionalValues dimValues = MultiDimensionalValues.get(r);
+      if (VERBOSE) {
+        System.out.println("  dimValues=" + dimValues);
+      }
+      assertNotNull(dimValues);
+
+      NumericDocValues idValues = MultiDocValues.getNumericValues(r, "id");
+      Bits liveDocs = MultiFields.getLiveDocs(r);
+
+      int iters = atLeast(100);
+      for(int iter=0;iter<iters;iter++) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: iter=" + iter);
+        }
+
+        // Random N dims rect query:
+        byte[][] queryMin = new byte[numDims][];
+        byte[][] queryMax = new byte[numDims][];    
+        for(int dim=0;dim<numDims;dim++) {    
+          queryMin[dim] = new byte[numBytesPerDim];
+          random().nextBytes(queryMin[dim]);
+          queryMax[dim] = new byte[numBytesPerDim];
+          random().nextBytes(queryMax[dim]);
+          if (BKDUtil.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
+            byte[] x = queryMin[dim];
+            queryMin[dim] = queryMax[dim];
+            queryMax[dim] = x;
+          }
+        }
+
+        if (VERBOSE) {
+          for(int dim=0;dim<numDims;dim++) {
+            System.out.println("  dim=" + dim + "\n    queryMin=" + new BytesRef(queryMin[dim]) + "\n    queryMax=" + new BytesRef(queryMax[dim]));
+          }
+        }
+
+        final BitSet hits = new BitSet();
+
+        dimValues.intersect("field", new DimensionalValues.IntersectVisitor() {
+            @Override
+            public void visit(int docID) {
+              if (liveDocs == null || liveDocs.get(docID)) {
+                hits.set((int) idValues.get(docID));
+              }
+              //System.out.println("visit docID=" + docID);
+            }
+
+            @Override
+            public void visit(int docID, byte[] packedValue) {
+              if (liveDocs != null && liveDocs.get(docID) == false) {
+                return;
+              }
+              //System.out.println("visit check docID=" + docID);
+              for(int dim=0;dim<numDims;dim++) {
+                //System.out.println("  dim=" + dim + " value=" + new BytesRef(packedValue, dim*bytesPerDim, bytesPerDim));
+                if (BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
+                    BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
+                  //System.out.println("  no");
+                  return;
+                }
+              }
+
+              //System.out.println("  yes");
+              hits.set((int) idValues.get(docID));
+            }
+
+            @Override
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
+              boolean crosses = false;
+              //System.out.println("compare");
+              for(int dim=0;dim<numDims;dim++) {
+                if (BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
+                    BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
+                  //System.out.println("  query_outside_cell");
+                  return Relation.QUERY_OUTSIDE_CELL;
+                } else if (BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
+                           BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
+                  crosses = true;
+                }
+              }
+
+              if (crosses) {
+                //System.out.println("  query_crosses_cell");
+                return Relation.QUERY_CROSSES_CELL;
+              } else {
+                //System.out.println("  cell_inside_query");
+                return Relation.CELL_INSIDE_QUERY;
+              }
+            }
+          });
+
+        BitSet expected = new BitSet();
+        for(int ord=0;ord<numValues;ord++) {
+          boolean matches = true;
+          for(int dim=0;dim<numDims;dim++) {
+            byte[] x = docValues[ord][dim];
+            if (BKDUtil.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
+                BKDUtil.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
+              matches = false;
+              break;
+            }
+          }
+
+          if (matches) {
+            int id;
+            if (ids == null) {
+              id = ord;
+            } else {
+              id = ids[ord];
+            }
+            expected.set(id);
+          }
+        }
+
+        int limit = Math.max(expected.length(), hits.length());
+        for(int id=0;id<limit;id++) {
+          assertEquals("docID=" + id, expected.get(id), hits.get(id));
+        }
+      }
+    } finally {
+      IOUtils.closeWhileHandlingException(r, w);
+    }
+  }
+
+  private BigInteger randomBigInt(int numBytes) {
+    BigInteger x = new BigInteger(numBytes*8-1, random());
+    if (random().nextBoolean()) {
+      x = x.negate();
+    }
+    return x;
+  }
+
+  private static Directory noVirusChecker(Directory dir) {
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
+    }
+    return dir;
+  }
+
+  private Directory getDirectory(int numPoints) throws IOException {
+    Directory dir;
+    if (numPoints > 100000) {
+      dir = newFSDirectory(createTempDir("TestDimensionalValues"));
+    } else {
+      dir = newDirectory();
+    }
+    noVirusChecker(dir);
+    //dir = FSDirectory.open(createTempDir());
+    return dir;
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index f5a63aa..ca04410 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -89,6 +89,16 @@
       public DocValuesType docValuesType() {
         return DocValuesType.NONE;
       }
+
+      @Override
+      public int dimensionCount() {
+        return 0;
+      }
+
+      @Override
+      public int dimensionNumBytes() {
+        return 0;
+      }
     };
 
     public MyField(int counter) {
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
index 08dcbdc..446462f 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
@@ -24,6 +24,8 @@
 import java.util.BitSet;
 import java.util.List;
 
+import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
+import org.apache.lucene.index.DimensionalValues.Relation;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -31,12 +33,9 @@
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.RamUsageTester;
 import org.apache.lucene.util.TestUtil;
 
-@SuppressSysoutChecks(bugUrl = "Stuff gets printed.")
 public class TestBKD extends LuceneTestCase {
 
   public void testBasicInts1D() throws Exception {
@@ -62,7 +61,7 @@
         final int queryMax = 87;
 
         final BitSet hits = new BitSet();
-        r.intersect(new BKDReader.IntersectVisitor() {
+        r.intersect(new IntersectVisitor() {
             @Override
             public void visit(int docID) {
               hits.set(docID);
@@ -83,7 +82,7 @@
             }
 
             @Override
-            public BKDReader.Relation compare(byte[] minPacked, byte[] maxPacked) {
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
               int min = BKDUtil.bytesToInt(minPacked, 0);
               int max = BKDUtil.bytesToInt(maxPacked, 0);
               assert max >= min;
@@ -92,11 +91,11 @@
               }
 
               if (max < queryMin || min > queryMax) {
-                return BKDReader.Relation.QUERY_OUTSIDE_CELL;
+                return Relation.QUERY_OUTSIDE_CELL;
               } else if (min >= queryMin && max <= queryMax) {
-                return BKDReader.Relation.CELL_INSIDE_QUERY;
+                return Relation.CELL_INSIDE_QUERY;
               } else {
-                return BKDReader.Relation.QUERY_CROSSES_CELL;
+                return Relation.QUERY_CROSSES_CELL;
               }
             }
           });
@@ -168,7 +167,7 @@
           }
 
           final BitSet hits = new BitSet();
-          r.intersect(new BKDReader.IntersectVisitor() {
+          r.intersect(new IntersectVisitor() {
             @Override
             public void visit(int docID) {
               hits.set(docID);
@@ -191,7 +190,7 @@
             }
 
             @Override
-            public BKDReader.Relation compare(byte[] minPacked, byte[] maxPacked) {
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
               boolean crosses = false;
               for(int dim=0;dim<numDims;dim++) {
                 int min = BKDUtil.bytesToInt(minPacked, dim);
@@ -199,16 +198,16 @@
                 assert max >= min;
 
                 if (max < queryMin[dim] || min > queryMax[dim]) {
-                  return BKDReader.Relation.QUERY_OUTSIDE_CELL;
+                  return Relation.QUERY_OUTSIDE_CELL;
                 } else if (min < queryMin[dim] || max > queryMax[dim]) {
                   crosses = true;
                 }
               }
 
               if (crosses) {
-                return BKDReader.Relation.QUERY_CROSSES_CELL;
+                return Relation.QUERY_CROSSES_CELL;
               } else {
-                return BKDReader.Relation.CELL_INSIDE_QUERY;
+                return Relation.CELL_INSIDE_QUERY;
               }
             }
           });
@@ -289,7 +288,7 @@
           }
 
           final BitSet hits = new BitSet();
-          r.intersect(new BKDReader.IntersectVisitor() {
+          r.intersect(new IntersectVisitor() {
             @Override
             public void visit(int docID) {
               hits.set(docID);
@@ -312,7 +311,7 @@
             }
 
             @Override
-            public BKDReader.Relation compare(byte[] minPacked, byte[] maxPacked) {
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
               boolean crosses = false;
               for(int dim=0;dim<numDims;dim++) {
                 BigInteger min = BKDUtil.bytesToBigInt(minPacked, dim, numBytesPerDim);
@@ -320,16 +319,16 @@
                 assert max.compareTo(min) >= 0;
 
                 if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
-                  return BKDReader.Relation.QUERY_OUTSIDE_CELL;
+                  return Relation.QUERY_OUTSIDE_CELL;
                 } else if (min.compareTo(queryMin[dim]) < 0 || max.compareTo(queryMax[dim]) > 0) {
                   crosses = true;
                 }
               }
 
               if (crosses) {
-                return BKDReader.Relation.QUERY_CROSSES_CELL;
+                return Relation.QUERY_CROSSES_CELL;
               } else {
-                return BKDReader.Relation.CELL_INSIDE_QUERY;
+                return Relation.CELL_INSIDE_QUERY;
               }
             }
           });
@@ -384,7 +383,6 @@
         } catch (IllegalArgumentException iae) {
           // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/ more heap
           assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
-          System.out.println("  more heap");
           maxMBHeap *= 1.25;
         } catch (IOException ioe) {
           if (ioe.getMessage().contains("a random IOException")) {
@@ -405,7 +403,7 @@
     doTestRandomBinary(10);
   }
 
-  public void testRandomBinarydMedium() throws Exception {
+  public void testRandomBinaryMedium() throws Exception {
     doTestRandomBinary(10000);
   }
 
@@ -601,7 +599,7 @@
         }
 
         final BitSet hits = new BitSet();
-        r.intersect(new BKDReader.IntersectVisitor() {
+        r.intersect(new IntersectVisitor() {
             @Override
             public void visit(int docID) {
               hits.set(docID);
@@ -624,16 +622,12 @@
             }
 
             @Override
-            public BKDReader.Relation compare(byte[] minPacked, byte[] maxPacked) {
+            public Relation compare(byte[] minPacked, byte[] maxPacked) {
               boolean crosses = false;
               for(int dim=0;dim<numDims;dim++) {
-                BigInteger min = BKDUtil.bytesToBigInt(minPacked, dim, numBytesPerDim);
-                BigInteger max = BKDUtil.bytesToBigInt(maxPacked, dim, numBytesPerDim);
-                assert max.compareTo(min) >= 0;
-
                 if (BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
                     BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
-                  return BKDReader.Relation.QUERY_OUTSIDE_CELL;
+                  return Relation.QUERY_OUTSIDE_CELL;
                 } else if (BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
                            BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
                   crosses = true;
@@ -641,9 +635,9 @@
               }
 
               if (crosses) {
-                return BKDReader.Relation.QUERY_CROSSES_CELL;
+                return Relation.QUERY_CROSSES_CELL;
               } else {
-                return BKDReader.Relation.CELL_INSIDE_QUERY;
+                return Relation.CELL_INSIDE_QUERY;
               }
             }
           });
@@ -696,7 +690,6 @@
     } else {
       dir = newDirectory();
     }
-    System.out.println("DIR: " + dir);
     if (dir instanceof MockDirectoryWrapper) {
       ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
     }
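
For context: the test hunks above drop the BKDReader.-qualified names because
Relation and IntersectVisitor were promoted to top-level types in this change.
A minimal sketch of the visitor contract the tests exercise, assuming a single
dimension and reusing the test's own fixtures (r, numBytesPerDim, queryMin,
queryMax); this is illustrative only, not part of the patch:

    final BitSet hits = new BitSet();
    r.intersect(new IntersectVisitor() {
      @Override
      public void visit(int docID) {
        // Reached when compare() returned CELL_INSIDE_QUERY: every doc in
        // the cell matches, so no per-value check is needed.
        hits.set(docID);
      }

      @Override
      public void visit(int docID, byte[] packedValue) {
        // Reached for QUERY_CROSSES_CELL cells: each value must be checked.
        if (BKDUtil.compare(numBytesPerDim, packedValue, 0, queryMin[0], 0) >= 0 &&
            BKDUtil.compare(numBytesPerDim, packedValue, 0, queryMax[0], 0) <= 0) {
          hits.set(docID);
        }
      }

      @Override
      public Relation compare(byte[] minPacked, byte[] maxPacked) {
        if (BKDUtil.compare(numBytesPerDim, maxPacked, 0, queryMin[0], 0) < 0 ||
            BKDUtil.compare(numBytesPerDim, minPacked, 0, queryMax[0], 0) > 0) {
          return Relation.QUERY_OUTSIDE_CELL;   // prune this subtree entirely
        } else if (BKDUtil.compare(numBytesPerDim, minPacked, 0, queryMin[0], 0) < 0 ||
                   BKDUtil.compare(numBytesPerDim, maxPacked, 0, queryMax[0], 0) > 0) {
          return Relation.QUERY_CROSSES_CELL;   // recurse and check every value
        } else {
          return Relation.CELL_INSIDE_QUERY;    // take all docIDs wholesale
        }
      }
    });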
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
index e8d5c8f..4e5d28e 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
@@ -22,6 +22,7 @@
 import java.util.Iterator;
 
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DimensionalValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -78,8 +79,8 @@
       indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
     }
     FieldInfo fieldInfo = new FieldInfo(field, 0,
-        true, true, terms.hasPayloads(),
-        indexOptions, DocValuesType.NONE, -1, Collections.emptyMap());
+                                        true, true, terms.hasPayloads(),
+                                        indexOptions, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0);
     fieldInfos = new FieldInfos(new FieldInfo[]{fieldInfo});
   }
 
@@ -148,6 +149,11 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    return null;
+  }
+
+  @Override
   public void checkIntegrity() throws IOException {
   }
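
The null return above is the contract for readers that index no dimensional
data, mirroring the other optional per-reader structures. A hedged sketch of
the consumer side (the field name "point" and the visitor variable are
illustrative, not from the patch):

    DimensionalValues values = reader.getDimensionalValues();
    if (values != null) {  // null: this reader has no dimensional fields
      values.intersect("point", visitor);
    }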
 
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index dd3e556..28c7513 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -34,8 +34,8 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DimensionalValues;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FieldInvertState;
@@ -44,6 +44,7 @@
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.OrdTermState;
+import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
@@ -63,12 +64,12 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefArray;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
+import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.Counter;
-import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.IntBlockPool.SliceReader;
 import org.apache.lucene.util.IntBlockPool.SliceWriter;
+import org.apache.lucene.util.IntBlockPool;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util.RecyclingIntBlockAllocator;
@@ -442,9 +443,8 @@
         sumTotalTermFreq = info.sumTotalTermFreq;
       } else {
         fieldInfo = new FieldInfo(fieldName, fields.size(), true, false, this.storePayloads,
-            this.storeOffsets
-                ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
-            DocValuesType.NONE, -1, Collections.emptyMap());
+                                  this.storeOffsets ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
+                                  DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0);
         sliceArray = new SliceByteStartArray(BytesRefHash.DEFAULT_CAPACITY);
         terms = new BytesRefHash(byteBlockPool, BytesRefHash.DEFAULT_CAPACITY, sliceArray);
       }
@@ -820,6 +820,11 @@
     }
 
     @Override
+    public DimensionalValues getDimensionalValues() {
+      return null;
+    }
+
+    @Override
     public void checkIntegrity() throws IOException {
       // no-op
     }
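
The FieldInfo constructor changes in this and the remaining hunks all follow
one pattern: two trailing int arguments, the dimensional count and the number
of bytes per dimension, were appended, with 0, 0 meaning the field stores no
dimensional values. A hedged sketch of the new signature for an ordinary
indexed field (the name "body" and the chosen options are illustrative only):

    FieldInfo fi = new FieldInfo("body", 0,            // name, number
                                 false, false, false,  // vectors, omitNorms, payloads
                                 IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
                                 DocValuesType.NONE, -1,
                                 Collections.emptyMap(),
                                 0, 0);                // dimensional count, bytes per dim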
diff --git a/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java b/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
index c01c35b..5b8636b 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java
@@ -211,6 +211,11 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    return in.getDimensionalValues();
+  }
+
+  @Override
   public int numDocs() {
     return in.numDocs();
   }
diff --git a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
index c9c150a..7fcb108 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -254,6 +254,38 @@
     }
   }
 
+  private static class SortingDimensionalValues extends DimensionalValues {
+
+    private final DimensionalValues in;
+    private final Sorter.DocMap docMap;
+
+    public SortingDimensionalValues(final DimensionalValues in, Sorter.DocMap docMap) {
+      this.in = in;
+      this.docMap = docMap;
+    }
+
+    @Override
+    public void intersect(String field, IntersectVisitor visitor) throws IOException {
+      in.intersect(field,
+                   new IntersectVisitor() {
+                     @Override
+                     public void visit(int docID) throws IOException {
+                       // in visits docIDs in pre-sort (old) order; remap each
+                       // one to its position in the sorted (new) order
+                       visitor.visit(docMap.oldToNew(docID));
+                     }
+
+                     @Override
+                     public void visit(int docID, byte[] packedValue) throws IOException {
+                       visitor.visit(docMap.oldToNew(docID), packedValue);
+                     }
+
+                     @Override
+                     public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+                       return visitor.compare(minPackedValue, maxPackedValue);
+                     }
+                   });
+    }
+  }
+
   private static class SortingSortedDocValues extends SortedDocValues {
 
     private final SortedDocValues in;
@@ -799,6 +831,17 @@
   }
 
   @Override
+  public DimensionalValues getDimensionalValues() {
+    final DimensionalValues inDimensionalValues = in.getDimensionalValues();
+    if (inDimensionalValues == null) {
+      return null;
+    } else {
+      // TODO: this is untested!
+      return new SortingDimensionalValues(inDimensionalValues, docMap);
+    }
+  }
+
+  @Override
   public NumericDocValues getNormValues(String field) throws IOException {
     final NumericDocValues norm = in.getNormValues(field);
     if (norm == null) {
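
SortingDimensionalValues above only needs to remap docIDs: the wrapped reader
visits documents under their pre-sort (old) numbering, while consumers of the
sorted view expect the new numbering, hence oldToNew on every visit callback.
A minimal illustration, assuming a hypothetical three-doc segment whose sort
order is exactly reversed:

    Sorter.DocMap reversed = new Sorter.DocMap() {
      @Override public int oldToNew(int docID) { return 2 - docID; }
      @Override public int newToOld(int docID) { return 2 - docID; }
      @Override public int size() { return 3; }
    };
    // A dimensional value stored on old doc 0 must surface as doc 2
    // in the sorted view:
    assert reversed.oldToNew(0) == 2;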
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
index 00683a7..9b65d0c 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
@@ -216,7 +216,7 @@
         }
       }
       filteredInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
-                                      fi.hasPayloads(), fi.getIndexOptions(), type, -1, Collections.emptyMap()));
+                                      fi.hasPayloads(), fi.getIndexOptions(), type, -1, Collections.emptyMap(), 0, 0));
     }
     fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
   }
diff --git a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
index d9a3f20..26c19e4 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
@@ -382,5 +382,6 @@
       assertEquals("incorrect term vector for doc " + i, sortedValues[i].toString(), terms.iterator().next().utf8ToString());
     }
   }
-  
+
+  // TODO: need DimensionalValues test here, once default codec supports DimensionalValues
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java
index 5b0afaa..9913129 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingCodec.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.codecs.DimensionalFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FilterCodec;
 import org.apache.lucene.codecs.LiveDocsFormat;
@@ -53,6 +54,7 @@
   private final LiveDocsFormat liveDocs = new AssertingLiveDocsFormat();
   private final PostingsFormat defaultFormat = new AssertingPostingsFormat();
   private final DocValuesFormat defaultDVFormat = new AssertingDocValuesFormat();
+  private final DimensionalFormat dimensionalFormat = new AssertingDimensionalFormat();
 
   public AssertingCodec() {
     super("Asserting", TestUtil.getDefaultCodec());
@@ -89,6 +91,11 @@
   }
 
   @Override
+  public DimensionalFormat dimensionalFormat() {
+    return dimensionalFormat;
+  }
+
+  @Override
   public String toString() {
     return "Asserting(" + delegate + ")";
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDimensionalFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDimensionalFormat.java
new file mode 100644
index 0000000..ccd73d8
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDimensionalFormat.java
@@ -0,0 +1,122 @@
+package org.apache.lucene.codecs.asserting;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.codecs.DimensionalFormat;
+import org.apache.lucene.codecs.DimensionalReader;
+import org.apache.lucene.codecs.DimensionalWriter;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.TestUtil;
+
+/**
+ * Just like the default dimensional format but with additional asserts.
+ */
+
+public final class AssertingDimensionalFormat extends DimensionalFormat {
+  private final DimensionalFormat in = TestUtil.getDefaultCodec().dimensionalFormat();
+  
+  @Override
+  public DimensionalWriter fieldsWriter(SegmentWriteState state) throws IOException {
+    return new AssertingDimensionalWriter(state, in.fieldsWriter(state));
+  }
+
+  @Override
+  public DimensionalReader fieldsReader(SegmentReadState state) throws IOException {
+    return new AssertingDimensionalReader(in.fieldsReader(state));
+  }
+  
+  static class AssertingDimensionalReader extends DimensionalReader {
+    private final DimensionalReader in;
+    
+    AssertingDimensionalReader(DimensionalReader in) {
+      this.in = in;
+      // do a few simple checks on init
+      assert toString() != null;
+      assert ramBytesUsed() >= 0;
+      assert getChildResources() != null;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      in.close();
+      in.close(); // close again, to verify close is idempotent
+    }
+
+    @Override
+    public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
+      // TODO: wrap the visitor and validate the docIDs and packed values it receives
+      in.intersect(fieldName, visitor);
+    }
+
+    @Override
+    public long ramBytesUsed() {
+      long v = in.ramBytesUsed();
+      assert v >= 0;
+      return v;
+    }
+    
+    @Override
+    public Collection<Accountable> getChildResources() {
+      Collection<Accountable> res = in.getChildResources();
+      TestUtil.checkReadOnly(res);
+      return res;
+    }
+
+    @Override
+    public void checkIntegrity() throws IOException {
+      in.checkIntegrity();
+    }
+    
+    @Override
+    public DimensionalReader getMergeInstance() throws IOException {
+      return new AssertingDimensionalReader(in.getMergeInstance());
+    }
+
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "(" + in.toString() + ")";
+    }
+  }
+
+  static class AssertingDimensionalWriter extends DimensionalWriter {
+    private final DimensionalWriter in;
+    private final SegmentWriteState writeState;
+
+    AssertingDimensionalWriter(SegmentWriteState writeState, DimensionalWriter in) {
+      this.writeState = writeState;
+      this.in = in;
+    }
+    
+    @Override
+    public void writeField(FieldInfo fieldInfo, DimensionalReader values) throws IOException {
+      in.writeField(fieldInfo, values);
+    }
+
+    @Override
+    public void close() throws IOException {
+      in.close();
+      in.close(); // close again, to verify close is idempotent
+    }
+  }
+}
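
AssertingDimensionalFormat follows the same delegating pattern as the other
asserting formats: wrap the default codec's format, forward every call, and
assert invariants on the way through. Plugging it into a codec is a one-method
override, as AssertingCodec above does; a hedged sketch with a hypothetical
codec name:

    public class MyCodec extends FilterCodec {
      private final DimensionalFormat dimensionalFormat = new AssertingDimensionalFormat();

      public MyCodec() {
        super("MyCodec", TestUtil.getDefaultCodec());
      }

      @Override
      public DimensionalFormat dimensionalFormat() {
        return dimensionalFormat;
      }
    }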
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index 3d187c0..f1dbe87 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -311,7 +311,8 @@
     SegmentInfo segmentInfo = new SegmentInfo(dir, Version.LATEST, "_0", 1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
     FieldInfo proto = oneDocReader.getFieldInfos().fieldInfo("field");
     FieldInfo field = new FieldInfo(proto.name, proto.number, proto.hasVectors(), proto.omitsNorms(), proto.hasPayloads(), 
-                                    proto.getIndexOptions(), proto.getDocValuesType(), proto.getDocValuesGen(), new HashMap<>());
+                                    proto.getIndexOptions(), proto.getDocValuesType(), proto.getDocValuesGen(), new HashMap<>(),
+                                    0, 0);
 
     FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { field } );
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
index b7840c6..2d20ec8 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
@@ -66,7 +66,8 @@
                                         oldInfo.getIndexOptions(),   // indexOptions
                                         oldInfo.getDocValuesType(),  // docValuesType
                                         oldInfo.getDocValuesGen(),   // dvGen
-                                        oldInfo.attributes());       // attributes
+                                        oldInfo.attributes(),        // attributes
+                                        0, 0);                       // dimensional count, numBytes
       shuffled.set(i, newInfo);
     }
     
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
index 8e91db2..27ba502 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
@@ -48,8 +48,8 @@
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util.automaton.Automaton;
-import org.apache.lucene.util.automaton.AutomatonTestUtil.RandomAcceptedStrings;
 import org.apache.lucene.util.automaton.AutomatonTestUtil;
+import org.apache.lucene.util.automaton.AutomatonTestUtil.RandomAcceptedStrings;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 
 import static org.junit.Assert.assertEquals;
@@ -58,7 +58,6 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-
 /** Helper class extracted from BasePostingsFormatTestCase to exercise a postings format. */
 public class RandomPostingsTester {
 
@@ -123,7 +122,8 @@
 
       fieldInfoArray[fieldUpto] = new FieldInfo(field, fieldUpto, false, false, true,
                                                 IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS,
-                                                DocValuesType.NONE, -1, new HashMap<>());
+                                                DocValuesType.NONE, -1, new HashMap<>(),
+                                                0, 0);
       fieldUpto++;
 
       SortedMap<BytesRef,SeedAndOrd> postings = new TreeMap<>();
@@ -638,7 +638,8 @@
                                                    indexOptions,
                                                    DocValuesType.NONE,
                                                    -1,
-                                                   new HashMap<>());
+                                                   new HashMap<>(),
+                                                   0, 0);
     }
 
     FieldInfos newFieldInfos = new FieldInfos(newFieldInfoArray);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index 3bd884c..1c83efe 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -23,9 +23,8 @@
 import java.util.List;
 import java.util.Random;
 
-import junit.framework.Assert;
-
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DimensionalValues;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
@@ -46,6 +45,8 @@
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertTrue;
 
+import junit.framework.Assert;
+
 /**
  * Utility class for sanity-checking queries.
  */
@@ -256,6 +257,11 @@
       }
 
       @Override
+      public DimensionalValues getDimensionalValues() {
+        return null;
+      }
+
+      @Override
       public void checkIntegrity() throws IOException {}
 
       @Override
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index bf48ee5..5f90c61 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -740,7 +740,8 @@
               fieldInfo.getIndexOptions(),
               DocValuesType.NONE,
               fieldInfo.getDocValuesGen(),
-              fieldInfo.attributes());
+              fieldInfo.attributes(),
+              0, 0);
           newInfos.add(f);
 
         } else {
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 6cdfa64..50b1d40 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -334,7 +334,8 @@
                                       fieldInfo.getIndexOptions(),
                                       DocValuesType.NONE,
                                       fieldInfo.getDocValuesGen(),
-                                      fieldInfo.attributes());
+                                      fieldInfo.attributes(),
+                                      0, 0);
           newInfos.add(f);
 
         } else {
diff --git a/solr/core/src/java/org/apache/solr/search/Insanity.java b/solr/core/src/java/org/apache/solr/search/Insanity.java
index 283c8d2..ea4e145 100644
--- a/solr/core/src/java/org/apache/solr/search/Insanity.java
+++ b/solr/core/src/java/org/apache/solr/search/Insanity.java
@@ -66,7 +66,8 @@
       for (FieldInfo fi : in.getFieldInfos()) {
         if (fi.name.equals(insaneField)) {
           filteredInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
-                                          fi.hasPayloads(), fi.getIndexOptions(), DocValuesType.NONE, -1, Collections.emptyMap()));
+                                          fi.hasPayloads(), fi.getIndexOptions(), DocValuesType.NONE, -1, Collections.emptyMap(),
+                                          0, 0));
         } else {
           filteredInfos.add(fi);
         }
diff --git a/solr/core/src/test/org/apache/solr/search/TestDocSet.java b/solr/core/src/test/org/apache/solr/search/TestDocSet.java
index bdd5844..9708e82 100644
--- a/solr/core/src/test/org/apache/solr/search/TestDocSet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestDocSet.java
@@ -23,6 +23,7 @@
 import java.util.Random;
 
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DimensionalValues;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
@@ -427,6 +428,11 @@
       }
 
       @Override
+      public DimensionalValues getDimensionalValues() {
+        return null;
+      }
+
+      @Override
       protected void doClose() {
       }