LUCENE-6481: merge trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/LUCENE-6481@1683615 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-tools/idea/.idea/codeStyleSettings.xml b/dev-tools/idea/.idea/codeStyleSettings.xml
new file mode 100644
index 0000000..976fbcd
--- /dev/null
+++ b/dev-tools/idea/.idea/codeStyleSettings.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectCodeStyleSettingsManager">
+    <option name="PER_PROJECT_SETTINGS">
+      <value>
+        <option name="USE_SAME_INDENTS" value="true" />
+        <option name="IGNORE_SAME_INDENTS_FOR_LANGUAGES" value="true" />
+        <option name="OTHER_INDENT_OPTIONS">
+          <value>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+            <option name="USE_TAB_CHARACTER" value="false" />
+            <option name="SMART_TABS" value="false" />
+            <option name="LABEL_INDENT_SIZE" value="0" />
+            <option name="LABEL_INDENT_ABSOLUTE" value="false" />
+            <option name="USE_RELATIVE_INDENTS" value="false" />
+          </value>
+        </option>
+        <option name="CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND" value="20" />
+        <option name="NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND" value="20" />
+        <option name="PACKAGES_TO_USE_IMPORT_ON_DEMAND">
+          <value />
+        </option>
+        <option name="IMPORT_LAYOUT_TABLE">
+          <value>
+            <package name="javax" withSubpackages="true" static="false" />
+            <package name="java" withSubpackages="true" static="false" />
+            <emptyLine />
+            <package name="" withSubpackages="true" static="false" />
+            <emptyLine />
+            <package name="" withSubpackages="true" static="true" />
+          </value>
+        </option>
+        <XML>
+          <option name="XML_LEGACY_SETTINGS_IMPORTED" value="true" />
+        </XML>
+        <codeStyleSettings language="CSS">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="Groovy">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="HTML">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="JAVA">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="JSON">
+          <indentOptions>
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="JavaScript">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="Python">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="TypeScript">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+        <codeStyleSettings language="XML">
+          <indentOptions>
+            <option name="INDENT_SIZE" value="2" />
+            <option name="CONTINUATION_INDENT_SIZE" value="4" />
+            <option name="TAB_SIZE" value="2" />
+          </indentOptions>
+        </codeStyleSettings>
+      </value>
+    </option>
+    <option name="USE_PER_PROJECT_SETTINGS" value="true" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/dev-tools/maven/solr/core/src/test/pom.xml.template b/dev-tools/maven/solr/core/src/test/pom.xml.template
index de14304..9a5e9cb 100644
--- a/dev-tools/maven/solr/core/src/test/pom.xml.template
+++ b/dev-tools/maven/solr/core/src/test/pom.xml.template
@@ -65,6 +65,9 @@
     <testResources>
       <testResource>
         <directory>${module-path}/../test-files</directory>
+        <excludes>
+          <exclude>**/*.java</exclude>
+        </excludes>
       </testResource>
       <testResource>
         <directory>${project.build.testSourceDirectory}</directory>
diff --git a/dev-tools/scripts/checkJavaDocs.py b/dev-tools/scripts/checkJavaDocs.py
index e7b13eb..afa5dc1 100644
--- a/dev-tools/scripts/checkJavaDocs.py
+++ b/dev-tools/scripts/checkJavaDocs.py
@@ -274,7 +274,7 @@
       if lineLower.startswith('package ') or lineLower.startswith('<h1 title="package" '):
         sawPackage = True
       elif sawPackage:
-        if lineLower.startswith('<table ') or lineLower.startswith('<b>see: '):
+        if lineLower.startswith('<table ') or lineLower.startswith('<b>see: ') or lineLower.startswith('<p>see:'):
           desc = ' '.join(desc)
           desc = reMarkup.sub(' ', desc)
           desc = desc.strip()
diff --git a/dev-tools/scripts/createPatch.py b/dev-tools/scripts/createPatch.py
index bfc9505..c2cffec 100644
--- a/dev-tools/scripts/createPatch.py
+++ b/dev-tools/scripts/createPatch.py
@@ -90,7 +90,7 @@
     flags += 'bBw'
 
   args = ['diff', flags]
-  for ignore in ('.svn', '.git', 'build', '.caches', '.idea', 'idea-build', 'eclipse-build', '.settings'):
+  for ignore in ('.svn', '.git', 'build', 'dist', '.caches', '.idea', 'idea-build', 'eclipse-build', '.settings'):
     args.append('-x')
     args.append(ignore)
   args.append(from_dir)
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index ba11e93..74ea18d 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -1372,38 +1372,41 @@
 
   os.chdir(unpackPath)
 
-  for suffix in '',:
-    print('    run TestBackwardsCompatibility%s..' % suffix)
-    command = 'ant test -Dtestcase=TestBackwardsCompatibility%s -Dtests.verbose=true' % suffix
-    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-    stdout, stderr = p.communicate()
-    if p.returncode is not 0:
-      # Not good: the test failed!
-      raise RuntimeError('%s failed:\n%s' % (command, stdout))
-    stdout = stdout.decode('utf-8')
+  print('    run TestBackwardsCompatibility..')
+  command = 'ant test -Dtestcase=TestBackwardsCompatibility -Dtests.verbose=true'
+  p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  stdout, stderr = p.communicate()
+  if p.returncode != 0:
+    # Not good: the test failed!
+    raise RuntimeError('%s failed:\n%s' % (command, stdout))
+  stdout = stdout.decode('utf-8')
 
-    if stderr is not None:
-      # Should not happen since we redirected stderr to stdout:
-      raise RuntimeError('stderr non-empty')
+  if stderr is not None:
+    # Should not happen since we redirected stderr to stdout:
+    raise RuntimeError('stderr non-empty')
 
-    reIndexName = re.compile(r'TEST: index[\s*=\s*](.*?)(-cfs|-nocfs)$', re.MULTILINE)
-    for name, cfsPart in reIndexName.findall(stdout):
-      # Fragile: decode the inconsistent naming schemes we've used in TestBWC's indices:
-      #print('parse name %s' % name)
-      tup = tuple(name.split('.'))
-      if len(tup) == 3:
-        # ok
-        tup = tuple(int(x) for x in tup)
-      elif tup == ('4', '0', '0', '1'):
-        # CONFUSING: this is the 4.0.0-alpha index??
-        tup = 4, 0, 0, 0
-      elif tup == ('4', '0', '0', '2'):
-        # CONFUSING: this is the 4.0.0-beta index??
-        tup = 4, 0, 0, 1
-      else:
-        raise RuntimeError('could not parse version %s' % name)
-          
-      testedIndices.add(tup)
+  reIndexName = re.compile(r'TEST: index[\s*=\s*](.*?)(-cfs|-nocfs)$', re.MULTILINE)
+  for name, cfsPart in reIndexName.findall(stdout):
+    # Fragile: decode the inconsistent naming schemes we've used in TestBWC's indices:
+    #print('parse name %s' % name)
+    tup = tuple(name.split('.'))
+    if len(tup) == 3:
+      # ok
+      tup = tuple(int(x) for x in tup)
+    elif tup == ('4', '0', '0', '1'):
+      # CONFUSING: this is the 4.0.0-alpha index??
+      tup = 4, 0, 0, 0
+    elif tup == ('4', '0', '0', '2'):
+      # CONFUSING: this is the 4.0.0-beta index??
+      tup = 4, 0, 0, 1
+    elif name == '5x-with-4x-segments':
+      # Mixed version test case; ignore it for our purposes because we only
+      # tally up the "tests single Lucene version" indices
+      continue
+    else:
+      raise RuntimeError('could not parse version %s' % name)
+
+    testedIndices.add(tup)
 
   l = list(testedIndices)
   l.sort()
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 05e3b23..4439484 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -33,7 +33,59 @@
   implementation returning the empty list.  (Robert Muir)
 
 ======================= Lucene 5.3.0 =======================
-(No Changes)
+
+New Features
+
+* LUCENE-6485: Add CustomSeparatorBreakIterator to postings
+  highlighter which splits on any character. For example, it 
+  can be used with getMultiValueSeparator to render whole field
+  values.  (Luca Cavanna via Robert Muir)
+
+* LUCENE-6459: Add common suggest API that mirrors Lucene's
+  Query/IndexSearcher APIs for Document based suggester.
+  Adds PrefixCompletionQuery, RegexCompletionQuery,
+  FuzzyCompletionQuery and ContextQuery.
+  (Areek Zillur via Mike McCandless)
+
+* LUCENE-6487: Spatial Geo3D API now has a WGS84 ellipsoid world model option.
+  (Karl Wright via David Smiley)
+
+* LUCENE-6477: Add experimental BKD geospatial tree doc values format
+  and queries, for fast "bbox/polygon contains lat/lon points" (Mike
+  McCandless)
+
+API Changes
+
+* LUCENE-6508: Simplify Lock api, there is now just 
+  Directory.obtainLock() which returns a Lock that can be 
+  released (or fails with exception). Add lock verification 
+  to IndexWriter. Improve exception messages when locking fails.
+  (Uwe Schindler, Mike McCandless, Robert Muir)
+
+Bug fixes
+
+* LUCENE-6500: ParallelCompositeReader did not always call
+  closed listeners. This was fixed by LUCENE-6501.
+  (Adrien Grand, Uwe Schindler)
+
+* LUCENE-6520: Geo3D GeoPath.done() would throw an NPE if adjacent path
+  segments were co-linear. (Karl Wright via David Smiley)
+
+Changes in Runtime Behavior
+
+* LUCENE-6501: The subreader structure in ParallelCompositeReader
+  was flattened, because the current implementation had too many
+  hidden bugs regarding refcounting and close listeners.
+  If you create a new ParallelCompositeReader, it will just take
+  all leaves of the passed readers and form a flat structure of
+  ParallelLeafReaders instead of trying to assemble the original
+  structure of composite and leaf readers.  (Adrien Grand,
+  Uwe Schindler)
+
+Build
+
+* LUCENE-6518: Don't report false thread leaks from IBM J9
+  ClassCache Reaper in test framework. (Dawid Weiss)
 
 ======================= Lucene 5.2.0 =======================
 
@@ -189,6 +241,12 @@
   documents with no payloads and now returns an empty BytesRef instead
   (Marius Grama via Michael McCandless)
 
+* LUCENE-6505: NRT readers now reflect segments_N filename and commit
+  user data from previous commits (Mike McCandless)
+
+* LUCENE-6507: Don't let NativeFSLock.close() release other locks
+  (Simon Willnauer, Robert Muir, Uwe Schindler, Mike McCandless)
+
 API Changes
 
 * LUCENE-6377: SearcherFactory#newSearcher now accepts the previous reader
@@ -216,7 +274,7 @@
   (Paul Elschot via Adrien Grand)
 
 * LUCENE-6466: Moved SpanQuery.getSpans() and .extractTerms() to SpanWeight
-  (Alan Woodward)
+  (Alan Woodward, Robert Muir)
 
 * LUCENE-6497: Allow subclasses of FieldType to check frozen state
   (Ryan Ernst)
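
A minimal sketch of the simplified lock API described in the LUCENE-6508 entry above, assuming an FSDirectory on a hypothetical path: Directory.obtainLock() either returns a lock that is already held or throws LockObtainFailedException, and the lock is released by closing it, so try-with-resources works.

    import java.io.IOException;
    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    public class ObtainLockSketch {
      public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"))) { // hypothetical path
          // obtainLock() replaces the old makeLock()/obtain() pair: there is no
          // separate "try to obtain" step and no isLocked() probe.
          try (Lock lock = dir.obtainLock("demo.lock")) {
            // do work that requires exclusive access while the lock is held
          } catch (LockObtainFailedException e) {
            System.out.println("lock is held elsewhere: " + e.getMessage());
          }
        }
      }
    }
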
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
index bcc42b9..96e7b59 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
@@ -151,7 +151,7 @@
       public void renameFile(String source, String dest) { throw new UnsupportedOperationException(); }
       
       @Override
-      public Lock makeLock(String name) { throw new UnsupportedOperationException(); }
+      public Lock obtainLock(String name) { throw new UnsupportedOperationException(); }
     };
   }
 
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 3468778..ecf1ca8 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -1337,6 +1337,9 @@
 # Do *not* filter stack traces emitted to the console.
 ant -Dtests.filterstacks=false
 
+# Skip checking for non-executed tests in modules
+ant -Dtests.ifNoTests=ignore ...
+
 # Output test files and reports.
 ${tests-output}/tests-report.txt    - full ASCII tests report
 ${tests-output}/tests-failures.txt  - failures only (if any)
@@ -1490,7 +1493,7 @@
       });
       statsFile.delete();
 
-      if (total == 0) {
+      if (total == 0 && !"ignore".equals(project.getProperty("tests.ifNoTests"))) {
         throw new BuildException("Not even a single test was executed (a typo in the filter pattern maybe?).");
       }
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
index 7d7fdc2..5c14b86 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
@@ -179,7 +179,7 @@
   }
   
   @Override
-  public Lock makeLock(String name) {
+  public Lock obtainLock(String name) {
     throw new UnsupportedOperationException();
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 41c97fc..4753220 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -47,7 +47,6 @@
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -356,7 +355,7 @@
 
   /** Create a new CheckIndex on the directory. */
   public CheckIndex(Directory dir) throws IOException {
-    this(dir, dir.makeLock(IndexWriter.WRITE_LOCK_NAME));
+    this(dir, dir.obtainLock(IndexWriter.WRITE_LOCK_NAME));
   }
   
   /** 
@@ -370,9 +369,6 @@
     this.dir = dir;
     this.writeLock = writeLock;
     this.infoStream = null;
-    if (!writeLock.obtain(IndexWriterConfig.WRITE_LOCK_TIMEOUT)) { // obtain write lock
-      throw new LockObtainFailedException("Index locked for write: " + writeLock);
-    }
   }
   
   private void ensureOpen() {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index 8a6232c..0edd72a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -95,6 +95,7 @@
  */
 
 final class DocumentsWriter implements Closeable, Accountable {
+  private final Directory directoryOrig; // no wrapping, for infos
   private final Directory directory;
 
   private volatile boolean closed;
@@ -123,7 +124,8 @@
   private final Queue<Event> events;
 
   
-  DocumentsWriter(IndexWriter writer, LiveIndexWriterConfig config, Directory directory) {
+  DocumentsWriter(IndexWriter writer, LiveIndexWriterConfig config, Directory directoryOrig, Directory directory) {
+    this.directoryOrig = directoryOrig;
     this.directory = directory;
     this.config = config;
     this.infoStream = config.getInfoStream();
@@ -393,7 +395,7 @@
     if (state.isActive() && state.dwpt == null) {
       final FieldInfos.Builder infos = new FieldInfos.Builder(
           writer.globalFieldNumberMap);
-      state.dwpt = new DocumentsWriterPerThread(writer.newSegmentName(),
+      state.dwpt = new DocumentsWriterPerThread(writer.newSegmentName(), directoryOrig,
                                                 directory, config, infoStream, deleteQueue, infos,
                                                 writer.pendingNumDocs, writer.enableTestPoints);
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index 8ad9934..321ab9b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -158,9 +158,9 @@
   private final LiveIndexWriterConfig indexWriterConfig;
   private final boolean enableTestPoints;
   
-  public DocumentsWriterPerThread(String segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
+  public DocumentsWriterPerThread(String segmentName, Directory directoryOrig, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue,
                                   FieldInfos.Builder fieldInfos, AtomicLong pendingNumDocs, boolean enableTestPoints) throws IOException {
-    this.directoryOrig = directory;
+    this.directoryOrig = directoryOrig;
     this.directory = new TrackingDirectoryWrapper(directory);
     this.fieldInfos = fieldInfos;
     this.indexWriterConfig = indexWriterConfig;
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
index 2500c03..d4fd100 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
@@ -102,8 +102,9 @@
   private List<CommitPoint> commitsToDelete = new ArrayList<>();
 
   private final InfoStream infoStream;
-  private Directory directory;
-  private IndexDeletionPolicy policy;
+  private final Directory directoryOrig; // for commit point metadata
+  private final Directory directory;
+  private final IndexDeletionPolicy policy;
 
   final boolean startingCommitDeleted;
   private SegmentInfos lastSegmentInfos;
@@ -126,7 +127,7 @@
    * any files not referenced by any of the commits.
    * @throws IOException if there is a low-level IO error
    */
-  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos,
+  public IndexFileDeleter(Directory directoryOrig, Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos,
                           InfoStream infoStream, IndexWriter writer, boolean initialIndexExists) throws IOException {
     Objects.requireNonNull(writer);
     this.infoStream = infoStream;
@@ -139,6 +140,7 @@
     }
 
     this.policy = policy;
+    this.directoryOrig = directoryOrig;
     this.directory = directory;
 
     // First pass: walk the files and initialize our ref
@@ -165,7 +167,7 @@
             }
             SegmentInfos sis = null;
             try {
-              sis = SegmentInfos.readCommit(directory, fileName);
+              sis = SegmentInfos.readCommit(directoryOrig, fileName);
             } catch (FileNotFoundException | NoSuchFileException e) {
               // LUCENE-948: on NFS (and maybe others), if
               // you have writers switching back and forth
@@ -179,7 +181,7 @@
               }
             }
             if (sis != null) {
-              final CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
+              final CommitPoint commitPoint = new CommitPoint(commitsToDelete, directoryOrig, sis);
               if (sis.getGeneration() == segmentInfos.getGeneration()) {
                 currentCommitPoint = commitPoint;
               }
@@ -205,14 +207,14 @@
       // try now to explicitly open this commit point:
       SegmentInfos sis = null;
       try {
-        sis = SegmentInfos.readCommit(directory, currentSegmentsFile);
+        sis = SegmentInfos.readCommit(directoryOrig, currentSegmentsFile);
       } catch (IOException e) {
         throw new CorruptIndexException("unable to read current segments_N file", currentSegmentsFile, e);
       }
       if (infoStream.isEnabled("IFD")) {
         infoStream.message("IFD", "forced open of current segments file " + segmentInfos.getSegmentsFileName());
       }
-      currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
+      currentCommitPoint = new CommitPoint(commitsToDelete, directoryOrig, sis);
       commits.add(currentCommitPoint);
       incRef(sis, true);
     }
@@ -557,7 +559,7 @@
 
     if (isCommit) {
       // Append to our commits list:
-      commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos));
+      commits.add(new CommitPoint(commitsToDelete, directoryOrig, segmentInfos));
 
       // Tell policy so it can remove commits:
       policy.onCommit(commits);
@@ -780,14 +782,14 @@
     Collection<String> files;
     String segmentsFileName;
     boolean deleted;
-    Directory directory;
+    Directory directoryOrig;
     Collection<CommitPoint> commitsToDelete;
     long generation;
     final Map<String,String> userData;
     private final int segmentCount;
 
-    public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
-      this.directory = directory;
+    public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directoryOrig, SegmentInfos segmentInfos) throws IOException {
+      this.directoryOrig = directoryOrig;
       this.commitsToDelete = commitsToDelete;
       userData = segmentInfos.getUserData();
       segmentsFileName = segmentInfos.getSegmentsFileName();
@@ -818,7 +820,7 @@
 
     @Override
     public Directory getDirectory() {
-      return directory;
+      return directoryOrig;
     }
 
     @Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 1019903..d37cd35 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -60,6 +60,7 @@
 import org.apache.lucene.store.MergeInfo;
 import org.apache.lucene.store.RateLimitedIndexOutput;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
+import org.apache.lucene.store.LockValidatingDirectoryWrapper;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -118,9 +119,7 @@
 
   <p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
   another <code>IndexWriter</code> on the same directory will lead to a
-  {@link LockObtainFailedException}. The {@link LockObtainFailedException}
-  is also thrown if an IndexReader on the same directory is used to delete documents
-  from the index.</p>
+  {@link LockObtainFailedException}.</p>
   
   <a name="deletionPolicy"></a>
   <p>Expert: <code>IndexWriter</code> allows an optional
@@ -254,8 +253,9 @@
   // when unrecoverable disaster strikes, we populate this with the reason that we had to close IndexWriter
   volatile Throwable tragedy;
 
-  private final Directory directory;  // where this index resides
-  private final Directory mergeDirectory;  // used for merging
+  private final Directory directoryOrig;       // original user directory
+  private final Directory directory;           // wrapped with additional checks
+  private final Directory mergeDirectory;      // wrapped with throttling: used for merging
   private final Analyzer analyzer;    // how to analyze text
 
   private final AtomicLong changeCount = new AtomicLong(); // increments every time a change is completed
@@ -645,7 +645,7 @@
       // Make sure no new readers can be opened if another thread just closed us:
       ensureOpen(false);
 
-      assert info.info.dir == directory: "info.dir=" + info.info.dir + " vs " + directory;
+      assert info.info.dir == directoryOrig: "info.dir=" + info.info.dir + " vs " + directoryOrig;
 
       ReadersAndUpdates rld = readerMap.get(info);
       if (rld == null) {
@@ -754,29 +754,37 @@
   public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
     conf.setIndexWriter(this); // prevent reuse by other instances
     config = conf;
-
-    directory = d;
-
-    // Directory we use for merging, so we can abort running merges, and so
-    // merge schedulers can optionally rate-limit per-merge IO:
-    mergeDirectory = addMergeRateLimiters(d);
-
-    analyzer = config.getAnalyzer();
     infoStream = config.getInfoStream();
-    mergeScheduler = config.getMergeScheduler();
-    mergeScheduler.setInfoStream(infoStream);
-    codec = config.getCodec();
-
-    bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
-    poolReaders = config.getReaderPooling();
-
-    writeLock = directory.makeLock(WRITE_LOCK_NAME);
-
-    if (!writeLock.obtain(config.getWriteLockTimeout())) // obtain write lock
-      throw new LockObtainFailedException("Index locked for write: " + writeLock);
-
+    
+    // obtain the write.lock. If the user configured a timeout,
+    // we wrap with a sleeper and this might take some time.
+    long timeout = config.getWriteLockTimeout();
+    final Directory lockDir;
+    if (timeout == 0) {
+      // user doesn't want sleep/retries
+      lockDir = d;
+    } else {
+      lockDir = new SleepingLockWrapper(d, timeout);
+    }
+    writeLock = lockDir.obtainLock(WRITE_LOCK_NAME);
+    
     boolean success = false;
     try {
+      directoryOrig = d;
+      directory = new LockValidatingDirectoryWrapper(d, writeLock);
+
+      // Directory we use for merging, so we can abort running merges, and so
+      // merge schedulers can optionally rate-limit per-merge IO:
+      mergeDirectory = addMergeRateLimiters(directory);
+
+      analyzer = config.getAnalyzer();
+      mergeScheduler = config.getMergeScheduler();
+      mergeScheduler.setInfoStream(infoStream);
+      codec = config.getCodec();
+
+      bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
+      poolReaders = config.getReaderPooling();
+
       OpenMode mode = config.getOpenMode();
       boolean create;
       if (mode == OpenMode.CREATE) {
@@ -822,7 +830,7 @@
 
         // Do not use SegmentInfos.read(Directory) since the spooky
         // retrying it does is not necessary here (we hold the write lock):
-        segmentInfos = SegmentInfos.readCommit(directory, lastSegmentsFile);
+        segmentInfos = SegmentInfos.readCommit(directoryOrig, lastSegmentsFile);
 
         IndexCommit commit = config.getIndexCommit();
         if (commit != null) {
@@ -831,9 +839,9 @@
           // preserve write-once.  This is important if
           // readers are open against the future commit
           // points.
-          if (commit.getDirectory() != directory)
-            throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory");
-          SegmentInfos oldInfos = SegmentInfos.readCommit(directory, commit.getSegmentsFileName());
+          if (commit.getDirectory() != directoryOrig)
+            throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory, expected=" + directoryOrig + ", got=" + commit.getDirectory());
+          SegmentInfos oldInfos = SegmentInfos.readCommit(directoryOrig, commit.getSegmentsFileName());
           segmentInfos.replace(oldInfos);
           changed();
           if (infoStream.isEnabled("IW")) {
@@ -848,13 +856,13 @@
       // start with previous field numbers, but new FieldInfos
       globalFieldNumberMap = getFieldNumberMap();
       config.getFlushPolicy().init(config);
-      docWriter = new DocumentsWriter(this, config, directory);
+      docWriter = new DocumentsWriter(this, config, directoryOrig, directory);
       eventQueue = docWriter.eventQueue();
 
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
       synchronized(this) {
-        deleter = new IndexFileDeleter(directory,
+        deleter = new IndexFileDeleter(directoryOrig, directory,
                                        config.getIndexDeletionPolicy(),
                                        segmentInfos, infoStream, this,
                                        initialIndexExists);
@@ -937,7 +945,7 @@
   private void messageState() {
     if (infoStream.isEnabled("IW") && didMessageState == false) {
       didMessageState = true;
-      infoStream.message("IW", "\ndir=" + directory + "\n" +
+      infoStream.message("IW", "\ndir=" + directoryOrig + "\n" +
             "index=" + segString() + "\n" +
             "version=" + Version.LATEST.toString() + "\n" +
             config.toString());
@@ -1036,7 +1044,8 @@
 
   /** Returns the Directory used by this index. */
   public Directory getDirectory() {
-    return directory;
+    // return the original directory the user supplied, unwrapped.
+    return directoryOrig;
   }
 
   /** Returns the analyzer used by this index. */
@@ -2274,7 +2283,7 @@
     for(int i=0;i<dirs.length;i++) {
       if (dups.contains(dirs[i]))
         throw new IllegalArgumentException("Directory " + dirs[i] + " appears more than once");
-      if (dirs[i] == directory)
+      if (dirs[i] == directoryOrig)
         throw new IllegalArgumentException("Cannot add directory to itself");
       dups.add(dirs[i]);
     }
@@ -2288,13 +2297,13 @@
     for(int i=0;i<dirs.length;i++) {
       boolean success = false;
       try {
-        Lock lock = dirs[i].makeLock(WRITE_LOCK_NAME);
+        Lock lock = dirs[i].obtainLock(WRITE_LOCK_NAME);
         locks.add(lock);
-        lock.obtain(config.getWriteLockTimeout());
         success = true;
       } finally {
         if (success == false) {
           // Release all previously acquired locks:
+          // TODO: addSuppressed? it could be many...
           IOUtils.closeWhileHandlingException(locks);
         }
       }
@@ -2334,8 +2343,6 @@
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
-   * @throws LockObtainFailedException if we were unable to
-   *   acquire the write lock in at least one directory
    * @throws IllegalArgumentException if addIndexes would cause
    *   the index to exceed {@link #MAX_DOCS}
    */
@@ -2496,7 +2503,7 @@
       // abortable so that IW.close(false) is able to stop it
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
 
-      SegmentInfo info = new SegmentInfo(directory, Version.LATEST, mergedName, -1,
+      SegmentInfo info = new SegmentInfo(directoryOrig, Version.LATEST, mergedName, -1,
                                          false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
 
       SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
@@ -2600,7 +2607,7 @@
     
     //System.out.println("copy seg=" + info.info.name + " version=" + info.info.getVersion());
     // Same SI as before but we change directory and name
-    SegmentInfo newInfo = new SegmentInfo(directory, info.info.getVersion(), segName, info.info.maxDoc(),
+    SegmentInfo newInfo = new SegmentInfo(directoryOrig, info.info.getVersion(), segName, info.info.maxDoc(),
                                           info.info.getUseCompoundFile(), info.info.getCodec(), 
                                           info.info.getDiagnostics(), info.info.getId(), info.info.getAttributes());
     SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getDelGen(), 
@@ -2880,10 +2887,17 @@
             // we committed, if anything goes wrong after this, we are screwed and it's a tragedy:
             commitCompleted = true;
 
+            if (infoStream.isEnabled("IW")) {
+              infoStream.message("IW", "commit: done writing segments file \"" + committedSegmentsFileName + "\"");
+            }
+
             // NOTE: don't use this.checkpoint() here, because
             // we do not want to increment changeCount:
             deleter.checkpoint(pendingCommit, true);
 
+            // Carry over generation to our master SegmentInfos:
+            segmentInfos.updateGeneration(pendingCommit);
+
             lastCommitChangeCount = pendingCommitChangeCount;
             rollbackSegments = pendingCommit.createBackupSegmentInfos();
 
@@ -2922,7 +2936,6 @@
     }
 
     if (infoStream.isEnabled("IW")) {
-      infoStream.message("IW", "commit: wrote segments file \"" + committedSegmentsFileName + "\"");
       infoStream.message("IW", String.format(Locale.ROOT, "commit: took %.1f msec", (System.nanoTime()-startCommitTime)/1000000.0));
       infoStream.message("IW", "commit: done");
     }
@@ -3069,7 +3082,7 @@
   private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
     for(SegmentCommitInfo info : merge.segments) {
       if (!segmentInfos.contains(info)) {
-        throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directory);
+        throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directoryOrig);
       }
     }
   }
@@ -3593,7 +3606,7 @@
         }
         return false;
       }
-      if (info.info.dir != directory) {
+      if (info.info.dir != directoryOrig) {
         isExternal = true;
       }
       if (segmentsToMerge.containsKey(info)) {
@@ -3726,7 +3739,7 @@
     // ConcurrentMergePolicy we keep deterministic segment
     // names.
     final String mergeSegmentName = newSegmentName();
-    SegmentInfo si = new SegmentInfo(directory, Version.LATEST, mergeSegmentName, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
+    SegmentInfo si = new SegmentInfo(directoryOrig, Version.LATEST, mergeSegmentName, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());
     Map<String,String> details = new HashMap<>();
     details.put("mergeMaxNumSegments", "" + merge.maxNumSegments);
     details.put("mergeFactor", Integer.toString(merge.segments.size()));
@@ -4297,6 +4310,10 @@
           // (this method unwinds everything it did on
           // an exception)
           toSync.prepareCommit(directory);
+          if (infoStream.isEnabled("IW")) {
+            infoStream.message("IW", "startCommit: wrote pending segments file \"" + IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", toSync.getGeneration()) + "\"");
+          }
+
           //System.out.println("DONE prepareCommit");
 
           pendingCommitSet = true;
@@ -4355,9 +4372,17 @@
    * currently locked.
    * @param directory the directory to check for a lock
    * @throws IOException if there is a low-level IO error
+   * @deprecated Use of this method can only lead to race conditions. Try
+   *             to actually obtain a lock instead.
    */
+  @Deprecated
   public static boolean isLocked(Directory directory) throws IOException {
-    return directory.makeLock(WRITE_LOCK_NAME).isLocked();
+    try {
+      directory.obtainLock(WRITE_LOCK_NAME).close();
+      return false;
+    } catch (LockObtainFailedException failed) {
+      return true;
+    }
   }
 
   /** If {@link DirectoryReader#open(IndexWriter,boolean)} has
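
As the deprecation note on isLocked() above says, a check-then-act probe is inherently racy; a hedged sketch of the suggested alternative is to simply attempt to obtain write.lock and react to failure (the helper name is illustrative, and the answer can be stale the moment it is returned):

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    public class WriteLockProbe {
      /** Instead of the racy IndexWriter.isLocked(dir), try to take the lock. */
      public static boolean writeLockIsFree(Directory dir) throws IOException {
        try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
          return true;   // the lock was free; it is released again on close
        } catch (LockObtainFailedException e) {
          return false;  // another IndexWriter (or CheckIndex) holds write.lock
        }
      }
    }
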
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
index 89ace39..623a342 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -265,7 +265,8 @@
   /**
    * Sets the maximum time to wait for a write lock (in milliseconds) for this
    * instance. You can change the default value for all instances by calling
-   * {@link #setDefaultWriteLockTimeout(long)}.
+   * {@link #setDefaultWriteLockTimeout(long)}. Note that the value can be zero,
+   * for no sleep/retry behavior.
    *
    * <p>Only takes effect when IndexWriter is first created. */
   public IndexWriterConfig setWriteLockTimeout(long writeLockTimeout) {
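
A short sketch of the zero-timeout case mentioned in the javadoc above: with writeLockTimeout set to 0 there is no sleep/retry, so a second writer fails immediately with LockObtainFailedException. StandardAnalyzer (from the analyzers-common module) and the helper name are just illustrative choices.

    import java.io.IOException;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;

    public class FailFastWriter {
      public static IndexWriter open(Directory dir) throws IOException {
        IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
        // 0 means "no sleep/retry": if write.lock is already held, the IndexWriter
        // constructor throws LockObtainFailedException right away instead of polling.
        conf.setWriteLockTimeout(0);
        return new IndexWriter(dir, conf);
      }
    }
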
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
index f6905d4..5132f60 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
@@ -23,12 +23,13 @@
 import java.util.List;
 import java.util.Set;
 
-/** An {@link CompositeReader} which reads multiple, parallel indexes.  Each index added
- * must have the same number of documents, and exactly the same hierarchical subreader structure,
- * but typically each contains different fields. Deletions are taken from the first reader.
- * Each document contains the union of the fields of all
- * documents with the same document number.  When searching, matches for a
- * query term are from the first index added that has the field.
+/** A {@link CompositeReader} which reads multiple, parallel indexes.  Each
+ * index added must have the same number of documents, and exactly the same
+ * number of leaves (with equal {@code maxDoc}), but typically each contains
+ * different fields. Deletions are taken from the first reader. Each document
+ * contains the union of the fields of all documents with the same document
+ * number.  When searching, matches for a query term are from the first index
+ * added that has the field.
  *
  * <p>This is useful, e.g., with collections that have large fields which
  * change rarely and small fields that change more frequently.  The smaller
@@ -46,7 +47,7 @@
  * by number of documents per segment. If you use different {@link MergePolicy}s
  * it might happen that the segment structure of your index is no longer predictable.
  */
-public class ParallelCompositeReader extends BaseCompositeReader<IndexReader> {
+public class ParallelCompositeReader extends BaseCompositeReader<LeafReader> {
   private final boolean closeSubReaders;
   private final Set<IndexReader> completeReaderSet =
     Collections.newSetFromMap(new IdentityHashMap<IndexReader,Boolean>());
@@ -67,7 +68,7 @@
    *  readers and storedFieldReaders; when a document is
    *  loaded, only storedFieldsReaders will be used. */
   public ParallelCompositeReader(boolean closeSubReaders, CompositeReader[] readers, CompositeReader[] storedFieldReaders) throws IOException {
-    super(prepareSubReaders(readers, storedFieldReaders));
+    super(prepareLeafReaders(readers, storedFieldReaders));
     this.closeSubReaders = closeSubReaders;
     Collections.addAll(completeReaderSet, readers);
     Collections.addAll(completeReaderSet, storedFieldReaders);
@@ -81,84 +82,62 @@
     completeReaderSet.addAll(getSequentialSubReaders());
   }
 
-  private static IndexReader[] prepareSubReaders(CompositeReader[] readers, CompositeReader[] storedFieldsReaders) throws IOException {
+  private static LeafReader[] prepareLeafReaders(CompositeReader[] readers, CompositeReader[] storedFieldsReaders) throws IOException {
     if (readers.length == 0) {
       if (storedFieldsReaders.length > 0)
         throw new IllegalArgumentException("There must be at least one main reader if storedFieldsReaders are used.");
-      return new IndexReader[0];
+      return new LeafReader[0];
     } else {
-      final List<? extends IndexReader> firstSubReaders = readers[0].getSequentialSubReaders();
+      final List<? extends LeafReaderContext> firstLeaves = readers[0].leaves();
 
       // check compatibility:
-      final int maxDoc = readers[0].maxDoc(), noSubs = firstSubReaders.size();
-      final int[] childMaxDoc = new int[noSubs];
-      final boolean[] childAtomic = new boolean[noSubs];
-      for (int i = 0; i < noSubs; i++) {
-        final IndexReader r = firstSubReaders.get(i);
-        childMaxDoc[i] = r.maxDoc();
-        childAtomic[i] = r instanceof LeafReader;
+      final int maxDoc = readers[0].maxDoc(), noLeaves = firstLeaves.size();
+      final int[] leafMaxDoc = new int[noLeaves];
+      for (int i = 0; i < noLeaves; i++) {
+        final LeafReader r = firstLeaves.get(i).reader();
+        leafMaxDoc[i] = r.maxDoc();
       }
-      validate(readers, maxDoc, childMaxDoc, childAtomic);
-      validate(storedFieldsReaders, maxDoc, childMaxDoc, childAtomic);
+      validate(readers, maxDoc, leafMaxDoc);
+      validate(storedFieldsReaders, maxDoc, leafMaxDoc);
 
-      // hierarchically build the same subreader structure as the first CompositeReader with Parallel*Readers:
-      final IndexReader[] subReaders = new IndexReader[noSubs];
-      for (int i = 0; i < subReaders.length; i++) {
-        if (firstSubReaders.get(i) instanceof LeafReader) {
-          final LeafReader[] atomicSubs = new LeafReader[readers.length];
-          for (int j = 0; j < readers.length; j++) {
-            atomicSubs[j] = (LeafReader) readers[j].getSequentialSubReaders().get(i);
-          }
-          final LeafReader[] storedSubs = new LeafReader[storedFieldsReaders.length];
-          for (int j = 0; j < storedFieldsReaders.length; j++) {
-            storedSubs[j] = (LeafReader) storedFieldsReaders[j].getSequentialSubReaders().get(i);
-          }
-          // We pass true for closeSubs and we prevent closing of subreaders in doClose():
-          // By this the synthetic throw-away readers used here are completely invisible to ref-counting
-          subReaders[i] = new ParallelLeafReader(true, atomicSubs, storedSubs) {
-            @Override
-            protected void doClose() {}
-          };
-        } else {
-          assert firstSubReaders.get(i) instanceof CompositeReader;
-          final CompositeReader[] compositeSubs = new CompositeReader[readers.length];
-          for (int j = 0; j < readers.length; j++) {
-            compositeSubs[j] = (CompositeReader) readers[j].getSequentialSubReaders().get(i);
-          }
-          final CompositeReader[] storedSubs = new CompositeReader[storedFieldsReaders.length];
-          for (int j = 0; j < storedFieldsReaders.length; j++) {
-            storedSubs[j] = (CompositeReader) storedFieldsReaders[j].getSequentialSubReaders().get(i);
-          }
-          // We pass true for closeSubs and we prevent closing of subreaders in doClose():
-          // By this the synthetic throw-away readers used here are completely invisible to ref-counting
-          subReaders[i] = new ParallelCompositeReader(true, compositeSubs, storedSubs) {
-            @Override
-            protected void doClose() {}
-          };
+      // flatten structure of each Composite to just LeafReader[]
+      // and combine parallel structure with ParallelLeafReaders:
+      final LeafReader[] wrappedLeaves = new LeafReader[noLeaves];
+      for (int i = 0; i < wrappedLeaves.length; i++) {
+        final LeafReader[] subs = new LeafReader[readers.length];
+        for (int j = 0; j < readers.length; j++) {
+          subs[j] = readers[j].leaves().get(i).reader();
         }
+        final LeafReader[] storedSubs = new LeafReader[storedFieldsReaders.length];
+        for (int j = 0; j < storedFieldsReaders.length; j++) {
+          storedSubs[j] = storedFieldsReaders[j].leaves().get(i).reader();
+        }
+        // We pass true for closeSubs and we prevent touching of subreaders in doClose():
+        // By this the synthetic throw-away readers used here are completely invisible to ref-counting
+        wrappedLeaves[i] = new ParallelLeafReader(true, subs, storedSubs) {
+          @Override
+          protected void doClose() {}
+        };
       }
-      return subReaders;
+      return wrappedLeaves;
     }
   }
   
-  private static void validate(CompositeReader[] readers, int maxDoc, int[] childMaxDoc, boolean[] childAtomic) {
+  private static void validate(CompositeReader[] readers, int maxDoc, int[] leafMaxDoc) {
     for (int i = 0; i < readers.length; i++) {
       final CompositeReader reader = readers[i];
-      final List<? extends IndexReader> subs = reader.getSequentialSubReaders();
+      final List<? extends LeafReaderContext> subs = reader.leaves();
       if (reader.maxDoc() != maxDoc) {
         throw new IllegalArgumentException("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
       }
       final int noSubs = subs.size();
-      if (noSubs != childMaxDoc.length) {
-        throw new IllegalArgumentException("All readers must have same number of subReaders");
+      if (noSubs != leafMaxDoc.length) {
+        throw new IllegalArgumentException("All readers must have same number of leaf readers");
       }
       for (int subIDX = 0; subIDX < noSubs; subIDX++) {
-        final IndexReader r = subs.get(subIDX);
-        if (r.maxDoc() != childMaxDoc[subIDX]) {
-          throw new IllegalArgumentException("All readers must have same corresponding subReader maxDoc");
-        }
-        if (!(childAtomic[subIDX] ? (r instanceof LeafReader) : (r instanceof CompositeReader))) {
-          throw new IllegalArgumentException("All readers must have same corresponding subReader types (atomic or composite)");
+        final LeafReader r = subs.get(subIDX).reader();
+        if (r.maxDoc() != leafMaxDoc[subIDX]) {
+          throw new IllegalArgumentException("All leaf readers must have same corresponding subReader maxDoc");
         }
       }
     }    
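
A hedged sketch of the flattened structure described in the updated javadoc: both directories are assumed to hold parallel indexes with the same maxDoc and the same number of leaves with matching per-leaf maxDoc (for example because both were built with a LogDocMergePolicy), and the resulting reader exposes one ParallelLeafReader per aligned leaf pair instead of mirroring the original composite nesting.

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.ParallelCompositeReader;
    import org.apache.lucene.store.Directory;

    public class ParallelLeavesSketch {
      public static void printLeaves(Directory rarelyChanging, Directory frequentlyChanging) throws IOException {
        DirectoryReader big = DirectoryReader.open(rarelyChanging);
        DirectoryReader small = DirectoryReader.open(frequentlyChanging);

        // Throws IllegalArgumentException if maxDoc or the leaf structure differs.
        // This constructor closes the sub-readers when the parallel reader is closed.
        try (ParallelCompositeReader parallel = new ParallelCompositeReader(big, small)) {
          for (LeafReaderContext ctx : parallel.leaves()) {
            System.out.println(ctx.reader()); // one ParallelLeafReader per aligned leaf pair
          }
        }
      }
    }
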
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
index b101fef..f9f5db8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -441,7 +441,6 @@
           segnOutput.writeInt(e.getKey());
           segnOutput.writeSetOfStrings(e.getValue());
         }
-        assert si.dir == directory;
       }
       segnOutput.writeMapOfStrings(userData);
       CodecUtil.writeFooter(segnOutput);
@@ -757,6 +756,8 @@
     } else {
       userData = data;
     }
+
+    changed();
   }
 
   /** Replaces all segments in this instance, but keeps
diff --git a/lucene/core/src/java/org/apache/lucene/index/SleepingLockWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SleepingLockWrapper.java
new file mode 100644
index 0000000..3a01d3b
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/SleepingLockWrapper.java
@@ -0,0 +1,113 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+/** 
+ * Directory that wraps another, and that sleeps and retries
+ * if obtaining the lock fails.
+ * <p>
+ * This is not a good idea.
+ */
+final class SleepingLockWrapper extends FilterDirectory {
+ 
+  /** 
+   * Pass this lockWaitTimeout to try forever to obtain the lock. 
+   */
+  public static final long LOCK_OBTAIN_WAIT_FOREVER = -1;
+  
+  /** 
+   * How long {@link #obtainLock} waits, in milliseconds,
+   * in between attempts to acquire the lock. 
+   */
+  public static long DEFAULT_POLL_INTERVAL = 1000;
+  
+  private final long lockWaitTimeout;
+  private final long pollInterval;
+  
+  /**
+   * Create a new SleepingLockWrapper
+   * @param delegate        underlying directory to wrap
+   * @param lockWaitTimeout length of time to wait in milliseconds 
+   *                        or {@link #LOCK_OBTAIN_WAIT_FOREVER} to retry forever.
+   */
+  public SleepingLockWrapper(Directory delegate, long lockWaitTimeout) {
+    this(delegate, lockWaitTimeout, DEFAULT_POLL_INTERVAL);
+  }
+  
+  /**
+   * Create a new SleepingLockWrapper
+   * @param delegate        underlying directory to wrap
+   * @param lockWaitTimeout length of time to wait in milliseconds 
+   *                        or {@link #LOCK_OBTAIN_WAIT_FOREVER} to retry forever.
+   * @param pollInterval    poll once per this interval in milliseconds until
+   *                        {@code lockWaitTimeout} is exceeded.
+   */
+  public SleepingLockWrapper(Directory delegate, long lockWaitTimeout, long pollInterval) {
+    super(delegate);
+    this.lockWaitTimeout = lockWaitTimeout;
+    this.pollInterval = pollInterval;
+    if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER) {
+      throw new IllegalArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
+    }
+    if (pollInterval < 0) {
+      throw new IllegalArgumentException("pollInterval must be a non-negative number (got " + pollInterval + ")");
+    }
+  }
+
+  @Override
+  public Lock obtainLock(String lockName) throws IOException {
+    LockObtainFailedException failureReason = null;
+    long maxSleepCount = lockWaitTimeout / pollInterval;
+    long sleepCount = 0;
+    
+    do {
+      try {
+        return in.obtainLock(lockName);
+      } catch (LockObtainFailedException failed) {
+        if (failureReason == null) {
+          failureReason = failed;
+        }
+      }
+      try {
+        Thread.sleep(pollInterval);
+      } catch (InterruptedException ie) {
+        throw new ThreadInterruptedException(ie);
+      }
+    } while (sleepCount++ < maxSleepCount || lockWaitTimeout == LOCK_OBTAIN_WAIT_FOREVER);
+    
+    // we failed to obtain the lock in the required time
+    String reason = "Lock obtain timed out: " + this.toString();
+    if (failureReason != null) {
+      reason += ": " + failureReason;
+    }
+    throw new LockObtainFailedException(reason, failureReason);
+  }
+
+  @Override
+  public String toString() {
+    return "SleepingLockWrapper(" + in + ")";
+  }
+}
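
A small sketch of the retry behavior implemented above. SleepingLockWrapper is package-private, so code like this would have to live in org.apache.lucene.index (for instance a test in that package); the path, lock name and timings are hypothetical.

    package org.apache.lucene.index;

    import java.io.IOException;
    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    public class SleepingLockWrapperSketch {
      public static void main(String[] args) throws IOException {
        Directory base = FSDirectory.open(Paths.get("/tmp/lock-demo"));
        Lock held = base.obtainLock("demo.lock");       // hold the lock up front

        // Wait up to ~2 seconds, polling every 500 ms, before giving up.
        Directory sleepy = new SleepingLockWrapper(base, 2000, 500);
        try {
          sleepy.obtainLock("demo.lock");
        } catch (LockObtainFailedException expected) {
          // thrown after the timeout; the first failure is attached as the cause
        }

        held.close();                                   // now obtainLock would succeed
        base.close();
      }
    }
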
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 7ce87cc..9fddd20 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -18,6 +18,8 @@
  */
 
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Scorer;
@@ -28,7 +30,6 @@
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanScorer;
-import org.apache.lucene.search.spans.SpanSimilarity;
 import org.apache.lucene.search.spans.SpanWeight;
 import org.apache.lucene.search.spans.Spans;
 import org.apache.lucene.util.Bits;
@@ -40,6 +41,7 @@
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 
 /**
@@ -78,8 +80,7 @@
     for (SpanQuery q : clauses) {
       subWeights.add(q.createWeight(searcher, false, PayloadSpanCollector.FACTORY));
     }
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, subWeights);
-    return new PayloadNearSpanWeight(subWeights, similarity);
+    return new PayloadNearSpanWeight(subWeights, searcher, needsScores ? getTermContexts(subWeights) : null);
   }
 
   @Override
@@ -138,18 +139,19 @@
 
   public class PayloadNearSpanWeight extends SpanNearWeight {
 
-    public PayloadNearSpanWeight(List<SpanWeight> subWeights, SpanSimilarity similarity)
+    public PayloadNearSpanWeight(List<SpanWeight> subWeights, IndexSearcher searcher, Map<Term, TermContext> terms)
         throws IOException {
-      super(subWeights, similarity, PayloadSpanCollector.FACTORY);
+      super(subWeights, searcher, terms, PayloadSpanCollector.FACTORY);
     }
 
     @Override
     public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
       PayloadSpanCollector collector = (PayloadSpanCollector) collectorFactory.newCollector();
       Spans spans = super.getSpans(context, acceptDocs, collector);
+      Similarity.SimScorer simScorer = simWeight == null ? null : similarity.simScorer(simWeight, context);
       return (spans == null)
               ? null
-              : new PayloadNearSpanScorer(spans, this, collector, similarity.simScorer(context));
+              : new PayloadNearSpanScorer(spans, this, collector, simScorer);
     }
     
     @Override
@@ -160,7 +162,7 @@
         if (newDoc == doc) {
           float freq = scorer.freq();
           Explanation freqExplanation = Explanation.match(freq, "phraseFreq=" + freq);
-          SimScorer docScorer = similarity.simScorer(context);
+          SimScorer docScorer = similarity.simScorer(simWeight, context);
           Explanation scoreExplanation = docScorer.explain(doc, freqExplanation);
           Explanation expl = Explanation.match(
               scoreExplanation.getValue(),
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index 6a0420d..55f2c81 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -30,7 +30,6 @@
 import org.apache.lucene.search.spans.SpanCollector;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanScorer;
-import org.apache.lucene.search.spans.SpanSimilarity;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.SpanWeight;
 import org.apache.lucene.search.spans.Spans;
@@ -38,6 +37,8 @@
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
 import java.util.Objects;
 
 /**
@@ -71,8 +72,7 @@
   @Override
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
     TermContext context = TermContext.build(searcher.getTopReaderContext(), term);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, searcher.termStatistics(term, context));
-    return new PayloadTermWeight(context, similarity);
+    return new PayloadTermWeight(context, searcher, needsScores ? Collections.singletonMap(term, context) : null);
   }
 
   private static class PayloadTermCollector implements SpanCollector {
@@ -107,18 +107,19 @@
 
   private class PayloadTermWeight extends SpanTermWeight {
 
-    public PayloadTermWeight(TermContext context, SpanSimilarity similarity)
+    public PayloadTermWeight(TermContext context, IndexSearcher searcher, Map<Term, TermContext> terms)
         throws IOException {
-      super(context, similarity, PayloadSpanCollector.FACTORY);
+      super(context, searcher, terms, PayloadSpanCollector.FACTORY);
     }
 
     @Override
     public PayloadTermSpanScorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
       PayloadTermCollector collector = new PayloadTermCollector();
       Spans spans = super.getSpans(context, acceptDocs, collector);
+      Similarity.SimScorer simScorer = simWeight == null ? null : similarity.simScorer(simWeight, context);
       return (spans == null)
               ? null
-              : new PayloadTermSpanScorer(spans, this, collector, similarity.simScorer(context));
+              : new PayloadTermSpanScorer(spans, this, collector, simScorer);
     }
 
     protected class PayloadTermSpanScorer extends SpanScorer {
@@ -208,7 +209,7 @@
         if (newDoc == doc) {
           float freq = scorer.sloppyFreq();
           Explanation freqExplanation = Explanation.match(freq, "phraseFreq=" + freq);
-          SimScorer docScorer = similarity.simScorer(context);
+          SimScorer docScorer = similarity.simScorer(simWeight, context);
           Explanation scoreExplanation = docScorer.explain(doc, freqExplanation);
           Explanation expl = Explanation.match(
               scoreExplanation.getValue(),
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
index 198d7fe..40066a3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainQuery.java
@@ -21,6 +21,7 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
 
@@ -54,9 +55,9 @@
     final SpanWeight bigWeight;
     final SpanWeight littleWeight;
 
-    public SpanContainWeight(SpanSimilarity similarity, SpanCollectorFactory factory,
+    public SpanContainWeight(IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory,
                              SpanWeight bigWeight, SpanWeight littleWeight) throws IOException {
-      super(SpanContainQuery.this, similarity, factory);
+      super(SpanContainQuery.this, searcher, terms, factory);
       this.bigWeight = bigWeight;
       this.littleWeight = littleWeight;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainingQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainingQuery.java
index 88c304a..ab97dda 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainingQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanContainingQuery.java
@@ -18,11 +18,14 @@
  */
 
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Map;
 
 /** Keep matches that contain another Spans. */
 public class SpanContainingQuery extends SpanContainQuery {
@@ -51,15 +54,15 @@
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
     SpanWeight bigWeight = big.createWeight(searcher, false, factory);
     SpanWeight littleWeight = little.createWeight(searcher, false, factory);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, bigWeight, littleWeight);
-    return new SpanContainingWeight(similarity, factory, bigWeight, littleWeight);
+    return new SpanContainingWeight(searcher, needsScores ? getTermContexts(bigWeight, littleWeight) : null,
+                                      factory, bigWeight, littleWeight);
   }
 
   public class SpanContainingWeight extends SpanContainWeight {
 
-    public SpanContainingWeight(SpanSimilarity similarity, SpanCollectorFactory factory,
+    public SpanContainingWeight(IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory,
                                 SpanWeight bigWeight, SpanWeight littleWeight) throws IOException {
-      super(similarity, factory, bigWeight, littleWeight);
+      super(searcher, terms, factory, bigWeight, littleWeight);
     }
 
     /**
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
index b7011ea..8799e96 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
@@ -174,10 +174,7 @@
     
       @Override
       protected void addClause(SpanOrQuery topLevel, Term term, int docCount, float boost, TermContext states) {
-        // TODO: would be nice to not lose term-state here.
-        // we could add a hack option to SpanOrQuery, but the hack would only work if this is the top-level Span
-        // (if you put this thing in another span query, it would extractTerms/double-seek anyway)
-        final SpanTermQuery q = new SpanTermQuery(term);
+        final SpanTermQuery q = new SpanTermQuery(term, states);
         q.setBoost(boost);
         topLevel.addClause(q);
       }
@@ -221,7 +218,7 @@
 
         @Override
         protected void addClause(SpanOrQuery topLevel, Term term, int docFreq, float boost, TermContext states) {
-          final SpanTermQuery q = new SpanTermQuery(term);
+          final SpanTermQuery q = new SpanTermQuery(term, states);
           q.setBoost(boost);
           topLevel.addClause(q);
         }
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
index a0c431b..742abff 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
@@ -117,16 +117,15 @@
     for (SpanQuery q : clauses) {
       subWeights.add(q.createWeight(searcher, false, factory));
     }
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, subWeights);
-    return new SpanNearWeight(subWeights, similarity, factory);
+    return new SpanNearWeight(subWeights, searcher, needsScores ? getTermContexts(subWeights) : null, factory);
   }
 
   public class SpanNearWeight extends SpanWeight {
 
     final List<SpanWeight> subWeights;
 
-    public SpanNearWeight(List<SpanWeight> subWeights, SpanSimilarity similarity, SpanCollectorFactory factory) throws IOException {
-      super(SpanNearQuery.this, similarity, factory);
+    public SpanNearWeight(List<SpanWeight> subWeights, IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory) throws IOException {
+      super(SpanNearQuery.this, searcher, terms, factory);
       this.subWeights = subWeights;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java
index 73eea43..578dae4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java
@@ -106,8 +106,8 @@
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
     SpanWeight includeWeight = include.createWeight(searcher, false, factory);
     SpanWeight excludeWeight = exclude.createWeight(searcher, false, factory);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, includeWeight);
-    return new SpanNotWeight(similarity, factory, includeWeight, excludeWeight);
+    return new SpanNotWeight(searcher, needsScores ? getTermContexts(includeWeight, excludeWeight) : null,
+                                  factory, includeWeight, excludeWeight);
   }
 
   public class SpanNotWeight extends SpanWeight {
@@ -115,9 +115,9 @@
     final SpanWeight includeWeight;
     final SpanWeight excludeWeight;
 
-    public SpanNotWeight(SpanSimilarity similarity, SpanCollectorFactory factory,
+    public SpanNotWeight(IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory,
                          SpanWeight includeWeight, SpanWeight excludeWeight) throws IOException {
-      super(SpanNotQuery.this, similarity, factory);
+      super(SpanNotQuery.this, searcher, terms, factory);
       this.includeWeight = includeWeight;
       this.excludeWeight = excludeWeight;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java
index 85a8909..2c4e255 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java
@@ -143,16 +143,15 @@
     for (SpanQuery q : clauses) {
       subWeights.add(q.createWeight(searcher, false, factory));
     }
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, subWeights);
-    return new SpanOrWeight(similarity, factory, subWeights);
+    return new SpanOrWeight(searcher, needsScores ? getTermContexts(subWeights) : null, factory, subWeights);
   }
 
   public class SpanOrWeight extends SpanWeight {
 
     final List<SpanWeight> subWeights;
 
-    public SpanOrWeight(SpanSimilarity similarity, SpanCollectorFactory factory, List<SpanWeight> subWeights) throws IOException {
-      super(SpanOrQuery.this, similarity, factory);
+    public SpanOrWeight(IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory, List<SpanWeight> subWeights) throws IOException {
+      super(SpanOrQuery.this, searcher, terms, factory);
       this.subWeights = subWeights;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java
index a848bb3..f125581 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java
@@ -71,22 +71,21 @@
   @Override
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
     SpanWeight matchWeight = match.createWeight(searcher, false, factory);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, matchWeight);
-    return new SpanPositionCheckWeight(matchWeight, similarity, factory);
+    return new SpanPositionCheckWeight(matchWeight, searcher, needsScores ? getTermContexts(matchWeight) : null, factory);
   }
 
   public class SpanPositionCheckWeight extends SpanWeight {
 
     final SpanWeight matchWeight;
 
-    public SpanPositionCheckWeight(SpanWeight matchWeight, SpanSimilarity similarity,
+    public SpanPositionCheckWeight(SpanWeight matchWeight, IndexSearcher searcher, Map<Term, TermContext> terms,
                                    SpanCollectorFactory collectorFactory) throws IOException {
-      super(SpanPositionCheckQuery.this, similarity, collectorFactory);
+      super(SpanPositionCheckQuery.this, searcher, terms, collectorFactory);
       this.matchWeight = matchWeight;
     }
 
-    public SpanPositionCheckWeight(SpanWeight matchWeight, SpanSimilarity similarity) throws IOException {
-      this(matchWeight, similarity, SpanCollectorFactory.NO_OP_FACTORY);
+    public SpanPositionCheckWeight(SpanWeight matchWeight, IndexSearcher searcher, Map<Term, TermContext> terms) throws IOException {
+      this(matchWeight, searcher, terms, SpanCollectorFactory.NO_OP_FACTORY);
     }
 
     @Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanQuery.java
index fea6d98..6460b38 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanQuery.java
@@ -17,11 +17,16 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Weight;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.TreeMap;
 
 /** Base class for span-based queries. */
 public abstract class SpanQuery extends Query {
@@ -46,4 +51,28 @@
   public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
     return createWeight(searcher, needsScores, SpanCollectorFactory.NO_OP_FACTORY);
   }
+
+  /**
+   * Build a map of terms to termcontexts, for use in constructing SpanWeights
+   * @lucene.internal
+   */
+  protected static Map<Term, TermContext> getTermContexts(SpanWeight... weights) {
+    Map<Term, TermContext> terms = new TreeMap<>();
+    for (SpanWeight w : weights) {
+      w.extractTermContexts(terms);
+    }
+    return terms;
+  }
+
+  /**
+   * Build a map of terms to termcontexts, for use in constructing SpanWeights
+   * @lucene.internal
+   */
+  protected static Map<Term, TermContext> getTermContexts(Collection<SpanWeight> weights) {
+    Map<Term, TermContext> terms = new TreeMap<>();
+    for (SpanWeight w : weights) {
+      w.extractTermContexts(terms);
+    }
+    return terms;
+  }
 }
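
With SpanSimilarity removed, per-term statistics are now gathered via getTermContexts and handed to the SpanWeight constructors only when scoring is requested. A minimal usage sketch of the resulting public API follows (not part of this patch; the field name and terms are illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.search.spans.SpanNearQuery;
    import org.apache.lucene.search.spans.SpanQuery;
    import org.apache.lucene.search.spans.SpanTermQuery;

    class SpanWeightSketch {
      static Weight scoringWeight(IndexSearcher searcher) throws IOException {
        SpanQuery near = new SpanNearQuery(new SpanQuery[] {
            new SpanTermQuery(new Term("body", "apache")),
            new SpanTermQuery(new Term("body", "lucene"))
        }, 2, true);
        // needsScores=true: the weight collects term contexts and builds its SimWeight;
        // with false it skips statistics entirely and no scores are produced.
        return near.createWeight(searcher, true);
      }
    }
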
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanSimilarity.java
deleted file mode 100644
index 517f7ab..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanSimilarity.java
+++ /dev/null
@@ -1,202 +0,0 @@
-package org.apache.lucene.search.spans;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermContext;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.search.similarities.Similarity;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Encapsulates similarity statistics required for SpanScorers
- */
-public abstract class SpanSimilarity {
-
-  /**
-   * The field term statistics are taken from
-   */
-  protected final String field;
-
-  /**
-   * Create a new SpanSimilarity
-   * @param field the similarity field for term statistics
-   */
-  protected SpanSimilarity(String field) {
-    this.field = field;
-  }
-
-  /**
-   * Create a SimScorer for this SpanSimilarity's statistics
-   * @param context the LeafReaderContext to calculate the scorer for
-   * @return a SimScorer, or null if no scoring is required
-   * @throws IOException on error
-   */
-  public abstract Similarity.SimScorer simScorer(LeafReaderContext context) throws IOException;
-
-  /**
-   * @return the field for term statistics
-   */
-  public String getField() {
-    return field;
-  }
-
-  /**
-   * See {@link org.apache.lucene.search.Weight#getValueForNormalization()}
-   *
-   * @return the value for normalization
-   * @throws IOException on error
-   */
-  public abstract float getValueForNormalization() throws IOException;
-
-  /**
-   * See {@link org.apache.lucene.search.Weight#normalize(float,float)}
-   *
-   * @param queryNorm the query norm
-   * @param topLevelBoost the top level boost
-   */
-  public abstract void normalize(float queryNorm, float topLevelBoost);
-
-  /**
-   * A SpanSimilarity class that calculates similarity statistics based on the term statistics
-   * of a set of terms.
-   */
-  public static class ScoringSimilarity extends SpanSimilarity {
-
-    private final Similarity similarity;
-    private final Similarity.SimWeight stats;
-
-    private ScoringSimilarity(SpanQuery query, IndexSearcher searcher, TermStatistics... termStats) throws IOException {
-      super(query.getField());
-      this.similarity = searcher.getSimilarity();
-      this.stats = similarity.computeWeight(query.getBoost(), searcher.collectionStatistics(field), termStats);
-    }
-
-    @Override
-    public Similarity.SimScorer simScorer(LeafReaderContext context) throws IOException {
-      return similarity.simScorer(stats, context);
-    }
-
-    @Override
-    public String getField() {
-      return field;
-    }
-
-    @Override
-    public float getValueForNormalization() throws IOException {
-      return stats.getValueForNormalization();
-    }
-
-    @Override
-    public void normalize(float queryNorm, float topLevelBoost) {
-      stats.normalize(queryNorm, topLevelBoost);
-    }
-
-  }
-
-  /**
-   * A SpanSimilarity class that does no scoring
-   */
-  public static class NonScoringSimilarity extends SpanSimilarity {
-
-    private NonScoringSimilarity(String field) {
-      super(field);
-    }
-
-    @Override
-    public Similarity.SimScorer simScorer(LeafReaderContext context) throws IOException {
-      return null;
-    }
-
-    @Override
-    public float getValueForNormalization() throws IOException {
-      return 0;
-    }
-
-    @Override
-    public void normalize(float queryNorm, float topLevelBoost) {
-
-    }
-  }
-
-  /**
-   * Build a SpanSimilarity
-   * @param query the SpanQuery to be run
-   * @param searcher the searcher
-   * @param needsScores whether or not scores are required
-   * @param stats an array of TermStatistics to use in creating the similarity
-   * @return a SpanSimilarity, or null if there are no statistics to use
-   * @throws IOException on error
-   */
-  public static SpanSimilarity build(SpanQuery query, IndexSearcher searcher,
-                                     boolean needsScores, TermStatistics... stats) throws IOException {
-    return needsScores ? new ScoringSimilarity(query, searcher, stats) : new NonScoringSimilarity(query.getField());
-  }
-
-  /**
-   * Build a SpanSimilarity
-   * @param query the SpanQuery to be run
-   * @param searcher the searcher
-   * @param needsScores whether or not scores are required
-   * @param weights a set of {@link org.apache.lucene.search.spans.SpanWeight}s to extract terms from
-   * @return a SpanSimilarity, or null if there are no statistics to use
-   * @throws IOException on error
-   */
-  public static SpanSimilarity build(SpanQuery query, IndexSearcher searcher, boolean needsScores, List<SpanWeight> weights) throws IOException {
-    return build(query, searcher, needsScores, weights.toArray(new SpanWeight[weights.size()]));
-  }
-
-  /**
-   * Build a SpanSimilarity
-   * @param query the SpanQuery to run
-   * @param searcher the searcher
-   * @param needsScores whether or not scores are required
-   * @param weights an array of {@link org.apache.lucene.search.spans.SpanWeight}s to extract terms from
-   * @return a SpanSimilarity, or null if there are no statistics to use
-   * @throws IOException on error
-   */
-  public static SpanSimilarity build(SpanQuery query, IndexSearcher searcher, boolean needsScores, SpanWeight... weights) throws IOException {
-
-    if (!needsScores)
-      return new NonScoringSimilarity(query.getField());
-
-    Map<Term, TermContext> contexts = new HashMap<>();
-    for (SpanWeight w : weights) {
-      w.extractTermContexts(contexts);
-    }
-
-    if (contexts.size() == 0)
-      return null;
-
-    TermStatistics[] stats = new TermStatistics[contexts.size()];
-    int i = 0;
-    for (Term term : contexts.keySet()) {
-      stats[i] = searcher.termStatistics(term, contexts.get(term));
-      i++;
-    }
-
-    return new ScoringSimilarity(query, searcher, stats);
-  }
-
-}
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index f6647a1..6a0c9ce 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -17,8 +17,10 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
@@ -29,6 +31,7 @@
 import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -37,11 +40,23 @@
  * This should not be used for terms that are indexed at position Integer.MAX_VALUE.
  */
 public class SpanTermQuery extends SpanQuery {
-  protected Term term;
+
+  protected final Term term;
+  protected final TermContext termContext;
 
   /** Construct a SpanTermQuery matching the named term's spans. */
   public SpanTermQuery(Term term) {
     this.term = Objects.requireNonNull(term);
+    this.termContext = null;
+  }
+
+  /**
+   * Expert: Construct a SpanTermQuery matching the named term's spans, using
+   * the provided TermContext
+   */
+  public SpanTermQuery(Term term, TermContext context) {
+    this.term = Objects.requireNonNull(term);
+    this.termContext = context;
   }
 
   /** Return the term whose spans are matched. */
@@ -52,18 +67,25 @@
 
   @Override
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
-    TermContext context = TermContext.build(searcher.getTopReaderContext(), term);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, searcher.termStatistics(term, context));
-    return new SpanTermWeight(context, similarity, factory);
+    final TermContext context;
+    final IndexReaderContext topContext = searcher.getTopReaderContext();
+    if (termContext == null || termContext.topReaderContext != topContext) {
+      context = TermContext.build(topContext, term);
+    }
+    else {
+      context = termContext;
+    }
+    return new SpanTermWeight(context, searcher, needsScores ? Collections.singletonMap(term, context) : null, factory);
   }
 
   public class SpanTermWeight extends SpanWeight {
 
     final TermContext termContext;
 
-    public SpanTermWeight(TermContext termContext, SpanSimilarity similarity, SpanCollectorFactory factory) throws IOException {
-      super(SpanTermQuery.this, similarity, factory);
+    public SpanTermWeight(TermContext termContext, IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory) throws IOException {
+      super(SpanTermQuery.this, searcher, terms, factory);
       this.termContext = termContext;
+      assert termContext != null : "TermContext must not be null";
     }
 
     @Override
@@ -79,8 +101,11 @@
     @Override
     public Spans getSpans(final LeafReaderContext context, Bits acceptDocs, SpanCollector collector) throws IOException {
 
+      assert termContext.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termContext.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context) + ")";
+
       final TermState state = termContext.get(context.ord);
       if (state == null) { // term is not present in that reader
+        assert context.reader().docFreq(term) == 0 : "no termstate found but term exists in reader term=" + term;
         return null;
       }
 
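
The new expert constructor lets callers (such as the rewritten SpanMultiTermQueryWrapper above) hand over an already-built TermContext so weight creation does not re-seek the term. A small sketch, assuming an arbitrary field and term:

    import java.io.IOException;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.spans.SpanTermQuery;

    class SpanTermQuerySketch {
      static SpanTermQuery withSharedState(IndexSearcher searcher) throws IOException {
        Term term = new Term("body", "lucene");
        // build the term state once against the searcher's top reader ...
        TermContext state = TermContext.build(searcher.getTopReaderContext(), term);
        // ... and reuse it; createWeight() only rebuilds it if the top reader differs
        return new SpanTermQuery(term, state);
      }
    }
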
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 45d5a9a..8e360ed 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -21,9 +21,13 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.Bits;
 
@@ -35,20 +39,39 @@
  */
 public abstract class SpanWeight extends Weight {
 
-  protected final SpanSimilarity similarity;
+  protected final Similarity similarity;
+  protected final Similarity.SimWeight simWeight;
   protected final SpanCollectorFactory collectorFactory;
+  protected final String field;
 
   /**
    * Create a new SpanWeight
    * @param query the parent query
-   * @param similarity a SpanSimilarity to be used for scoring
+   * @param searcher the IndexSearcher to query against
+   * @param termContexts a map of terms to termcontexts for use in building the similarity.  May
+   *                     be null if scores are not required
    * @param collectorFactory a SpanCollectorFactory to be used for Span collection
    * @throws IOException on error
    */
-  public SpanWeight(SpanQuery query, SpanSimilarity similarity, SpanCollectorFactory collectorFactory) throws IOException {
+  public SpanWeight(SpanQuery query, IndexSearcher searcher, Map<Term, TermContext> termContexts, SpanCollectorFactory collectorFactory) throws IOException {
     super(query);
-    this.similarity = similarity;
+    this.field = query.getField();
+    this.similarity = searcher.getSimilarity();
     this.collectorFactory = collectorFactory;
+    this.simWeight = buildSimWeight(query, searcher, termContexts);
+  }
+
+  private Similarity.SimWeight buildSimWeight(SpanQuery query, IndexSearcher searcher, Map<Term, TermContext> termContexts) throws IOException {
+    if (termContexts == null || termContexts.size() == 0 || query.getField() == null)
+      return null;
+    TermStatistics[] termStats = new TermStatistics[termContexts.size()];
+    int i = 0;
+    for (Term term : termContexts.keySet()) {
+      termStats[i] = searcher.termStatistics(term, termContexts.get(term));
+      i++;
+    }
+    CollectionStatistics collectionStats = searcher.collectionStatistics(query.getField());
+    return searcher.getSimilarity().computeWeight(query.getBoost(), collectionStats, termStats);
   }
 
   /**
@@ -81,27 +104,28 @@
 
   @Override
   public float getValueForNormalization() throws IOException {
-    return similarity == null ? 1.0f : similarity.getValueForNormalization();
+    return simWeight == null ? 1.0f : simWeight.getValueForNormalization();
   }
 
   @Override
   public void normalize(float queryNorm, float topLevelBoost) {
-    if (similarity != null) {
-      similarity.normalize(queryNorm, topLevelBoost);
+    if (simWeight != null) {
+      simWeight.normalize(queryNorm, topLevelBoost);
     }
   }
 
   @Override
   public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
-    if (similarity == null) {
+    if (field == null) {
       return null;
     }
-    Terms terms = context.reader().terms(similarity.getField());
+    Terms terms = context.reader().terms(field);
     if (terms != null && terms.hasPositions() == false) {
-      throw new IllegalStateException("field \"" + similarity.getField() + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
+      throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
     }
     Spans spans = getSpans(context, acceptDocs, collectorFactory.newCollector());
-    return (spans == null) ? null : new SpanScorer(spans, this, similarity.simScorer(context));
+    Similarity.SimScorer simScorer = simWeight == null ? null : similarity.simScorer(simWeight, context);
+    return (spans == null) ? null : new SpanScorer(spans, this, simScorer);
   }
 
   @Override
@@ -111,7 +135,7 @@
       int newDoc = scorer.advance(doc);
       if (newDoc == doc) {
         float freq = scorer.sloppyFreq();
-        SimScorer docScorer = similarity.simScorer(context);
+        SimScorer docScorer = similarity.simScorer(simWeight, context);
         Explanation freqExplanation = Explanation.match(freq, "phraseFreq=" + freq);
         Explanation scoreExplanation = docScorer.explain(doc, freqExplanation);
         return Explanation.match(scoreExplanation.getValue(),
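
SpanWeight now owns the Similarity plumbing directly: a SimWeight is computed from the supplied term contexts (or left null for non-scoring weights), and normalization and per-leaf SimScorers are derived from it. A conceptual sketch of the normalization round-trip, mirroring the steps IndexSearcher.createNormalizedWeight performs; nothing below is new API:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.search.spans.SpanQuery;

    class NormalizationSketch {
      static Weight normalized(IndexSearcher searcher, SpanQuery query) throws IOException {
        Weight w = query.createWeight(searcher, true);
        float v = w.getValueForNormalization();            // simWeight's value, or 1.0f when non-scoring
        float queryNorm = searcher.getSimilarity().queryNorm(v);
        w.normalize(queryNorm, 1.0f);                      // no-op when simWeight == null
        return w;
      }
    }
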
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWithinQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWithinQuery.java
index 7b13d81..3b7ef38 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWithinQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWithinQuery.java
@@ -18,11 +18,14 @@
  */
 
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Map;
 
 /** Keep matches that are contained within another Spans. */
 public class SpanWithinQuery extends SpanContainQuery {
@@ -52,15 +55,15 @@
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
     SpanWeight bigWeight = big.createWeight(searcher, false, factory);
     SpanWeight littleWeight = little.createWeight(searcher, false, factory);
-    SpanSimilarity similarity = SpanSimilarity.build(this, searcher, needsScores, bigWeight, littleWeight);
-    return new SpanWithinWeight(similarity, factory, bigWeight, littleWeight);
+    return new SpanWithinWeight(searcher, needsScores ? getTermContexts(bigWeight, littleWeight) : null,
+                                      factory, bigWeight, littleWeight);
   }
 
   public class SpanWithinWeight extends SpanContainWeight {
 
-    public SpanWithinWeight(SpanSimilarity similarity, SpanCollectorFactory factory,
+    public SpanWithinWeight(IndexSearcher searcher, Map<Term, TermContext> terms, SpanCollectorFactory factory,
                             SpanWeight bigWeight, SpanWeight littleWeight) throws IOException {
-      super(similarity, factory, bigWeight, littleWeight);
+      super(searcher, terms, factory, bigWeight, littleWeight);
     }
 
     /**
diff --git a/lucene/core/src/java/org/apache/lucene/store/BaseDirectory.java b/lucene/core/src/java/org/apache/lucene/store/BaseDirectory.java
index 616c90e..9950465 100644
--- a/lucene/core/src/java/org/apache/lucene/store/BaseDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/BaseDirectory.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 
 /**
  * Base implementation for a concrete {@link Directory} that uses a {@link LockFactory} for locking.
@@ -40,8 +41,8 @@
   }
 
   @Override
-  public final Lock makeLock(String name) {
-    return lockFactory.makeLock(this, name);
+  public final Lock obtainLock(String name) throws IOException {
+    return lockFactory.obtainLock(this, name);
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/store/Directory.java b/lucene/core/src/java/org/apache/lucene/store/Directory.java
index 3ab6dbf..ac2f5fc 100644
--- a/lucene/core/src/java/org/apache/lucene/store/Directory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/Directory.java
@@ -50,8 +50,7 @@
   public abstract String[] listAll() throws IOException;
 
   /** Removes an existing file in the directory. */
-  public abstract void deleteFile(String name)
-       throws IOException;
+  public abstract void deleteFile(String name) throws IOException;
 
   /**
    * Returns the length of a file in the directory. This method follows the
@@ -110,10 +109,14 @@
     return new BufferedChecksumIndexInput(openInput(name, context));
   }
   
-  /** Construct a {@link Lock}.
+  /** 
+   * Returns an obtained {@link Lock}.
    * @param name the name of the lock file
+   * @throws LockObtainFailedException (optional specific exception) if the lock could
+   *         not be obtained because it is currently held elsewhere.
+   * @throws IOException if any i/o error occurs attempting to gain the lock
    */
-  public abstract Lock makeLock(String name);
+  public abstract Lock obtainLock(String name) throws IOException;
 
   /** Closes the store. */
   @Override
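
obtainLock replaces makeLock: the returned Lock is already held on return, and failure is reported by exception rather than a boolean. A minimal usage sketch (the lock name is arbitrary):

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    class ObtainLockSketch {
      static void runLocked(Directory dir, Runnable work) throws IOException {
        try (Lock lock = dir.obtainLock("my.lock")) {   // throws if the lock is already held
          work.run();
        } catch (LockObtainFailedException e) {
          // held elsewhere; there is no built-in retry/poll loop any more
        }
      }
    }
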
diff --git a/lucene/core/src/java/org/apache/lucene/store/FSLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/FSLockFactory.java
index e49ef18..d666075 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FSLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FSLockFactory.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 /**
  * Base class for file system based locking implementation.
  * This class is explicitly checking that the passed {@link Directory}
@@ -32,14 +34,17 @@
   }
 
   @Override
-  public final Lock makeLock(Directory dir, String lockName) {
+  public final Lock obtainLock(Directory dir, String lockName) throws IOException {
     if (!(dir instanceof FSDirectory)) {
       throw new UnsupportedOperationException(getClass().getSimpleName() + " can only be used with FSDirectory subclasses, got: " + dir);
     }
-    return makeFSLock((FSDirectory) dir, lockName);
+    return obtainFSLock((FSDirectory) dir, lockName);
   }
   
-  /** Implement this method to create a lock for a FSDirectory instance. */
-  protected abstract Lock makeFSLock(FSDirectory dir, String lockName);
+  /** 
+   * Implement this method to obtain a lock for a FSDirectory instance. 
+   * @throws IOException if the lock could not be obtained.
+   */
+  protected abstract Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException;
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
index 7a42087..24750fa 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
@@ -71,8 +71,8 @@
   }
   
   @Override
-  public Lock makeLock(String name) {
-    return getDirectory(name).makeLock(name);
+  public Lock obtainLock(String name) throws IOException {
+    return getDirectory(name).obtainLock(name);
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
index 765b5c2..9364242 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
@@ -90,8 +90,8 @@
   }
 
   @Override
-  public Lock makeLock(String name) {
-    return in.makeLock(name);
+  public Lock obtainLock(String name) throws IOException {
+    return in.obtainLock(name);
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/store/Lock.java b/lucene/core/src/java/org/apache/lucene/store/Lock.java
index a59c59b..70b855b 100644
--- a/lucene/core/src/java/org/apache/lucene/store/Lock.java
+++ b/lucene/core/src/java/org/apache/lucene/store/Lock.java
@@ -20,126 +20,39 @@
 import java.io.Closeable;
 import java.io.IOException;
 
-import org.apache.lucene.util.ThreadInterruptedException;
-
 /** An interprocess mutex lock.
  * <p>Typical use might look like:<pre class="prettyprint">
- * new Lock.With(directory.makeLock("my.lock")) {
- *     public Object doBody() {
- *       <i>... code to execute while locked ...</i>
- *     }
- *   }.run();
+ *   try (final Lock lock = directory.obtainLock("my.lock")) {
+ *     // ... code to execute while locked ...
+ *   }
  * </pre>
  *
- * @see Directory#makeLock(String)
+ * @see Directory#obtainLock(String)
  *
  * @lucene.internal
  */
 public abstract class Lock implements Closeable {
 
-  /** How long {@link #obtain(long)} waits, in milliseconds,
-   *  in between attempts to acquire the lock. */
-  public static long LOCK_POLL_INTERVAL = 1000;
-
-  /** Pass this value to {@link #obtain(long)} to try
-   *  forever to obtain the lock. */
-  public static final long LOCK_OBTAIN_WAIT_FOREVER = -1;
-
-  /** Attempts to obtain exclusive access and immediately return
-   *  upon success or failure.  Use {@link #close} to
-   *  release the lock.
-   * @return true iff exclusive access is obtained
+  /** 
+   * Releases exclusive access.
+   * <p>
+   * Note that exceptions thrown from close may require
+   * human intervention, as it may mean the lock was no
+   * longer valid, or that fs permissions prevent removal
+   * of the lock file, or other reasons.
+   * <p>
+   * {@inheritDoc} 
+   * @throws LockReleaseFailedException (optional specific exception) if 
+   *         the lock could not be properly released.
    */
-  public abstract boolean obtain() throws IOException;
-
-  /**
-   * If a lock obtain called, this failureReason may be set
-   * with the "root cause" Exception as to why the lock was
-   * not obtained.
-   */
-  protected Throwable failureReason;
-
-  /** Attempts to obtain an exclusive lock within amount of
-   *  time given. Polls once per {@link #LOCK_POLL_INTERVAL}
-   *  (currently 1000) milliseconds until lockWaitTimeout is
-   *  passed.
-   * @param lockWaitTimeout length of time to wait in
-   *        milliseconds or {@link
-   *        #LOCK_OBTAIN_WAIT_FOREVER} to retry forever
-   * @return true if lock was obtained
-   * @throws LockObtainFailedException if lock wait times out
-   * @throws IllegalArgumentException if lockWaitTimeout is
-   *         out of bounds
-   * @throws IOException if obtain() throws IOException
-   */
-  public final boolean obtain(long lockWaitTimeout) throws IOException {
-    failureReason = null;
-    boolean locked = obtain();
-    if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER)
-      throw new IllegalArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
-
-    long maxSleepCount = lockWaitTimeout / LOCK_POLL_INTERVAL;
-    long sleepCount = 0;
-    while (!locked) {
-      if (lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER && sleepCount++ >= maxSleepCount) {
-        String reason = "Lock obtain timed out: " + this.toString();
-        if (failureReason != null) {
-          reason += ": " + failureReason;
-        }
-        throw new LockObtainFailedException(reason, failureReason);
-      }
-      try {
-        Thread.sleep(LOCK_POLL_INTERVAL);
-      } catch (InterruptedException ie) {
-        throw new ThreadInterruptedException(ie);
-      }
-      locked = obtain();
-    }
-    return locked;
-  }
-
-  /** Releases exclusive access. */
   public abstract void close() throws IOException;
-
-  /** Returns true if the resource is currently locked.  Note that one must
-   * still call {@link #obtain()} before using the resource. */
-  public abstract boolean isLocked() throws IOException;
-
-
-  /** Utility class for executing code with exclusive access. */
-  public abstract static class With {
-    private Lock lock;
-    private long lockWaitTimeout;
-
-
-    /** Constructs an executor that will grab the named lock. */
-    public With(Lock lock, long lockWaitTimeout) {
-      this.lock = lock;
-      this.lockWaitTimeout = lockWaitTimeout;
-    }
-
-    /** Code to execute with exclusive access. */
-    protected abstract Object doBody() throws IOException;
-
-    /** Calls {@link #doBody} while <i>lock</i> is obtained.  Blocks if lock
-     * cannot be obtained immediately.  Retries to obtain lock once per second
-     * until it is obtained, or until it has tried ten times. Lock is released when
-     * {@link #doBody} exits.
-     * @throws LockObtainFailedException if lock could not
-     * be obtained
-     * @throws IOException if {@link Lock#obtain} throws IOException
-     */
-    public Object run() throws IOException {
-      boolean locked = false;
-      try {
-         locked = lock.obtain(lockWaitTimeout);
-         return doBody();
-      } finally {
-        if (locked) {
-          lock.close();
-        }
-      }
-    }
-  }
-
+  
+  /** 
+   * Best effort check that this lock is still valid. Locks
+   * could become invalidated externally for a number of reasons,
+   * for example if a user deletes the lock file manually or
+   * when a network filesystem is in use. 
+   * @throws IOException if the lock is no longer valid.
+   */
+  public abstract void ensureValid() throws IOException;
 }
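
With the polling API (obtain(long), LOCK_POLL_INTERVAL, Lock.With) gone, a lock is either obtained or the factory throws, and ensureValid() gives callers a best-effort re-check before destructive work. A before/after sketch, assuming the conventional "write.lock" name:

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;

    class EnsureValidSketch {
      // Before (removed API), callers polled and checked a boolean:
      //   Lock l = dir.makeLock("write.lock");
      //   if (l.obtain(Lock.LOCK_OBTAIN_WAIT_FOREVER)) { try { ... } finally { l.close(); } }
      // After, the returned lock is already held and can be re-validated:
      static void guardedWork(Directory dir) throws IOException {
        try (Lock lock = dir.obtainLock("write.lock")) {
          lock.ensureValid();   // throws if the lock was invalidated externally
          // ... destructive work ...
        }
      }
    }
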
diff --git a/lucene/core/src/java/org/apache/lucene/store/LockFactory.java b/lucene/core/src/java/org/apache/lucene/store/LockFactory.java
index 3a6632f..ca92590 100644
--- a/lucene/core/src/java/org/apache/lucene/store/LockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/LockFactory.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 
 /**
  * <p>Base class for Locking implementation.  {@link Directory} uses
@@ -46,9 +47,12 @@
 public abstract class LockFactory {
 
   /**
-   * Return a new Lock instance identified by lockName.
+   * Return a newly obtained Lock instance identified by lockName.
    * @param lockName name of the lock to be created.
+   * @throws LockObtainFailedException (optional specific exception) if the lock could
+   *         not be obtained because it is currently held elsewhere.
+   * @throws IOException if any i/o error occurs attempting to gain the lock
    */
-  public abstract Lock makeLock(Directory dir, String lockName);
+  public abstract Lock obtainLock(Directory dir, String lockName) throws IOException;
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/store/LockObtainFailedException.java b/lucene/core/src/java/org/apache/lucene/store/LockObtainFailedException.java
index 14b0b54..dedfea1 100644
--- a/lucene/core/src/java/org/apache/lucene/store/LockObtainFailedException.java
+++ b/lucene/core/src/java/org/apache/lucene/store/LockObtainFailedException.java
@@ -24,7 +24,7 @@
  * could not be acquired.  This
  * happens when a writer tries to open an index
  * that another writer already has open.
- * @see Lock#obtain(long)
+ * @see LockFactory#obtainLock(Directory, String)
  */
 public class LockObtainFailedException extends IOException {
   public LockObtainFailedException(String message) {
diff --git a/lucene/core/src/java/org/apache/lucene/store/LockStressTest.java b/lucene/core/src/java/org/apache/lucene/store/LockStressTest.java
index 7726734..8c2d8a8 100644
--- a/lucene/core/src/java/org/apache/lucene/store/LockStressTest.java
+++ b/lucene/core/src/java/org/apache/lucene/store/LockStressTest.java
@@ -38,10 +38,12 @@
  */ 
 
 public class LockStressTest {
+  
+  static final String LOCK_FILE_NAME = "test.lock";
 
   @SuppressForbidden(reason = "System.out required: command line tool")
+  @SuppressWarnings("try")
   public static void main(String[] args) throws Exception {
-
     if (args.length != 7) {
       System.out.println("Usage: java org.apache.lucene.store.LockStressTest myID verifierHost verifierPort lockFactoryClassName lockDirName sleepTimeMS count\n" +
                          "\n" +
@@ -91,7 +93,6 @@
       out.write(myID);
       out.flush();
       LockFactory verifyLF = new VerifyingLockFactory(lockFactory, in, out);
-      Lock l = verifyLF.makeLock(lockDir, "test.lock");
       final Random rnd = new Random();
       
       // wait for starting gun
@@ -100,25 +101,22 @@
       }
       
       for (int i = 0; i < count; i++) {
-        boolean obtained = false;
-        try {
-          obtained = l.obtain(rnd.nextInt(100) + 10);
-        } catch (LockObtainFailedException e) {}
-        
-        if (obtained) {
+        try (final Lock l = verifyLF.obtainLock(lockDir, LOCK_FILE_NAME)) {
           if (rnd.nextInt(10) == 0) {
             if (rnd.nextBoolean()) {
               verifyLF = new VerifyingLockFactory(getNewLockFactory(lockFactoryClassName), in, out);
             }
-            final Lock secondLock = verifyLF.makeLock(lockDir, "test.lock");
-            if (secondLock.obtain()) {
-              throw new IOException("Double Obtain");
+            try (final Lock secondLock = verifyLF.obtainLock(lockDir, LOCK_FILE_NAME)) {
+              throw new IOException("Double obtain");
+            } catch (LockObtainFailedException loe) {
+              // pass
             }
           }
           Thread.sleep(sleepTimeMS);
-          l.close();
+        } catch (LockObtainFailedException loe) {
+          // obtain failed
         }
-        
+
         if (i % 500 == 0) {
           System.out.println((i * 100. / count) + "% done.");
         }
diff --git a/lucene/core/src/java/org/apache/lucene/store/LockValidatingDirectoryWrapper.java b/lucene/core/src/java/org/apache/lucene/store/LockValidatingDirectoryWrapper.java
new file mode 100644
index 0000000..389c56d
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/store/LockValidatingDirectoryWrapper.java
@@ -0,0 +1,64 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+/** 
+ * This class makes a best-effort check that a provided {@link Lock}
+ * is valid before any destructive filesystem operation.
+ */
+public final class LockValidatingDirectoryWrapper extends FilterDirectory {
+  private final Lock writeLock;
+
+  public LockValidatingDirectoryWrapper(Directory in, Lock writeLock) {
+    super(in);
+    this.writeLock = writeLock;
+  }
+
+  @Override
+  public void deleteFile(String name) throws IOException {
+    writeLock.ensureValid();
+    in.deleteFile(name);
+  }
+
+  @Override
+  public IndexOutput createOutput(String name, IOContext context) throws IOException {
+    writeLock.ensureValid();
+    return in.createOutput(name, context);
+  }
+
+  @Override
+  public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException {
+    writeLock.ensureValid();
+    in.copyFrom(from, src, dest, context);
+  }
+
+  @Override
+  public void renameFile(String source, String dest) throws IOException {
+    writeLock.ensureValid();
+    in.renameFile(source, dest);
+  }
+
+  @Override
+  public void sync(Collection<String> names) throws IOException {
+    writeLock.ensureValid();
+    in.sync(names);
+  }
+}
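
A short usage sketch of the new wrapper (file and lock names are illustrative); each destructive call re-checks the supplied write lock before touching the filesystem:

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockValidatingDirectoryWrapper;

    class LockValidatingSketch {
      static void writeGuarded(Directory dir) throws IOException {
        try (Lock writeLock = dir.obtainLock("write.lock")) {
          Directory guarded = new LockValidatingDirectoryWrapper(dir, writeLock);
          // createOutput() calls writeLock.ensureValid() before creating the file
          try (IndexOutput out = guarded.createOutput("_demo.tmp", IOContext.DEFAULT)) {
            out.writeInt(42);
          }
        }
      }
    }
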
diff --git a/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java
index e14ed13..7ab06ae 100644
--- a/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java
@@ -19,10 +19,11 @@
 
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileLock;
-import java.nio.channels.OverlappingFileLockException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -78,136 +79,127 @@
    */
   public static final NativeFSLockFactory INSTANCE = new NativeFSLockFactory();
 
+  private static final Set<String> LOCK_HELD = Collections.synchronizedSet(new HashSet<String>());
+
   private NativeFSLockFactory() {}
 
   @Override
-  protected Lock makeFSLock(FSDirectory dir, String lockName) {
-    return new NativeFSLock(dir.getDirectory(), lockName);
-  }
-  
-  static final class NativeFSLock extends Lock {
+  protected Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException {
+    Path lockDir = dir.getDirectory();
+    
+    // Ensure that lockDir exists and is a directory.
+    // note: this will fail if lockDir is a symlink
+    Files.createDirectories(lockDir);
+    
+    Path lockFile = lockDir.resolve(lockName);
 
-    private FileChannel channel;
-    private FileLock lock;
-    private Path path;
-    private Path lockDir;
-    private static final Set<String> LOCK_HELD = Collections.synchronizedSet(new HashSet<String>());
-
-
-    public NativeFSLock(Path lockDir, String lockFileName) {
-      this.lockDir = lockDir;
-      path = lockDir.resolve(lockFileName);
+    try {
+      Files.createFile(lockFile);
+    } catch (IOException ignore) {
+      // we must create the file to have a truly canonical path.
+      // if it's already created, we don't care. if it can't be created, it will fail below.
     }
-
-
-    @Override
-    public synchronized boolean obtain() throws IOException {
-
-      if (lock != null) {
-        // Our instance is already locked:
-        return false;
-      }
-
-      // Ensure that lockDir exists and is a directory.
-      Files.createDirectories(lockDir);
+    
+    // fails if the lock file does not exist
+    final Path realPath = lockFile.toRealPath();
+    
+    // used as a best-effort check, to see if the underlying file has changed
+    final FileTime creationTime = Files.readAttributes(realPath, BasicFileAttributes.class).creationTime();
+    
+    if (LOCK_HELD.add(realPath.toString())) {
+      FileChannel channel = null;
+      FileLock lock = null;
       try {
-        Files.createFile(path);
-      } catch (IOException ignore) {
-        // we must create the file to have a truly canonical path.
-        // if it's already created, we don't care. if it cant be created, it will fail below.
-      }
-      final Path canonicalPath = path.toRealPath();
-      // Make sure nobody else in-process has this lock held
-      // already, and, mark it held if not:
-      // This is a pretty crazy workaround for some documented
-      // but yet awkward JVM behavior:
-      //
-      //   On some systems, closing a channel releases all locks held by the Java virtual machine on the underlying file
-      //   regardless of whether the locks were acquired via that channel or via another channel open on the same file.
-      //   It is strongly recommended that, within a program, a unique channel be used to acquire all locks on any given
-      //   file.
-      //
-      // This essentially means if we close "A" channel for a given file all locks might be released... the odd part
-      // is that we can't re-obtain the lock in the same JVM but from a different process if that happens. Nevertheless
-      // this is super trappy. See LUCENE-5738
-      boolean obtained = false;
-      if (LOCK_HELD.add(canonicalPath.toString())) {
-        try {
-          channel = FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
-          try {
-            lock = channel.tryLock();
-            obtained = lock != null;
-          } catch (IOException | OverlappingFileLockException e) {
-            // At least on OS X, we will sometimes get an
-            // intermittent "Permission Denied" IOException,
-            // which seems to simply mean "you failed to get
-            // the lock".  But other IOExceptions could be
-            // "permanent" (eg, locking is not supported via
-            // the filesystem).  So, we record the failure
-            // reason here; the timeout obtain (usually the
-            // one calling us) will use this as "root cause"
-            // if it fails to get the lock.
-            failureReason = e;
-          }
-        } finally {
-          if (obtained == false) { // not successful - clear up and move out
-            clearLockHeld(path);
-            final FileChannel toClose = channel;
-            channel = null;
-            IOUtils.closeWhileHandlingException(toClose);
-          }
+        channel = FileChannel.open(realPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
+        lock = channel.tryLock();
+        if (lock != null) {
+          return new NativeFSLock(lock, channel, realPath, creationTime);
+        } else {
+          throw new LockObtainFailedException("Lock held by another program: " + realPath);
+        }
+      } finally {
+        if (lock == null) { // not successful - clear up and move out
+          IOUtils.closeWhileHandlingException(channel); // TODO: addSuppressed
+          clearLockHeld(realPath);  // clear LOCK_HELD last 
         }
       }
-      return obtained;
+    } else {
+      throw new LockObtainFailedException("Lock held by this virtual machine: " + realPath);
+    }
+  }
+  
+  private static final void clearLockHeld(Path path) throws IOException {
+    boolean remove = LOCK_HELD.remove(path.toString());
+    if (remove == false) {
+      throw new AlreadyClosedException("Lock path was cleared but never marked as held: " + path);
+    }
+  }
+
+  // TODO: kind of bogus we even pass channel:
+  // FileLock has an accessor, but mockfs doesnt yet mock the locks, too scary atm.
+
+  static final class NativeFSLock extends Lock {
+    final FileLock lock;
+    final FileChannel channel;
+    final Path path;
+    final FileTime creationTime;
+    volatile boolean closed;
+    
+    NativeFSLock(FileLock lock, FileChannel channel, Path path, FileTime creationTime) {
+      this.lock = lock;
+      this.channel = channel;
+      this.path = path;
+      this.creationTime = creationTime;
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // check we are still in the locks map (some debugger or something crazy didn't remove us)
+      if (!LOCK_HELD.contains(path.toString())) {
+        throw new AlreadyClosedException("Lock path unexpectedly cleared from map: " + this);
+      }
+      // check our lock wasn't invalidated.
+      if (!lock.isValid()) {
+        throw new AlreadyClosedException("FileLock invalidated by an external force: " + this);
+      }
+      // try to validate the underlying file descriptor.
+      // this will throw IOException if something is wrong.
+      long size = channel.size();
+      if (size != 0) {
+        throw new AlreadyClosedException("Unexpected lock file size: " + size + ", (lock=" + this + ")");
+      }
+      // try to validate the backing file name, that it still exists,
+      // and has the same creation time as when we obtained the lock. 
+      // if it differs, someone deleted our lock file (and we are ineffective)
+      FileTime ctime = Files.readAttributes(path, BasicFileAttributes.class).creationTime(); 
+      if (!creationTime.equals(ctime)) {
+        throw new AlreadyClosedException("Underlying file changed by an external force at " + creationTime + ", (lock=" + this + ")");
+      }
     }
 
     @Override
     public synchronized void close() throws IOException {
-      try {
-        if (lock != null) {
-          try {
-            lock.release();
-            lock = null;
-          } finally {
-            clearLockHeld(path);
-          }
-        }
-      } finally {
-        IOUtils.close(channel);
-        channel = null;
+      if (closed) {
+        return;
       }
-    }
-
-    private static final void clearLockHeld(Path path) throws IOException {
-      path = path.toRealPath();
-      boolean remove = LOCK_HELD.remove(path.toString());
-      assert remove : "Lock was cleared but never marked as held";
-    }
-
-    @Override
-    public synchronized boolean isLocked() {
-      // The test for is isLocked is not directly possible with native file locks:
-      
-      // First a shortcut, if a lock reference in this instance is available
-      if (lock != null) return true;
-      
-      // Look if lock file is definitely not present; if not, there can definitely be no lock!
-      if (Files.notExists(path)) return false;
-      
-      // Try to obtain and release (if was locked) the lock
-      try {
-        boolean obtained = obtain();
-        if (obtained) close();
-        return !obtained;
-      } catch (IOException ioe) {
-        return false;
-      }    
+      // NOTE: we don't validate, as unlike SimpleFSLockFactory, we can't break others' locks
+      // first release the lock, then the channel
+      try (FileChannel channel = this.channel;
+           FileLock lock = this.lock) {
+        assert lock != null;
+        assert channel != null;
+      } finally {
+        closed = true;
+        clearLockHeld(path);
+      }
     }
 
     @Override
     public String toString() {
-      return "NativeFSLock@" + path;
+      return "NativeFSLock(path=" + path + ",impl=" + lock + ",ctime=" + creationTime + ")"; 
     }
   }
-
 }
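For context on the API this hunk moves to: callers no longer poll Lock.obtain()/isLocked(); Directory.obtainLock() either returns a live Lock or throws LockObtainFailedException, and ensureValid() is checked while holding it. A minimal usage sketch, using only the signatures visible in this patch (the index path and lock name are illustrative):

import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;

public class ObtainLockExample {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(Paths.get("/tmp/index"), NativeFSLockFactory.INSTANCE);
    try (Lock lock = dir.obtainLock("write.lock")) {
      // throws AlreadyClosedException (or IOException) if the lock was invalidated,
      // e.g. the lock file was deleted or the FileLock was released externally
      lock.ensureValid();
      // ... guarded work goes here ...
    } catch (LockObtainFailedException e) {
      // lock is held by another process, or already by this JVM (LOCK_HELD set)
    }
    dir.close();
  }
}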
diff --git a/lucene/core/src/java/org/apache/lucene/store/NoLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/NoLockFactory.java
index a5417df..7a209c5 100644
--- a/lucene/core/src/java/org/apache/lucene/store/NoLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/NoLockFactory.java
@@ -37,23 +37,17 @@
   private NoLockFactory() {}
 
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
+  public Lock obtainLock(Directory dir, String lockName) {
     return SINGLETON_LOCK;
   }
   
   private static class NoLock extends Lock {
     @Override
-    public boolean obtain() throws IOException {
-      return true;
-    }
-
-    @Override
     public void close() {
     }
 
     @Override
-    public boolean isLocked() {
-      return false;
+    public void ensureValid() throws IOException {
     }
 
     @Override
@@ -61,5 +55,4 @@
       return "NoLock";
     }
   }
-
 }
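Because NoLock's ensureValid() is now a no-op, disabling locking is still just a matter of passing the singleton factory; a short sketch (only safe when the application guarantees a single writer by other means):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;

public class NoLockExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory(NoLockFactory.INSTANCE);
    // every obtainLock() call returns the same no-op singleton and never throws
    try (Lock lock = dir.obtainLock("write.lock")) {
      lock.ensureValid(); // always passes for NoLock
    }
    dir.close();
  }
}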
diff --git a/lucene/core/src/java/org/apache/lucene/store/SimpleFSLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
index 87e1080..0985ef6 100644
--- a/lucene/core/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/SimpleFSLockFactory.java
@@ -17,26 +17,24 @@
  * limitations under the License.
  */
 
-import java.io.File;
 import java.io.IOException;
+import java.nio.file.AccessDeniedException;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
 
 /**
  * <p>Implements {@link LockFactory} using {@link
  * Files#createFile}.</p>
  *
- * <p><b>NOTE:</b> the {@linkplain File#createNewFile() javadocs
- * for <code>File.createNewFile()</code>} contain a vague
- * yet spooky warning about not using the API for file
- * locking.  This warning was added due to <a target="_top"
- * href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4676183">this
- * bug</a>, and in fact the only known problem with using
- * this API for locking is that the Lucene write lock may
- * not be released when the JVM exits abnormally.</p>
-
- * <p>When this happens, a {@link LockObtainFailedException}
- * is hit when trying to create a writer, in which case you
+ * <p>The main downside with using this API for locking is 
+ * that the Lucene write lock may not be released when 
+ * the JVM exits abnormally.</p>
+ *
+ * <p>When this happens, a {@link LockObtainFailedException}
+ * is hit when trying to create a writer, in which case you may
  * need to explicitly clear the lock file first by
  * manually removing the file.  But, first be certain that
  * no writer is in fact writing to the index otherwise you
@@ -70,55 +68,83 @@
   private SimpleFSLockFactory() {}
 
   @Override
-  protected Lock makeFSLock(FSDirectory dir, String lockName) {
-    return new SimpleFSLock(dir.getDirectory(), lockName);
+  protected Lock obtainFSLock(FSDirectory dir, String lockName) throws IOException {
+    Path lockDir = dir.getDirectory();
+    
+    // Ensure that lockDir exists and is a directory.
+    // note: this will fail if lockDir is a symlink
+    Files.createDirectories(lockDir);
+    
+    Path lockFile = lockDir.resolve(lockName);
+    
+    // create the file: this will fail if it already exists
+    try {
+      Files.createFile(lockFile);
+    } catch (FileAlreadyExistsException | AccessDeniedException e) {
+      // convert the optional, NIO-specific exceptions into our own specific LockObtainFailedException
+      throw new LockObtainFailedException("Lock held elsewhere: " + lockFile, e);
+    }
+    
+    // used as a best-effort check, to see if the underlying file has changed
+    final FileTime creationTime = Files.readAttributes(lockFile, BasicFileAttributes.class).creationTime();
+    
+    return new SimpleFSLock(lockFile, creationTime);
   }
   
-  static class SimpleFSLock extends Lock {
+  static final class SimpleFSLock extends Lock {
+    private final Path path;
+    private final FileTime creationTime;
+    private volatile boolean closed;
 
-    Path lockFile;
-    Path lockDir;
-
-    public SimpleFSLock(Path lockDir, String lockFileName) {
-      this.lockDir = lockDir;
-      lockFile = lockDir.resolve(lockFileName);
+    SimpleFSLock(Path path, FileTime creationTime) throws IOException {
+      this.path = path;
+      this.creationTime = creationTime;
     }
 
     @Override
-    public boolean obtain() throws IOException {
-      try {
-        Files.createDirectories(lockDir);
-        Files.createFile(lockFile);
-        return true;
-      } catch (IOException ioe) {
-        // On Windows, on concurrent createNewFile, the 2nd process gets "access denied".
-        // In that case, the lock was not aquired successfully, so return false.
-        // We record the failure reason here; the obtain with timeout (usually the
-        // one calling us) will use this as "root cause" if it fails to get the lock.
-        failureReason = ioe;
-        return false;
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // try to validate the backing file name, that it still exists,
+      // and has the same creation time as when we obtained the lock. 
+      // if it differs, someone deleted our lock file (and we are ineffective)
+      FileTime ctime = Files.readAttributes(path, BasicFileAttributes.class).creationTime(); 
+      if (!creationTime.equals(ctime)) {
+        throw new AlreadyClosedException("Underlying file changed by an external force at " + creationTime + ", (lock=" + this + ")");
       }
     }
 
     @Override
-    public void close() throws LockReleaseFailedException {
-      // TODO: wierd that clearLock() throws the raw IOException...
-      try {
-        Files.deleteIfExists(lockFile);
-      } catch (Throwable cause) {
-        throw new LockReleaseFailedException("failed to delete " + lockFile, cause);
+    public synchronized void close() throws IOException {
+      if (closed) {
+        return;
       }
-    }
-
-    @Override
-    public boolean isLocked() {
-      return Files.exists(lockFile);
+      try {
+        // NOTE: unlike NativeFSLockFactory, we can potentially delete someone else's
+        // lock if things have gone wrong. we do a best-effort check (ensureValid) to
+        // avoid doing this.
+        try {
+          ensureValid();
+        } catch (Throwable exc) {
+          // notify the user they may need to intervene.
+          throw new LockReleaseFailedException("Lock file cannot be safely removed. Manual intervention is recommended.", exc);
+        }
+        // we did a best effort check, now try to remove the file. if something goes wrong,
+        // we need to make it clear to the user that the directory may still remain locked.
+        try {
+          Files.delete(path);
+        } catch (Throwable exc) {
+          throw new LockReleaseFailedException("Unable to remove lock file. Manual intervention is recommended.", exc);
+        }
+      } finally {
+        closed = true;
+      }
     }
 
     @Override
     public String toString() {
-      return "SimpleFSLock@" + lockFile;
+      return "SimpleFSLock(path=" + path + ",ctime=" + creationTime + ")";
     }
   }
-
 }
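The creation-time validation added here (and mirrored in NativeFSLock) reduces to capturing the lock file's FileTime when the lock is obtained and comparing it again in ensureValid(); if the file was deleted and recreated, the times differ. A standalone sketch of just that check, with an illustrative path:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileTime;

public class CreationTimeCheck {
  public static void main(String[] args) throws IOException {
    Path lockFile = Paths.get("/tmp/index/write.lock");
    // captured once, when the lock is obtained
    FileTime atObtain = Files.readAttributes(lockFile, BasicFileAttributes.class).creationTime();
    // ... later, the equivalent of ensureValid(): re-read and compare
    FileTime now = Files.readAttributes(lockFile, BasicFileAttributes.class).creationTime();
    if (!atObtain.equals(now)) {
      throw new IOException("lock file was deleted/recreated externally: " + lockFile);
    }
  }
}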
diff --git a/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
index a376cbb..68d3f34 100644
--- a/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
@@ -24,7 +24,7 @@
  * Implements {@link LockFactory} for a single in-process instance,
  * meaning all locking will take place through this one instance.
  * Only use this {@link LockFactory} when you are certain all
- * IndexReaders and IndexWriters for a given index are running
+ * IndexWriters for a given index are running
  * against a single shared in-process Directory instance.  This is
  * currently the default locking for RAMDirectory.
  *
@@ -33,41 +33,53 @@
 
 public final class SingleInstanceLockFactory extends LockFactory {
 
-  private final HashSet<String> locks = new HashSet<>();
+  final HashSet<String> locks = new HashSet<>();
 
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
-    return new SingleInstanceLock(locks, lockName);
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
+    synchronized (locks) {
+      if (locks.add(lockName)) {
+        return new SingleInstanceLock(lockName);
+      } else {
+        throw new LockObtainFailedException("lock instance already obtained: (dir=" + dir + ", lockName=" + lockName + ")");
+      }
+    }
   }
 
-  private static class SingleInstanceLock extends Lock {
-
+  private class SingleInstanceLock extends Lock {
     private final String lockName;
-    private final HashSet<String> locks;
+    private volatile boolean closed;
 
-    public SingleInstanceLock(HashSet<String> locks, String lockName) {
-      this.locks = locks;
+    public SingleInstanceLock(String lockName) {
       this.lockName = lockName;
     }
 
     @Override
-    public boolean obtain() throws IOException {
-      synchronized(locks) {
-        return locks.add(lockName);
+    public void ensureValid() throws IOException {
+      if (closed) {
+        throw new AlreadyClosedException("Lock instance already released: " + this);
+      }
+      // check we are still in the locks map (some debugger or something crazy didn't remove us)
+      synchronized (locks) {
+        if (!locks.contains(lockName)) {
+          throw new AlreadyClosedException("Lock instance was invalidated from map: " + this);
+        }
       }
     }
 
     @Override
-    public void close() {
-      synchronized(locks) {
-        locks.remove(lockName);
+    public synchronized void close() throws IOException {
+      if (closed) {
+        return;
       }
-    }
-
-    @Override
-    public boolean isLocked() {
-      synchronized(locks) {
-        return locks.contains(lockName);
+      try {
+        synchronized (locks) {
+          if (!locks.remove(lockName)) {
+            throw new AlreadyClosedException("Lock was already released: " + this);
+          }
+        }
+      } finally {
+        closed = true;
       }
     }
 
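With this change, a second in-process obtain for the same name fails fast instead of returning false; a hedged usage sketch (the lock name is illustrative):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;

public class SingleInstanceExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory(new SingleInstanceLockFactory());
    try (Lock first = dir.obtainLock("write.lock")) {
      try {
        dir.obtainLock("write.lock"); // same name, same factory instance
        throw new AssertionError("expected LockObtainFailedException");
      } catch (LockObtainFailedException expected) {
        // the name is already present in the factory's in-process set
      }
    }
    // once the first lock is closed, the name can be obtained again
    dir.obtainLock("write.lock").close();
    dir.close();
  }
}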
diff --git a/lucene/core/src/java/org/apache/lucene/store/VerifyingLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/VerifyingLockFactory.java
index 2dafe9a..3790673 100644
--- a/lucene/core/src/java/org/apache/lucene/store/VerifyingLockFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/VerifyingLockFactory.java
@@ -44,8 +44,22 @@
   private class CheckedLock extends Lock {
     private final Lock lock;
 
-    public CheckedLock(Lock lock) {
+    public CheckedLock(Lock lock) throws IOException {
       this.lock = lock;
+      verify((byte) 1);
+    }
+
+    @Override
+    public void ensureValid() throws IOException {
+      lock.ensureValid();
+    }
+
+    @Override
+    public void close() throws IOException {
+      try (Lock l = lock) {
+        l.ensureValid();
+        verify((byte) 0);
+      }
     }
 
     private void verify(byte message) throws IOException {
@@ -59,27 +73,6 @@
         throw new IOException("Protocol violation.");
       }
     }
-
-    @Override
-    public synchronized boolean obtain() throws IOException {
-      boolean obtained = lock.obtain();
-      if (obtained)
-        verify((byte) 1);
-      return obtained;
-    }
-
-    @Override
-    public synchronized boolean isLocked() throws IOException {
-      return lock.isLocked();
-    }
-
-    @Override
-    public synchronized void close() throws IOException {
-      if (isLocked()) {
-        verify((byte) 0);
-        lock.close();
-      }
-    }
   }
 
   /**
@@ -94,7 +87,7 @@
   }
 
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
-    return new CheckedLock(lf.makeLock(dir, lockName));
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
+    return new CheckedLock(lf.obtainLock(dir, lockName));
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
index ebee4f2..5dc8c9b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
@@ -251,17 +251,29 @@
     public FST.Arc<T> arc;
     public T cost;
     public final IntsRefBuilder input;
+    public final float boost;
+    public final CharSequence context;
 
     /** Sole constructor */
     public FSTPath(T cost, FST.Arc<T> arc, IntsRefBuilder input) {
+      this(cost, arc, input, 0, null);
+    }
+
+    public FSTPath(T cost, FST.Arc<T> arc, IntsRefBuilder input, float boost, CharSequence context) {
       this.arc = new FST.Arc<T>().copyFrom(arc);
       this.cost = cost;
       this.input = input;
+      this.boost = boost;
+      this.context = context;
+    }
+
+    public FSTPath<T> newPath(T cost, IntsRefBuilder input) {
+      return new FSTPath<>(cost, this.arc, input, this.boost, this.context);
     }
 
     @Override
     public String toString() {
-      return "input=" + input + " cost=" + cost;
+      return "input=" + input.get() + " cost=" + cost + "context=" + context + "boost=" + boost;
     }
   }
 
@@ -295,7 +307,8 @@
 
     private final FST.Arc<T> scratchArc = new FST.Arc<>();
     
-    final Comparator<T> comparator;
+    private final Comparator<T> comparator;
+    private final Comparator<FSTPath<T>> pathComparator;
 
     TreeSet<FSTPath<T>> queue = null;
 
@@ -307,13 +320,18 @@
      * @param comparator the comparator to select the top N
      */
     public TopNSearcher(FST<T> fst, int topN, int maxQueueDepth, Comparator<T> comparator) {
+      this(fst, topN, maxQueueDepth, comparator, new TieBreakByInputComparator<>(comparator));
+    }
+
+    public TopNSearcher(FST<T> fst, int topN, int maxQueueDepth, Comparator<T> comparator,
+                        Comparator<FSTPath<T>> pathComparator) {
       this.fst = fst;
       this.bytesReader = fst.getBytesReader();
       this.topN = topN;
       this.maxQueueDepth = maxQueueDepth;
       this.comparator = comparator;
-
-      queue = new TreeSet<>(new TieBreakByInputComparator<>(comparator));
+      this.pathComparator = pathComparator;
+      queue = new TreeSet<>(pathComparator);
     }
 
     // If back plus this arc is competitive then add to queue:
@@ -326,7 +344,7 @@
 
       if (queue.size() == maxQueueDepth) {
         FSTPath<T> bottom = queue.last();
-        int comp = comparator.compare(cost, bottom.cost);
+        int comp = pathComparator.compare(path, bottom);
         if (comp > 0) {
           // Doesn't compete
           return;
@@ -354,25 +372,29 @@
       IntsRefBuilder newInput = new IntsRefBuilder();
       newInput.copyInts(path.input.get());
       newInput.append(path.arc.label);
-      final FSTPath<T> newPath = new FSTPath<>(cost, path.arc, newInput);
 
-      queue.add(newPath);
+      queue.add(path.newPath(cost, newInput));
 
       if (queue.size() == maxQueueDepth+1) {
         queue.pollLast();
       }
     }
 
+    public void addStartPaths(FST.Arc<T> node, T startOutput, boolean allowEmptyString, IntsRefBuilder input) throws IOException {
+      addStartPaths(node, startOutput, allowEmptyString, input, 0, null);
+    }
+
     /** Adds all leaving arcs, including 'finished' arc, if
      *  the node is final, from this node into the queue.  */
-    public void addStartPaths(FST.Arc<T> node, T startOutput, boolean allowEmptyString, IntsRefBuilder input) throws IOException {
+    public void addStartPaths(FST.Arc<T> node, T startOutput, boolean allowEmptyString, IntsRefBuilder input,
+                              float boost, CharSequence context) throws IOException {
 
       // De-dup NO_OUTPUT since it must be a singleton:
       if (startOutput.equals(fst.outputs.getNoOutput())) {
         startOutput = fst.outputs.getNoOutput();
       }
 
-      FSTPath<T> path = new FSTPath<>(startOutput, node, input);
+      FSTPath<T> path = new FSTPath<>(startOutput, node, input, boost, context);
       fst.readFirstTargetArc(node, path.arc, bytesReader);
 
       //System.out.println("add start paths");
@@ -493,10 +515,10 @@
           if (path.arc.label == FST.END_LABEL) {
             // Add final output:
             //System.out.println("    done!: " + path);
-            T finalOutput = fst.outputs.add(path.cost, path.arc.output);
-            if (acceptResult(path.input.get(), finalOutput)) {
+            path.cost = fst.outputs.add(path.cost, path.arc.output);
+            if (acceptResult(path)) {
               //System.out.println("    add result: " + path);
-              results.add(new Result<>(path.input.get(), finalOutput));
+              results.add(new Result<>(path.input.get(), path.cost));
             } else {
               rejectCount++;
             }
@@ -510,6 +532,10 @@
       return new TopResults<>(rejectCount + topN <= maxQueueDepth, results);
     }
 
+    protected boolean acceptResult(FSTPath<T> path) {
+      return acceptResult(path.input.get(), path.cost);
+    }
+
     protected boolean acceptResult(IntsRef input, T output) {
       return true;
     }
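The new pathComparator hook and the boost/context fields let a caller rank competing FSTPaths by more than the raw output. A hypothetical comparator, not part of this patch: it assumes Long outputs and that FSTPath's boost/cost fields remain publicly accessible as shown above, preferring higher boost and breaking ties by smaller cost:

import java.util.Comparator;

import org.apache.lucene.util.fst.Util;

public class BoostAwarePathComparator {
  // Illustrative only: higher boost sorts first, then smaller output (cost).
  public static Comparator<Util.FSTPath<Long>> byBoostThenCost() {
    return (a, b) -> {
      int cmp = Float.compare(b.boost, a.boost);
      if (cmp != 0) {
        return cmp;
      }
      return Long.compare(a.cost, b.cost);
    };
  }
}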
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
similarity index 64%
copy from lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
copy to lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
index 8f45f16..5d80362 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesFixedSorted.java
@@ -38,7 +38,7 @@
 // 3-core AMD at 2.5Ghz, 12 GB RAM, 5GB test heap, 2 test JVMs, 2TB SATA.
 @Monster("Takes ~ 6 hours if the heap is 5gb")
 @SuppressSysoutChecks(bugUrl = "Stuff gets printed")
-public class Test2BSortedDocValues extends LuceneTestCase {
+public class Test2BSortedDocValuesFixedSorted extends LuceneTestCase {
   
   // indexes Integer.MAX_VALUE docs with a fixed binary field
   public void testFixedSorted() throws Exception {
@@ -96,66 +96,5 @@
     dir.close();
   }
   
-  // indexes Integer.MAX_VALUE docs with a fixed binary field
-  public void test2BOrds() throws Exception {
-    BaseDirectoryWrapper dir = newFSDirectory(createTempDir("2BOrds"));
-    if (dir instanceof MockDirectoryWrapper) {
-      ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
-    }
-    
-    IndexWriter w = new IndexWriter(dir,
-        new IndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
-        .setRAMBufferSizeMB(256.0)
-        .setMergeScheduler(new ConcurrentMergeScheduler())
-        .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
-        .setCodec(TestUtil.getDefaultCodec()));
-
-    Document doc = new Document();
-    byte bytes[] = new byte[4];
-    BytesRef data = new BytesRef(bytes);
-    SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
-    doc.add(dvField);
-    
-    for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
-      bytes[0] = (byte)(i >> 24);
-      bytes[1] = (byte)(i >> 16);
-      bytes[2] = (byte)(i >> 8);
-      bytes[3] = (byte) i;
-      w.addDocument(doc);
-      if (i % 100000 == 0) {
-        System.out.println("indexed: " + i);
-        System.out.flush();
-      }
-    }
-    
-    w.forceMerge(1);
-    w.close();
-    
-    System.out.println("verifying...");
-    System.out.flush();
-    
-    DirectoryReader r = DirectoryReader.open(dir);
-    int counter = 0;
-    for (LeafReaderContext context : r.leaves()) {
-      LeafReader reader = context.reader();
-      BytesRef scratch = new BytesRef();
-      BinaryDocValues dv = reader.getSortedDocValues("dv");
-      for (int i = 0; i < reader.maxDoc(); i++) {
-        bytes[0] = (byte) (counter >> 24);
-        bytes[1] = (byte) (counter >> 16);
-        bytes[2] = (byte) (counter >> 8);
-        bytes[3] = (byte) counter;
-        counter++;
-        final BytesRef term = dv.get(i);
-        assertEquals(data, term);
-      }
-    }
-    
-    r.close();
-    dir.close();
-  }
-  
   // TODO: variable
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
similarity index 67%
rename from lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
rename to lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
index 8f45f16..d2444e2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
@@ -38,63 +38,7 @@
 // 3-core AMD at 2.5Ghz, 12 GB RAM, 5GB test heap, 2 test JVMs, 2TB SATA.
 @Monster("Takes ~ 6 hours if the heap is 5gb")
 @SuppressSysoutChecks(bugUrl = "Stuff gets printed")
-public class Test2BSortedDocValues extends LuceneTestCase {
-  
-  // indexes Integer.MAX_VALUE docs with a fixed binary field
-  public void testFixedSorted() throws Exception {
-    BaseDirectoryWrapper dir = newFSDirectory(createTempDir("2BFixedSorted"));
-    if (dir instanceof MockDirectoryWrapper) {
-      ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
-    }
-    
-    IndexWriter w = new IndexWriter(dir,
-        new IndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
-        .setRAMBufferSizeMB(256.0)
-        .setMergeScheduler(new ConcurrentMergeScheduler())
-        .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
-        .setCodec(TestUtil.getDefaultCodec()));
-
-    Document doc = new Document();
-    byte bytes[] = new byte[2];
-    BytesRef data = new BytesRef(bytes);
-    SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
-    doc.add(dvField);
-    
-    for (int i = 0; i < IndexWriter.MAX_DOCS; i++) {
-      bytes[0] = (byte)(i >> 8);
-      bytes[1] = (byte) i;
-      w.addDocument(doc);
-      if (i % 100000 == 0) {
-        System.out.println("indexed: " + i);
-        System.out.flush();
-      }
-    }
-    
-    w.forceMerge(1);
-    w.close();
-    
-    System.out.println("verifying...");
-    System.out.flush();
-    
-    DirectoryReader r = DirectoryReader.open(dir);
-    int expectedValue = 0;
-    for (LeafReaderContext context : r.leaves()) {
-      LeafReader reader = context.reader();
-      BinaryDocValues dv = reader.getSortedDocValues("dv");
-      for (int i = 0; i < reader.maxDoc(); i++) {
-        bytes[0] = (byte)(expectedValue >> 8);
-        bytes[1] = (byte) expectedValue;
-        final BytesRef term = dv.get(i);
-        assertEquals(data, term);
-        expectedValue++;
-      }
-    }
-    
-    r.close();
-    dir.close();
-  }
+public class Test2BSortedDocValuesOrds extends LuceneTestCase {
   
   // indexes Integer.MAX_VALUE docs with a fixed binary field
   public void test2BOrds() throws Exception {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index c98f1a6..55197e7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -1268,7 +1268,6 @@
     Directory dest = newDirectory();
 
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
-    iwc.setWriteLockTimeout(1);
     RandomIndexWriter w2 = new RandomIndexWriter(random(), dest, iwc);
 
     try {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 51445f4..59f9e1d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -28,6 +28,7 @@
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
@@ -95,14 +96,7 @@
         IndexReader reader = null;
         int i;
 
-        long savedWriteLockTimeout = IndexWriterConfig.getDefaultWriteLockTimeout();
-        try {
-          IndexWriterConfig.setDefaultWriteLockTimeout(2000);
-          assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout());
-          writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        } finally {
-          IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout);
-        }
+        writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 
         // add 100 documents
         for (i = 0; i < 100; i++) {
@@ -1724,8 +1718,7 @@
     RandomIndexWriter w1 = new RandomIndexWriter(random(), d);
     w1.deleteAll();
     try {
-      new RandomIndexWriter(random(), d, newIndexWriterConfig(null)
-                                           .setWriteLockTimeout(100));
+      new RandomIndexWriter(random(), d, newIndexWriterConfig(null));
       fail("should not be able to create another writer");
     } catch (LockObtainFailedException lofe) {
       // expected
@@ -2721,5 +2714,103 @@
     r.close();
     dir.close();
   }
+
+  // LUCENE-6505
+  public void testNRTSegmentsFile() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w = new IndexWriter(dir, iwc);
+    // creates segments_1
+    w.commit();
+
+    // newly opened NRT reader should see gen=1 segments file
+    DirectoryReader r = DirectoryReader.open(w, true);
+    assertEquals(1, r.getIndexCommit().getGeneration());
+    assertEquals("segments_1", r.getIndexCommit().getSegmentsFileName());
+
+    // newly opened non-NRT reader should see gen=1 segments file
+    DirectoryReader r2 = DirectoryReader.open(dir);
+    assertEquals(1, r2.getIndexCommit().getGeneration());
+    assertEquals("segments_1", r2.getIndexCommit().getSegmentsFileName());
+    r2.close();
+    
+    // make a change and another commit
+    w.addDocument(new Document());
+    w.commit();
+    DirectoryReader r3 = DirectoryReader.openIfChanged(r);
+    r.close();
+    assertNotNull(r3);
+
+    // reopened NRT reader should see gen=2 segments file
+    assertEquals(2, r3.getIndexCommit().getGeneration());
+    assertEquals("segments_2", r3.getIndexCommit().getSegmentsFileName());
+    r3.close();
+
+    // newly opened non-NRT reader should see gen=2 segments file
+    DirectoryReader r4 = DirectoryReader.open(dir);
+    assertEquals(2, r4.getIndexCommit().getGeneration());
+    assertEquals("segments_2", r4.getIndexCommit().getSegmentsFileName());
+    r4.close();
+
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-6505
+  public void testNRTAfterCommit() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w = new IndexWriter(dir, iwc);
+    w.commit();
+
+    w.addDocument(new Document());
+    DirectoryReader r = DirectoryReader.open(w, true);
+    w.commit();
+
+    // commit even with no other changes counts as a "change" that NRT reader reopen will see:
+    DirectoryReader r2 = DirectoryReader.open(dir);
+    assertNotNull(r2);
+    assertEquals(2, r2.getIndexCommit().getGeneration());
+    assertEquals("segments_2", r2.getIndexCommit().getSegmentsFileName());
+
+    IOUtils.close(r, r2, w, dir);
+  }
+
+  // LUCENE-6505
+  public void testNRTAfterSetUserDataWithoutCommit() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w = new IndexWriter(dir, iwc);
+    w.commit();
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    Map<String,String> m = new HashMap<>();
+    m.put("foo", "bar");
+    w.setCommitData(m);
+
+    // setCommitData with no other changes should count as an NRT change:
+    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
+    assertNotNull(r2);
+
+    IOUtils.close(r2, r, w, dir);
+  }
+
+  // LUCENE-6505
+  public void testNRTAfterSetUserDataWithCommit() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w = new IndexWriter(dir, iwc);
+    w.commit();
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    Map<String,String> m = new HashMap<>();
+    m.put("foo", "bar");
+    w.setCommitData(m);
+    w.commit();
+    // setCommitData and also commit, with no other changes, should count as an NRT change:
+    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
+    assertNotNull(r2);
+    IOUtils.close(r, r2, w, dir);
+  }
 }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index 7c9e35e..f71f6d8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -2199,7 +2199,7 @@
     
     // even though we hit exception: we are closed, no locks or files held, index in good state
     assertTrue(iw.isClosed());
-    assertFalse(IndexWriter.isLocked(dir));
+    dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
     
     r = DirectoryReader.open(dir);
     assertEquals(10, r.maxDoc());
@@ -2268,7 +2268,7 @@
       
       // even though we hit exception: we are closed, no locks or files held, index in good state
       assertTrue(iw.isClosed());
-      assertFalse(IndexWriter.isLocked(dir));
+      dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
       
       r = DirectoryReader.open(dir);
       assertEquals(10, r.maxDoc());
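The replacement idiom above (obtain the write lock and immediately close it) can be factored into a tiny helper when a boolean answer is wanted; a hedged sketch, noting that any such check is inherently racy:

import java.io.IOException;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public final class LockChecks {
  // Best-effort stand-in for the old IndexWriter.isLocked(Directory):
  // returns true if someone else currently holds the write lock.
  public static boolean isWriteLocked(Directory dir) throws IOException {
    try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
      return false; // we got the lock, so nobody held it; closing releases it again
    } catch (LockObtainFailedException e) {
      return true;
    }
  }
}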
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
index 9813038..c2299fe 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
@@ -23,9 +23,12 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader.ReaderClosedListener;
 import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -123,59 +126,65 @@
     dir2.close();    
   }
   
-  // closeSubreaders=false
-  public void testReaderClosedListener1() throws Exception {
-    Directory dir1 = getDir1(random());
-    CompositeReader ir1 = DirectoryReader.open(dir1);
+  private void testReaderClosedListener(boolean closeSubReaders, int wrapMultiReaderType) throws IOException {
+    final Directory dir1 = getDir1(random());
+    final CompositeReader ir2, ir1 = DirectoryReader.open(dir1);
+    switch (wrapMultiReaderType) {
+      case 0:
+        ir2 = ir1;
+        break;
+      case 1:
+        // default case, does close subreaders:
+        ir2 = new MultiReader(ir1); break;
+      case 2:
+        ir2 = new MultiReader(new CompositeReader[] {ir1}, false); break;
+      default:
+        throw new AssertionError();
+    }
     
     // with overlapping
-    ParallelCompositeReader pr = new ParallelCompositeReader(false,
-     new CompositeReader[] {ir1},
-     new CompositeReader[] {ir1});
+    ParallelCompositeReader pr = new ParallelCompositeReader(closeSubReaders,
+     new CompositeReader[] {ir2},
+     new CompositeReader[] {ir2});
 
     final int[] listenerClosedCount = new int[1];
 
     assertEquals(3, pr.leaves().size());
 
     for(LeafReaderContext cxt : pr.leaves()) {
-      cxt.reader().addReaderClosedListener(new ReaderClosedListener() {
-          @Override
-          public void onClose(IndexReader reader) {
-            listenerClosedCount[0]++;
-          }
-        });
+      cxt.reader().addReaderClosedListener(reader -> listenerClosedCount[0]++);
     }
     pr.close();
-    ir1.close();
+    if (!closeSubReaders) {
+      ir1.close();
+    }
     assertEquals(3, listenerClosedCount[0]);
+    
+    // We have to close the extra MultiReader, because it will not close its own subreaders:
+    if (wrapMultiReaderType == 2) {
+      ir2.close();
+    }
     dir1.close();
   }
 
-  // closeSubreaders=true
+  public void testReaderClosedListener1() throws Exception {
+    testReaderClosedListener(false, 0);
+  }
+
   public void testReaderClosedListener2() throws Exception {
-    Directory dir1 = getDir1(random());
-    CompositeReader ir1 = DirectoryReader.open(dir1);
-    
-    // with overlapping
-    ParallelCompositeReader pr = new ParallelCompositeReader(true,
-     new CompositeReader[] {ir1},
-     new CompositeReader[] {ir1});
+    testReaderClosedListener(true, 0);
+  }
 
-    final int[] listenerClosedCount = new int[1];
+  public void testReaderClosedListener3() throws Exception {
+    testReaderClosedListener(false, 1);
+  }
 
-    assertEquals(3, pr.leaves().size());
+  public void testReaderClosedListener4() throws Exception {
+    testReaderClosedListener(true, 1);
+  }
 
-    for(LeafReaderContext cxt : pr.leaves()) {
-      cxt.reader().addReaderClosedListener(new ReaderClosedListener() {
-          @Override
-          public void onClose(IndexReader reader) {
-            listenerClosedCount[0]++;
-          }
-        });
-    }
-    pr.close();
-    assertEquals(3, listenerClosedCount[0]);
-    dir1.close();
+  public void testReaderClosedListener5() throws Exception {
+    testReaderClosedListener(false, 2);
   }
 
   public void testCloseInnerReader() throws Exception {
@@ -395,7 +404,7 @@
     ParallelCompositeReader pr = new ParallelCompositeReader(new CompositeReader[] {new MultiReader(ir1)});
     
     final String s = pr.toString();
-    assertTrue("toString incorrect: " + s, s.startsWith("ParallelCompositeReader(ParallelCompositeReader(ParallelLeafReader("));
+    assertTrue("toString incorrect (should be flattened): " + s, s.startsWith("ParallelCompositeReader(ParallelLeafReader("));
 
     pr.close();
     dir1.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSleepingLockWrapper.java b/lucene/core/src/test/org/apache/lucene/index/TestSleepingLockWrapper.java
new file mode 100644
index 0000000..daa3952
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSleepingLockWrapper.java
@@ -0,0 +1,49 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.index.SleepingLockWrapper;
+import org.apache.lucene.store.BaseLockFactoryTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util.TestUtil;
+
+/** Simple tests for SleepingLockWrapper */
+public class TestSleepingLockWrapper extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    long lockWaitTimeout = TestUtil.nextLong(random(), 20, 100);
+    long pollInterval = TestUtil.nextLong(random(), 2, 10);
+    
+    int which = random().nextInt(3);
+    switch (which) {
+      case 0:
+        return new SleepingLockWrapper(newDirectory(random(), new SingleInstanceLockFactory()), lockWaitTimeout, pollInterval);
+      case 1:
+        return new SleepingLockWrapper(newFSDirectory(path), lockWaitTimeout, pollInterval);
+      default:
+        return new SleepingLockWrapper(newFSDirectory(path), lockWaitTimeout, pollInterval);
+    }
+  }
+  
+  // TODO: specific tests to this impl
+}
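With the write-lock timeout gone from IndexWriterConfig (see the TestIndexWriter and TestAddIndexes hunks above), waiting for a busy lock is done either via a wrapper like the one tested here or by retrying obtainLock directly. A minimal retry sketch, with illustrative timeout parameters:

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public final class ObtainWithTimeout {
  // Polls obtainLock() until it succeeds or the timeout elapses, then rethrows.
  public static Lock obtain(Directory dir, String name, long timeoutMillis, long pollMillis)
      throws IOException, InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (true) {
      try {
        return dir.obtainLock(name);
      } catch (LockObtainFailedException e) {
        if (System.currentTimeMillis() >= deadline) {
          throw e;
        }
        Thread.sleep(pollMillis);
      }
    }
  }
}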
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
index fdbdb11..8534fbf 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
@@ -86,14 +86,12 @@
         assertFalse(slowFileExists(d2, fname));
       }
 
-      Lock lock = dir.makeLock(lockname);
-      assertTrue(lock.obtain());
+      Lock lock = dir.obtainLock(lockname);
 
-      for (int j=0; j<dirs.length; j++) {
-        FSDirectory d2 = dirs[j];
-        Lock lock2 = d2.makeLock(lockname);
+      for (Directory other : dirs) {
         try {
-          assertFalse(lock2.obtain(1));
+          other.obtainLock(lockname);
+          fail("didnt get exception");
         } catch (LockObtainFailedException e) {
           // OK
         }
@@ -102,8 +100,7 @@
       lock.close();
       
       // now lock with different dir
-      lock = dirs[(i+1)%dirs.length].makeLock(lockname);
-      assertTrue(lock.obtain());
+      lock = dirs[(i+1)%dirs.length].obtainLock(lockname);
       lock.close();
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLock.java b/lucene/core/src/test/org/apache/lucene/store/TestLock.java
deleted file mode 100644
index e66b4fa..0000000
--- a/lucene/core/src/test/org/apache/lucene/store/TestLock.java
+++ /dev/null
@@ -1,55 +0,0 @@
-package org.apache.lucene.store;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-import java.io.IOException;
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestLock extends LuceneTestCase {
-
-    public void testObtain() {
-        LockMock lock = new LockMock();
-        Lock.LOCK_POLL_INTERVAL = 10;
-
-        try {
-            lock.obtain(Lock.LOCK_POLL_INTERVAL);
-            fail("Should have failed to obtain lock");
-        } catch (IOException e) {
-            assertEquals("should attempt to lock more than once", lock.lockAttempts, 2);
-        }
-    }
-
-    private class LockMock extends Lock {
-        public int lockAttempts;
-
-        @Override
-        public boolean obtain() {
-            lockAttempts++;
-            return false;
-        }
-        @Override
-        public void close() {
-            // do nothing
-        }
-        @Override
-        public boolean isLocked() {
-            return false;
-        }
-    }
-}
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
index 8b582d3..d3eca51 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -18,8 +18,6 @@
  */
 
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -27,16 +25,9 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestLockFactory extends LuceneTestCase {
@@ -58,15 +49,6 @@
         // Both write lock and commit lock should have been created:
         assertEquals("# of unique locks created (after instantiating IndexWriter)",
                      1, lf.locksCreated.size());
-        assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
-                   lf.makeLockCount >= 1);
-        
-        for(final String lockName : lf.locksCreated.keySet()) {
-            MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
-            assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
-                       lock.lockAttempts > 0);
-        }
-        
         writer.close();
     }
 
@@ -75,7 +57,6 @@
     // Verify: NoLockFactory allows two IndexWriters
     public void testRAMDirectoryNoLocking() throws IOException {
         MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(NoLockFactory.INSTANCE));
-        dir.setAssertLocks(false); // we are gonna explicitly test we get this back
 
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
         writer.commit(); // required so the second open succeed 
@@ -95,240 +76,29 @@
         }
     }
 
-    // Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
-    // Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
-    public void testDefaultRAMDirectory() throws IOException {
-        RAMDirectory dir = new RAMDirectory();
-
-        assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.lockFactory,
-                   dir.lockFactory instanceof SingleInstanceLockFactory);
-
-        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
-
-        // Create a 2nd IndexWriter.  This should fail:
-        IndexWriter writer2 = null;
-        try {
-            writer2 = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-            fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
-        } catch (IOException e) {
-        }
-
-        writer.close();
-        if (writer2 != null) {
-            writer2.close();
-        }
-    }
-
-    // Verify: do stress test, by opening IndexReaders and
-    // IndexWriters over & over in 2 threads and making sure
-    // no unexpected exceptions are raised:
-    @Nightly
-    public void testStressLocksSimpleFSLockFactory() throws Exception {
-      _testStressLocks(SimpleFSLockFactory.INSTANCE, createTempDir("index.TestLockFactory6"));
-    }
-
-    // Verify: do stress test, by opening IndexReaders and
-    // IndexWriters over & over in 2 threads and making sure
-    // no unexpected exceptions are raised, but use
-    // NativeFSLockFactory:
-    @Nightly
-    public void testStressLocksNativeFSLockFactory() throws Exception {
-      Path dir = createTempDir("index.TestLockFactory7");
-      _testStressLocks(NativeFSLockFactory.INSTANCE, dir);
-    }
-
-    public void _testStressLocks(LockFactory lockFactory, Path indexDir) throws Exception {
-        Directory dir = newFSDirectory(indexDir, lockFactory);
-
-        // First create a 1 doc index:
-        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
-        addDoc(w);
-        w.close();
-
-        WriterThread writer = new WriterThread(100, dir);
-        SearcherThread searcher = new SearcherThread(100, dir);
-        writer.start();
-        searcher.start();
-
-        while(writer.isAlive() || searcher.isAlive()) {
-          Thread.sleep(1000);
-        }
-
-        assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
-        assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
-
-        dir.close();
-        // Cleanup
-        IOUtils.rm(indexDir);
-    }
-
-    // Verify: NativeFSLockFactory works correctly
-    public void testNativeFSLockFactory() throws IOException {
-      Directory dir = FSDirectory.open(createTempDir(LuceneTestCase.getTestClass().getSimpleName()), NativeFSLockFactory.INSTANCE);
-
-      Lock l = dir.makeLock("commit");
-      Lock l2 = dir.makeLock("commit");
-
-      assertTrue("failed to obtain lock", l.obtain());
-      assertTrue("succeeded in obtaining lock twice", !l2.obtain());
-      l.close();
-
-      assertTrue("failed to obtain 2nd lock after first one was freed", l2.obtain());
-      l2.close();
-
-      // Make sure we can obtain first one again, test isLocked():
-      assertTrue("failed to obtain lock", l.obtain());
-      assertTrue(l.isLocked());
-      assertTrue(l2.isLocked());
-      l.close();
-      assertFalse(l.isLocked());
-      assertFalse(l2.isLocked());
-    }
-
-    
-    // Verify: NativeFSLockFactory works correctly if the lock file exists
-    public void testNativeFSLockFactoryLockExists() throws IOException {
-      Path tempDir = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
-      Path lockFile = tempDir.resolve("test.lock");
-      Files.createFile(lockFile);
-      
-      Directory dir = FSDirectory.open(tempDir, NativeFSLockFactory.INSTANCE);
-      Lock l = dir.makeLock("test.lock");
-      assertTrue("failed to obtain lock", l.obtain());
-      l.close();
-      assertFalse("failed to release lock", l.isLocked());
-      Files.deleteIfExists(lockFile);
-    }
-
-    private class WriterThread extends Thread { 
-        private Directory dir;
-        private int numIteration;
-        public boolean hitException = false;
-        public WriterThread(int numIteration, Directory dir) {
-            this.numIteration = numIteration;
-            this.dir = dir;
-        }
-        @Override
-        public void run() {
-            IndexWriter writer = null;
-            for(int i=0;i<this.numIteration;i++) {
-                try {
-                    writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-                } catch (IOException e) {
-                    if (e.toString().indexOf(" timed out:") == -1) {
-                        hitException = true;
-                        System.out.println("Stress Test Index Writer: creation hit unexpected IOException: " + e.toString());
-                        e.printStackTrace(System.out);
-                    } else {
-                        // lock obtain timed out
-                        // NOTE: we should at some point
-                        // consider this a failure?  The lock
-                        // obtains, across IndexReader &
-                        // IndexWriters should be "fair" (ie
-                        // FIFO).
-                    }
-                } catch (Exception e) {
-                    hitException = true;
-                    System.out.println("Stress Test Index Writer: creation hit unexpected exception: " + e.toString());
-                    e.printStackTrace(System.out);
-                    break;
-                }
-                if (writer != null) {
-                    try {
-                        addDoc(writer);
-                    } catch (IOException e) {
-                        hitException = true;
-                        System.out.println("Stress Test Index Writer: addDoc hit unexpected exception: " + e.toString());
-                        e.printStackTrace(System.out);
-                        break;
-                    }
-                    try {
-                        writer.close();
-                    } catch (IOException e) {
-                        hitException = true;
-                        System.out.println("Stress Test Index Writer: close hit unexpected exception: " + e.toString());
-                        e.printStackTrace(System.out);
-                        break;
-                    }
-                    writer = null;
-                }
-            }
-        }
-    }
-
-    private class SearcherThread extends Thread { 
-        private Directory dir;
-        private int numIteration;
-        public boolean hitException = false;
-        public SearcherThread(int numIteration, Directory dir) {
-            this.numIteration = numIteration;
-            this.dir = dir;
-        }
-        @Override
-        public void run() {
-            IndexReader reader = null;
-            IndexSearcher searcher = null;
-            Query query = new TermQuery(new Term("content", "aaa"));
-            for(int i=0;i<this.numIteration;i++) {
-                try{
-                    reader = DirectoryReader.open(dir);
-                    searcher = newSearcher(reader);
-                } catch (Exception e) {
-                    hitException = true;
-                    System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
-                    e.printStackTrace(System.out);
-                    break;
-                }
-                try {
-                  searcher.search(query, 1000);
-                } catch (IOException e) {
-                  hitException = true;
-                  System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
-                  e.printStackTrace(System.out);
-                  break;
-                }
-                // System.out.println(hits.length() + " total results");
-                try {
-                  reader.close();
-                } catch (IOException e) {
-                  hitException = true;
-                  System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
-                  e.printStackTrace(System.out);
-                  break;
-                }
-            }
-        }
-    }
-
     class MockLockFactory extends LockFactory {
 
         public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
-        public int makeLockCount = 0;
 
         @Override
-        public synchronized Lock makeLock(Directory dir, String lockName) {
+        public synchronized Lock obtainLock(Directory dir, String lockName) {
             Lock lock = new MockLock();
             locksCreated.put(lockName, lock);
-            makeLockCount++;
             return lock;
         }
 
         public class MockLock extends Lock {
-            public int lockAttempts;
 
             @Override
-            public boolean obtain() {
-                lockAttempts++;
-                return true;
-            }
-            @Override
             public void close() {
                 // do nothing
             }
+
             @Override
-            public boolean isLocked() {
-                return false;
+            public void ensureValid() throws IOException {
+              // do nothing
             }
+
         }
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestNativeFSLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestNativeFSLockFactory.java
new file mode 100644
index 0000000..b53707e
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/store/TestNativeFSLockFactory.java
@@ -0,0 +1,108 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.lucene.util.IOUtils;
+
+/** Simple tests for NativeFSLockFactory */
+public class TestNativeFSLockFactory extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newFSDirectory(path, NativeFSLockFactory.INSTANCE);
+  }
+  
+  /** Verify NativeFSLockFactory works correctly if the lock file exists */
+  public void testLockFileExists() throws IOException {
+    Path tempDir = createTempDir();
+    Path lockFile = tempDir.resolve("test.lock");
+    Files.createFile(lockFile);
+    
+    Directory dir = getDirectory(tempDir);
+    Lock l = dir.obtainLock("test.lock");
+    l.close();
+    dir.close();
+  }
+  
+  /** release the lock and test ensureValid fails */
+  public void testInvalidateLock() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    NativeFSLockFactory.NativeFSLock lock =  (NativeFSLockFactory.NativeFSLock) dir.obtainLock("test.lock");
+    lock.ensureValid();
+    lock.lock.release();
+    try {
+      lock.ensureValid();
+      fail("no exception");
+    } catch (AlreadyClosedException expected) {
+      // ok
+    } finally {
+      IOUtils.closeWhileHandlingException(lock);
+    }
+    dir.close();
+  }
+  
+  /** close the channel and test ensureValid fails */
+  public void testInvalidateChannel() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    NativeFSLockFactory.NativeFSLock lock =  (NativeFSLockFactory.NativeFSLock) dir.obtainLock("test.lock");
+    lock.ensureValid();
+    lock.channel.close();
+    try {
+      lock.ensureValid();
+      fail("no exception");
+    } catch (AlreadyClosedException expected) {
+      // ok
+    } finally {
+      IOUtils.closeWhileHandlingException(lock);
+    }
+    dir.close();
+  }
+  
+  /** delete the lockfile and test ensureValid fails */
+  public void testDeleteLockFile() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    try {
+      Lock lock = dir.obtainLock("test.lock");
+      lock.ensureValid();
+    
+      try {
+        dir.deleteFile("test.lock");
+      } catch (Exception e) {
+        // we couldn't delete the file for some reason: just clean up and skip the test via assumeNoException below.
+        IOUtils.closeWhileHandlingException(lock);
+        assumeNoException("test requires the ability to delete a locked file", e);
+      }
+    
+      try {
+        lock.ensureValid();
+        fail("no exception");
+      } catch (IOException expected) {
+        // ok
+      } finally {
+        IOUtils.closeWhileHandlingException(lock);
+      }
+    } finally {
+      // Close in a finally clause so the directory is released even if the assumeNoException above trips:
+      dir.close();
+    }
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestSimpleFSLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestSimpleFSLockFactory.java
new file mode 100644
index 0000000..f05297e
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/store/TestSimpleFSLockFactory.java
@@ -0,0 +1,61 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.util.IOUtils;
+
+/** Simple tests for SimpleFSLockFactory */
+public class TestSimpleFSLockFactory extends BaseLockFactoryTestCase {
+
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newFSDirectory(path, SimpleFSLockFactory.INSTANCE);
+  }
+  
+  /** delete the lockfile and test ensureValid fails */
+  public void testDeleteLockFile() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    try {
+      Lock lock = dir.obtainLock("test.lock");
+      lock.ensureValid();
+    
+      try {
+        dir.deleteFile("test.lock");
+      } catch (Exception e) {
+        // we couldn't delete the file for some reason: just clean up and skip the test via assumeNoException below.
+        IOUtils.closeWhileHandlingException(lock);
+        assumeNoException("test requires the ability to delete a locked file", e);
+      }
+    
+      try {
+        lock.ensureValid();
+        fail("no exception");
+      } catch (IOException expected) {
+        // ok
+      } finally {
+        IOUtils.closeWhileHandlingException(lock);
+      }
+    } finally {
+      // Close in a finally clause so the directory is released even if the assumeNoException above trips:
+      dir.close();
+    }
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestSingleInstanceLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestSingleInstanceLockFactory.java
new file mode 100644
index 0000000..c9f4668
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/store/TestSingleInstanceLockFactory.java
@@ -0,0 +1,59 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+/** Simple tests for SingleInstanceLockFactory */
+public class TestSingleInstanceLockFactory extends BaseLockFactoryTestCase {
+  
+  @Override
+  protected Directory getDirectory(Path path) throws IOException {
+    return newDirectory(random(), new SingleInstanceLockFactory());
+  }
+  
+  // Verify: SingleInstanceLockFactory is the default lock for RAMDirectory
+  // Verify: RAMDirectory does basic locking correctly (can't create two IndexWriters)
+  public void testDefaultRAMDirectory() throws IOException {
+    RAMDirectory dir = new RAMDirectory();
+    
+    assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.lockFactory,
+        dir.lockFactory instanceof SingleInstanceLockFactory);
+    
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
+    
+    // Create a 2nd IndexWriter.  This should fail:
+    IndexWriter writer2 = null;
+    try {
+      writer2 = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+      fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
+    } catch (IOException e) {
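+      // expected: the second writer cannot obtain the directory's single in-memory write lock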
+    }
+    
+    writer.close();
+    if (writer2 != null) {
+      writer2.close();
+    }
+  }
+}
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index e98e565..77b2389 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -43,7 +43,7 @@
 import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockObtainFailedException; // javadocs
+import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.BytesRef;
 
 /*
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java
new file mode 100644
index 0000000..59a284d
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java
@@ -0,0 +1,151 @@
+package org.apache.lucene.search.postingshighlight;
+
+import java.text.BreakIterator;
+import java.text.CharacterIterator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A {@link BreakIterator} that breaks the text whenever a certain separator, provided as a constructor argument, is found.
+ */
+public final class CustomSeparatorBreakIterator extends BreakIterator {
+
+  private final char separator;
+  private CharacterIterator text;
+  private int current;
+
+  public CustomSeparatorBreakIterator(char separator) {
+    this.separator = separator;
+  }
+
+  @Override
+  public int current() {
+    return current;
+  }
+
+  @Override
+  public int first() {
+    text.setIndex(text.getBeginIndex());
+    return current = text.getIndex();
+  }
+
+  @Override
+  public int last() {
+    text.setIndex(text.getEndIndex());
+    return current = text.getIndex();
+  }
+
+  @Override
+  public int next() {
+    if (text.getIndex() == text.getEndIndex()) {
+      return DONE;
+    } else {
+      return advanceForward();
+    }
+  }
+
+  private int advanceForward() {
+    char c;
+    while ((c = text.next()) != CharacterIterator.DONE) {
+      if (c == separator) {
+        return current = text.getIndex() + 1;
+      }
+    }
+    assert text.getIndex() == text.getEndIndex();
+    return current = text.getIndex();
+  }
+
+  @Override
+  public int following(int pos) {
+    if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
+      throw new IllegalArgumentException("offset out of bounds");
+    } else if (pos == text.getEndIndex()) {
+      // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
+      // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
+      text.setIndex(text.getEndIndex());
+      current = text.getIndex();
+      return DONE;
+    } else {
+      text.setIndex(pos);
+      current = text.getIndex();
+      return advanceForward();
+    }
+  }
+
+  @Override
+  public int previous() {
+    if (text.getIndex() == text.getBeginIndex()) {
+      return DONE;
+    } else {
+      return advanceBackward();
+    }
+  }
+
+  private int advanceBackward() {
+    char c;
+    while ((c = text.previous()) != CharacterIterator.DONE) {
+      if (c == separator) {
+        return current = text.getIndex() + 1;
+      }
+    }
+    assert text.getIndex() == text.getBeginIndex();
+    return current = text.getIndex();
+  }
+
+  @Override
+  public int preceding(int pos) {
+    if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
+      throw new IllegalArgumentException("offset out of bounds");
+    } else if (pos == text.getBeginIndex()) {
+      // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
+      // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
+      text.setIndex(text.getBeginIndex());
+      current = text.getIndex();
+      return DONE;
+    } else {
+      text.setIndex(pos);
+      current = text.getIndex();
+      return advanceBackward();
+    }
+  }
+
+  @Override
+  public int next(int n) {
+    if (n < 0) {
+      for (int i = 0; i < -n; i++) {
+        previous();
+      }
+    } else {
+      for (int i = 0; i < n; i++) {
+        next();
+      }
+    }
+    return current();
+  }
+
+  @Override
+  public CharacterIterator getText() {
+    return text;
+  }
+
+  @Override
+  public void setText(CharacterIterator newText) {
+    text = newText;
+    current = text.getBeginIndex();
+  }
+}
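
To make the new iterator's contract concrete: every passage it returns ends just after the separator character, and the final passage runs to the end of the text. A small, hypothetical standalone sketch (the class name and sample text are made up):

    import java.text.BreakIterator;

    import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;

    public class CustomSeparatorDemo {
      public static void main(String[] args) {
        BreakIterator bi = new CustomSeparatorBreakIterator('|');
        String text = "first passage|second passage|third passage";
        bi.setText(text);
        int start = bi.first();
        for (int end = bi.next(); end != BreakIterator.DONE; start = end, end = bi.next()) {
          // prints "first passage|", "second passage|", then "third passage"
          System.out.println(text.substring(start, end));
        }
      }
    }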
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestCustomSeparatorBreakIterator.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestCustomSeparatorBreakIterator.java
new file mode 100644
index 0000000..4b440f5
--- /dev/null
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestCustomSeparatorBreakIterator.java
@@ -0,0 +1,115 @@
+package org.apache.lucene.search.postingshighlight;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.text.BreakIterator;
+import java.util.Locale;
+
+import static org.apache.lucene.search.postingshighlight.TestWholeBreakIterator.assertSameBreaks;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class TestCustomSeparatorBreakIterator extends LuceneTestCase {
+
+  private static final Character[] SEPARATORS = new Character[]{' ', '\u0000', 8233}; // space, NUL, and U+2029 (PARAGRAPH SEPARATOR)
+
+  public void testBreakOnCustomSeparator() throws Exception {
+    Character separator = randomSeparator();
+    BreakIterator bi = new CustomSeparatorBreakIterator(separator);
+    String source = "this" + separator + "is" + separator + "the" + separator + "first" + separator + "sentence";
+    bi.setText(source);
+    assertThat(bi.current(), equalTo(0));
+    assertThat(bi.first(), equalTo(0));
+    assertThat(source.substring(bi.current(), bi.next()), equalTo("this" + separator));
+    assertThat(source.substring(bi.current(), bi.next()), equalTo("is" + separator));
+    assertThat(source.substring(bi.current(), bi.next()), equalTo("the" + separator));
+    assertThat(source.substring(bi.current(), bi.next()), equalTo("first" + separator));
+    assertThat(source.substring(bi.current(), bi.next()), equalTo("sentence"));
+    assertThat(bi.next(), equalTo(BreakIterator.DONE));
+
+    assertThat(bi.last(), equalTo(source.length()));
+    int current = bi.current();
+    assertThat(source.substring(bi.previous(), current), equalTo("sentence"));
+    current = bi.current();
+    assertThat(source.substring(bi.previous(), current), equalTo("first" + separator));
+    current = bi.current();
+    assertThat(source.substring(bi.previous(), current), equalTo("the" + separator));
+    current = bi.current();
+    assertThat(source.substring(bi.previous(), current), equalTo("is" + separator));
+    current = bi.current();
+    assertThat(source.substring(bi.previous(), current), equalTo("this" + separator));
+    assertThat(bi.previous(), equalTo(BreakIterator.DONE));
+    assertThat(bi.current(), equalTo(0));
+
+    assertThat(source.substring(0, bi.following(9)), equalTo("this" + separator + "is" + separator + "the" + separator));
+
+    assertThat(source.substring(0, bi.preceding(9)), equalTo("this" + separator + "is" + separator));
+
+    assertThat(bi.first(), equalTo(0));
+    assertThat(source.substring(0, bi.next(3)), equalTo("this" + separator + "is" + separator + "the" + separator));
+  }
+
+  public void testSingleSentences() throws Exception {
+    BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+    BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+    assertSameBreaks("a", expected, actual);
+    assertSameBreaks("ab", expected, actual);
+    assertSameBreaks("abc", expected, actual);
+    assertSameBreaks("", expected, actual);
+  }
+
+  public void testSliceEnd() throws Exception {
+    BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+    BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+    assertSameBreaks("a000", 0, 1, expected, actual);
+    assertSameBreaks("ab000", 0, 1, expected, actual);
+    assertSameBreaks("abc000", 0, 1, expected, actual);
+    assertSameBreaks("000", 0, 0, expected, actual);
+  }
+
+  public void testSliceStart() throws Exception {
+    BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+    BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+    assertSameBreaks("000a", 3, 1, expected, actual);
+    assertSameBreaks("000ab", 3, 2, expected, actual);
+    assertSameBreaks("000abc", 3, 3, expected, actual);
+    assertSameBreaks("000", 3, 0, expected, actual);
+  }
+
+  public void testSliceMiddle() throws Exception {
+    BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+    BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+    assertSameBreaks("000a000", 3, 1, expected, actual);
+    assertSameBreaks("000ab000", 3, 2, expected, actual);
+    assertSameBreaks("000abc000", 3, 3, expected, actual);
+    assertSameBreaks("000000", 3, 0, expected, actual);
+  }
+
+  /** the current position must be ignored, initial position is always first() */
+  public void testFirstPosition() throws Exception {
+    BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+    BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+    assertSameBreaks("000ab000", 3, 2, 4, expected, actual);
+  }
+
+  private static char randomSeparator() {
+    return RandomPicks.randomFrom(random(), SEPARATORS);
+  }
+}
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestWholeBreakIterator.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestWholeBreakIterator.java
index d113509..8021a28 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestWholeBreakIterator.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestWholeBreakIterator.java
@@ -17,13 +17,13 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.util.LuceneTestCase;
+
 import java.text.BreakIterator;
 import java.text.CharacterIterator;
 import java.text.StringCharacterIterator;
 import java.util.Locale;
 
-import org.apache.lucene.util.LuceneTestCase;
-
 public class TestWholeBreakIterator extends LuceneTestCase {
   
   /** For single sentences, we know WholeBreakIterator should break the same as a sentence iterator */
@@ -70,18 +70,18 @@
     assertSameBreaks("000ab000", 3, 2, 4, expected, actual);
   }
 
-  public void assertSameBreaks(String text, BreakIterator expected, BreakIterator actual) {
+  public static void assertSameBreaks(String text, BreakIterator expected, BreakIterator actual) {
     assertSameBreaks(new StringCharacterIterator(text), 
                      new StringCharacterIterator(text), 
                      expected, 
                      actual);
   }
   
-  public void assertSameBreaks(String text, int offset, int length, BreakIterator expected, BreakIterator actual) {
+  public static void assertSameBreaks(String text, int offset, int length, BreakIterator expected, BreakIterator actual) {
     assertSameBreaks(text, offset, length, offset, expected, actual);
   }
   
-  public void assertSameBreaks(String text, int offset, int length, int current, BreakIterator expected, BreakIterator actual) {
+  public static void assertSameBreaks(String text, int offset, int length, int current, BreakIterator expected, BreakIterator actual) {
     assertSameBreaks(new StringCharacterIterator(text, offset, offset+length, current), 
                      new StringCharacterIterator(text, offset, offset+length, current), 
                      expected, 
@@ -89,7 +89,7 @@
   }
 
   /** Asserts that two breakiterators break the text the same way */
-  public void assertSameBreaks(CharacterIterator one, CharacterIterator two, BreakIterator expected, BreakIterator actual) {
+  public static void assertSameBreaks(CharacterIterator one, CharacterIterator two, BreakIterator expected, BreakIterator actual) {
     expected.setText(one);
     actual.setText(two);
 
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointField.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointField.java
new file mode 100644
index 0000000..8cc1f5b
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointField.java
@@ -0,0 +1,50 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.DocValuesType;
+
+/** Add this to a document to index lat/lon point, but be sure to use {@link BKDTreeDocValuesFormat} for the field. */
+public final class BKDPointField extends Field {
+
+  public static final FieldType TYPE = new FieldType();
+  static {
+    TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC);
+    TYPE.freeze();
+  }
+
+  /** 
+   * Creates a new BKDPointField field with the specified lat and lon
+   * @param name field name
+   * @param lat double latitude
+   * @param lon double longitude
+   * @throws IllegalArgumentException if the field name is null or lat or lon are out of bounds
+   */
+  public BKDPointField(String name, double lat, double lon) {
+    super(name, TYPE);
+    if (BKDTreeWriter.validLat(lat) == false) {
+      throw new IllegalArgumentException("invalid lat (" + lat + "): must be -90 to 90");
+    }
+    if (BKDTreeWriter.validLon(lon) == false) {
+      throw new IllegalArgumentException("invalid lon (" + lon + "): must be -180 to 180");
+    }
+    fieldsData = Long.valueOf(((long) BKDTreeWriter.encodeLat(lat) << 32) | (BKDTreeWriter.encodeLon(lon) & 0xffffffffL));
+  }
+}
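
A hedged sketch of indexing such a point; as the javadoc above notes, this only works if the writer's codec routes the field to BKDTreeDocValuesFormat (the field name and coordinates here are illustrative, and the codec wiring is sketched after the BKDTreeDocValuesFormat hunk below):

    import java.io.IOException;

    import org.apache.lucene.bkdtree.BKDPointField;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;

    class BKDPointIndexingSketch {
      // assumes `writer` was opened with a codec that uses BKDTreeDocValuesFormat for "location"
      static void indexPoint(IndexWriter writer) throws IOException {
        Document doc = new Document();
        doc.add(new BKDPointField("location", 40.7128, -74.0060)); // lat, lon
        writer.addDocument(doc);
      }
    }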
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInBBoxQuery.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInBBoxQuery.java
new file mode 100644
index 0000000..3f96e3a
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInBBoxQuery.java
@@ -0,0 +1,214 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
+/** Finds all previously indexed points that fall within the specified bounding box.
+ *
+ *  <p>The field must be indexed with {@link BKDTreeDocValuesFormat}, and {@link BKDPointField} added per document.
+ *
+ *  <p><b>NOTE</b>: for fastest performance, this allocates FixedBitSet(maxDoc) for each segment.  The score of each hit is the query boost.
+ *
+ * @lucene.experimental */
+
+public class BKDPointInBBoxQuery extends Query {
+  final String field;
+  final double minLat;
+  final double maxLat;
+  final double minLon;
+  final double maxLon;
+
+  /** Matches all points &gt;= minLon, minLat (inclusive) and &lt; maxLon, maxLat (exclusive). */ 
+  public BKDPointInBBoxQuery(String field, double minLat, double maxLat, double minLon, double maxLon) {
+    this.field = field;
+    if (BKDTreeWriter.validLat(minLat) == false) {
+      throw new IllegalArgumentException("minLat=" + minLat + " is not a valid latitude");
+    }
+    if (BKDTreeWriter.validLat(maxLat) == false) {
+      throw new IllegalArgumentException("maxLat=" + maxLat + " is not a valid latitude");
+    }
+    if (BKDTreeWriter.validLon(minLon) == false) {
+      throw new IllegalArgumentException("minLon=" + minLon + " is not a valid longitude");
+    }
+    if (BKDTreeWriter.validLon(maxLon) == false) {
+      throw new IllegalArgumentException("maxLon=" + maxLon + " is not a valid longitude");
+    }
+    this.minLon = minLon;
+    this.maxLon = maxLon;
+    this.minLat = minLat;
+    this.maxLat = maxLat;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+
+    // I don't use RandomAccessWeight here: it's no good to approximate with "match all docs"; this is an inverted structure and should be
+    // used in the first pass:
+
+    return new Weight(this) {
+      private float queryNorm;
+      private float queryWeight;
+
+      @Override
+      public void extractTerms(Set<Term> terms) {
+      }
+
+      @Override
+      public float getValueForNormalization() throws IOException {
+        queryWeight = getBoost();
+        return queryWeight * queryWeight;
+      }
+
+      @Override
+      public void normalize(float norm, float topLevelBoost) {
+        queryNorm = norm * topLevelBoost;
+        queryWeight *= queryNorm;
+      }
+
+      @Override
+      public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+        final Scorer s = scorer(context, context.reader().getLiveDocs());
+        final boolean exists = s != null && s.advance(doc) == doc;
+
+        if (exists) {
+          return Explanation.match(queryWeight, BKDPointInBBoxQuery.this.toString() + ", product of:",
+              Explanation.match(getBoost(), "boost"), Explanation.match(queryNorm, "queryNorm"));
+        } else {
+          return Explanation.noMatch(BKDPointInBBoxQuery.this.toString() + " doesn't match id " + doc);
+        }
+      }
+
+      @Override
+      public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+        LeafReader reader = context.reader();
+        SortedNumericDocValues sdv = reader.getSortedNumericDocValues(field);
+        if (sdv == null) {
+          // No docs in this segment had this field
+          return null;
+        }
+
+        if (sdv instanceof BKDTreeSortedNumericDocValues == false) {
+          throw new IllegalStateException("field \"" + field + "\" was not indexed with BKDTreeDocValuesFormat: got: " + sdv);
+        }
+        BKDTreeSortedNumericDocValues treeDV = (BKDTreeSortedNumericDocValues) sdv;
+        BKDTreeReader tree = treeDV.getBKDTreeReader();
+
+        DocIdSet result = tree.intersect(acceptDocs, minLat, maxLat, minLon, maxLon, treeDV.delegate);
+
+        final DocIdSetIterator disi = result.iterator();
+
+        return new Scorer(this) {
+
+          @Override
+          public float score() throws IOException {
+            return queryWeight;
+          }
+
+          @Override
+          public int freq() throws IOException {
+            return 1;
+          }
+
+          @Override
+          public int docID() {
+            return disi.docID();
+          }
+
+          @Override
+          public int nextDoc() throws IOException {
+            return disi.nextDoc();
+          }
+
+          @Override
+          public int advance(int target) throws IOException {
+            return disi.advance(target);
+          }
+
+          @Override
+          public long cost() {
+            return disi.cost();
+          }
+        };
+      }
+    };
+  }
+  @Override
+  public int hashCode() {
+    int hash = super.hashCode();
+    hash += Double.hashCode(minLat)^0x14fa55fb;
+    hash += Double.hashCode(maxLat)^0x733fa5fe;
+    hash += Double.hashCode(minLon)^0x14fa55fb;
+    hash += Double.hashCode(maxLon)^0x733fa5fe;
+    return hash;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (super.equals(other) && other instanceof BKDPointInBBoxQuery) {
+      final BKDPointInBBoxQuery q = (BKDPointInBBoxQuery) other;
+      return field.equals(q.field) &&
+        minLat == q.minLat &&
+        maxLat == q.maxLat &&
+        minLon == q.minLon &&
+        maxLon == q.maxLon;
+    }
+
+    return false;
+  }
+
+  @Override
+  public String toString(String field) {
+    final StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName());
+    sb.append(':');
+    if (this.field.equals(field) == false) {
+      sb.append("field=");
+      sb.append(this.field);
+      sb.append(':');
+    }
+
+    return sb.append(" Lower Left: [")
+        .append(minLon)
+        .append(',')
+        .append(minLat)
+        .append(']')
+        .append(" Upper Right: [")
+        .append(maxLon)
+        .append(',')
+        .append(maxLat)
+        .append("]")
+        .append(ToStringUtils.boost(getBoost()))
+        .toString();
+  }
+}
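
A matching, hypothetical search-side sketch; the field name and the rough box around New York are illustrative only:

    import java.io.IOException;

    import org.apache.lucene.bkdtree.BKDPointInBBoxQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;

    class BBoxSearchSketch {
      // matches points with minLat <= lat < maxLat and minLon <= lon < maxLon, per the constructor javadoc
      static TopDocs searchBox(IndexSearcher searcher) throws IOException {
        Query q = new BKDPointInBBoxQuery("location", 40.0, 41.0, -75.0, -73.0);
        return searcher.search(q, 10);
      }
    }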
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInPolygonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInPolygonQuery.java
new file mode 100644
index 0000000..36e415b
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDPointInPolygonQuery.java
@@ -0,0 +1,284 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
+/** Finds all previously indexed points that fall within the specified polygon.
+ *
+ *  <p>The field must be indexed with {@link BKDTreeDocValuesFormat}, and {@link BKDPointField} added per document.
+ *
+ *  <p>Because this implementation cannot intersect each cell with the polygon, it will be costly, especially for large polygons, as every
+ *   possible point must be checked.
+ *
+ *  <p><b>NOTE</b>: for fastest performance, this allocates FixedBitSet(maxDoc) for each segment.  The score of each hit is the query boost.
+ *
+ * @lucene.experimental */
+
+public class BKDPointInPolygonQuery extends Query {
+  final String field;
+  final double minLat;
+  final double maxLat;
+  final double minLon;
+  final double maxLon;
+  final double[] polyLats;
+  final double[] polyLons;
+
+  /** The lats/lons must be clockwise or counter-clockwise. */
+  public BKDPointInPolygonQuery(String field, double[] polyLats, double[] polyLons) {
+    this.field = field;
+    if (polyLats.length != polyLons.length) {
+      throw new IllegalArgumentException("polyLats and polyLons must be equal length");
+    }
+    if (polyLats.length < 4) {
+      throw new IllegalArgumentException("at least 4 polygon points required");
+    }
+    if (polyLats[0] != polyLats[polyLats.length-1]) {
+      throw new IllegalArgumentException("first and last points of the polygon must be the same (it must close itself): polyLats[0]=" + polyLats[0] + " polyLats[" + (polyLats.length-1) + "]=" + polyLats[polyLats.length-1]);
+    }
+    if (polyLons[0] != polyLons[polyLons.length-1]) {
+      throw new IllegalArgumentException("first and last points of the polygon must be the same (it must close itself): polyLons[0]=" + polyLons[0] + " polyLons[" + (polyLons.length-1) + "]=" + polyLons[polyLons.length-1]);
+    }
+
+    this.polyLats = polyLats;
+    this.polyLons = polyLons;
+
+    double minLon = Double.POSITIVE_INFINITY;
+    double minLat = Double.POSITIVE_INFINITY;
+    double maxLon = Double.NEGATIVE_INFINITY;
+    double maxLat = Double.NEGATIVE_INFINITY;
+    for(int i=0;i<polyLats.length;i++) {
+      double lat = polyLats[i];
+      if (BKDTreeWriter.validLat(lat) == false) {
+        throw new IllegalArgumentException("polyLats[" + i + "]=" + lat + " is not a valid latitude");
+      }
+      minLat = Math.min(minLat, lat);
+      maxLat = Math.max(maxLat, lat);
+      double lon = polyLons[i];
+      if (BKDTreeWriter.validLon(lon) == false) {
+        throw new IllegalArgumentException("polyLons[" + i + "]=" + lat + " is not a valid longitude");
+      }
+      minLon = Math.min(minLon, lon);
+      maxLon = Math.max(maxLon, lon);
+    }
+    this.minLon = minLon;
+    this.maxLon = maxLon;
+    this.minLat = minLat;
+    this.maxLat = maxLat;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+
+    // I don't use RandomAccessWeight here: it's no good to approximate with "match all docs"; this is an inverted structure and should be
+    // used in the first pass:
+
+    // TODO: except that the polygon verify is costly!  The approximation should be all docs in all overlapping cells, and matches() should
+    // then check the polygon
+
+    return new Weight(this) {
+      private float queryNorm;
+      private float queryWeight;
+
+      @Override
+      public void extractTerms(Set<Term> terms) {
+      }
+
+      @Override
+      public float getValueForNormalization() throws IOException {
+        queryWeight = getBoost();
+        return queryWeight * queryWeight;
+      }
+
+      @Override
+      public void normalize(float norm, float topLevelBoost) {
+        queryNorm = norm * topLevelBoost;
+        queryWeight *= queryNorm;
+      }
+
+      @Override
+      public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+        final Scorer s = scorer(context, context.reader().getLiveDocs());
+        final boolean exists = s != null && s.advance(doc) == doc;
+
+        if (exists) {
+          return Explanation.match(queryWeight, BKDPointInPolygonQuery.this.toString() + ", product of:",
+              Explanation.match(getBoost(), "boost"), Explanation.match(queryNorm, "queryNorm"));
+        } else {
+          return Explanation.noMatch(BKDPointInPolygonQuery.this.toString() + " doesn't match id " + doc);
+        }
+      }
+
+      @Override
+      public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+        LeafReader reader = context.reader();
+        SortedNumericDocValues sdv = reader.getSortedNumericDocValues(field);
+        if (sdv == null) {
+          // No docs in this segment had this field
+          return null;
+        }
+
+        if (sdv instanceof BKDTreeSortedNumericDocValues == false) {
+          throw new IllegalStateException("field \"" + field + "\" was not indexed with BKDTreeDocValuesFormat: got: " + sdv);
+        }
+        BKDTreeSortedNumericDocValues treeDV = (BKDTreeSortedNumericDocValues) sdv;
+        BKDTreeReader tree = treeDV.getBKDTreeReader();
+        
+        // TODO: make this more efficient: as we recurse the BKD tree we should check whether the
+        // bbox we are recursing into intersects our shape; Apache SIS may have (non-GPL!) code to do this?
+        DocIdSet result = tree.intersect(acceptDocs, minLat, maxLat, minLon, maxLon,
+                                         new BKDTreeReader.LatLonFilter() {
+                                           @Override
+                                           public boolean accept(double lat, double lon) {
+                                             return pointInPolygon(lat, lon);
+                                           }
+                                         }, treeDV.delegate);
+
+        final DocIdSetIterator disi = result.iterator();
+
+        return new Scorer(this) {
+
+          @Override
+          public float score() throws IOException {
+            return queryWeight;
+          }
+
+          @Override
+          public int freq() throws IOException {
+            return 1;
+          }
+
+          @Override
+          public int docID() {
+            return disi.docID();
+          }
+
+          @Override
+          public int nextDoc() throws IOException {
+            return disi.nextDoc();
+          }
+
+          @Override
+          public int advance(int target) throws IOException {
+            return disi.advance(target);
+          }
+
+          @Override
+          public long cost() {
+            return disi.cost();
+          }
+        };
+      }
+    };
+  }
+
+  // TODO: share w/ GeoUtils:
+
+  /**
+   * simple even-odd point in polygon computation
+   *    1.  Determine if point is contained in the longitudinal range
+   *    2.  Determine whether point crosses the edge by computing the latitudinal delta
+   *        between the end-point of a parallel vector (originating at the point) and the
+   *        y-component of the edge sink
+   *
+   * NOTE: Requires polygon point (x,y) order either clockwise or counter-clockwise
+   */
+  boolean pointInPolygon(double lat, double lon) {
+    /**
+     * Note: This is using a euclidean coordinate system which could result in
+     * upwards of 110KM error at the equator.
+     * TODO convert coordinates to cylindrical projection (e.g. mercator)
+     */
+
+    // TODO: this quantizes a bit differently ... boundary cases will fail here:
+    boolean inPoly = false;
+    for (int i = 1; i < polyLons.length; i++) {
+      if (polyLons[i] <= lon && polyLons[i-1] > lon || polyLons[i-1] <= lon && polyLons[i] > lon) {
+        if (polyLats[i] + (lon - polyLons[i]) / (polyLons[i-1] - polyLons[i]) * (polyLats[i-1] - polyLats[i]) <= lat) {
+          inPoly = !inPoly;
+        }
+      }
+    }
+    return inPoly;
+  }
+
+  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    if (!super.equals(o)) return false;
+
+    BKDPointInPolygonQuery that = (BKDPointInPolygonQuery) o;
+
+    if (Arrays.equals(polyLons, that.polyLons) == false) {
+      return false;
+    }
+    if (Arrays.equals(polyLats, that.polyLats) == false) {
+      return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public final int hashCode() {
+    int result = super.hashCode();
+    result = 31 * result + Arrays.hashCode(polyLons);
+    result = 31 * result + Arrays.hashCode(polyLats);
+    return result;
+  }
+
+  @Override
+  public String toString(String field) {
+    final StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName());
+    sb.append(':');
+    if (this.field.equals(field) == false) {
+      sb.append(" field=");
+      sb.append(this.field);
+      sb.append(':');
+    }
+    sb.append(" Points: ");
+    for (int i=0; i<polyLons.length; ++i) {
+      sb.append("[")
+        .append(polyLons[i])
+        .append(", ")
+        .append(polyLats[i])
+        .append("] ");
+    }
+    sb.append(ToStringUtils.boost(getBoost()));
+    return sb.toString();
+  }
+}
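
And a corresponding sketch for the polygon query; note that the ring has to be explicitly closed (the first point repeated as the last), as the constructor above enforces. The square below is an arbitrary example:

    import org.apache.lucene.bkdtree.BKDPointInPolygonQuery;
    import org.apache.lucene.search.Query;

    class PolygonQuerySketch {
      static Query squareAroundOrigin() {
        // 5 points: 4 corners plus the first corner repeated to close the ring
        double[] polyLats = new double[] { -1.0,  1.0, 1.0, -1.0, -1.0 };
        double[] polyLons = new double[] { -1.0, -1.0, 1.0,  1.0, -1.0 };
        return new BKDPointInPolygonQuery("location", polyLats, polyLons);
      }
    }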
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesConsumer.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesConsumer.java
new file mode 100644
index 0000000..aa1bf20
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesConsumer.java
@@ -0,0 +1,133 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+
+class BKDTreeDocValuesConsumer extends DocValuesConsumer implements Closeable {
+  final DocValuesConsumer delegate;
+  final int maxPointsInLeafNode;
+  final int maxPointsSortInHeap;
+  final IndexOutput out;
+  final Map<Integer,Long> fieldIndexFPs = new HashMap<>();
+  final SegmentWriteState state;
+
+  public BKDTreeDocValuesConsumer(DocValuesConsumer delegate, SegmentWriteState state, int maxPointsInLeafNode, int maxPointsSortInHeap) throws IOException {
+    BKDTreeWriter.verifyParams(maxPointsInLeafNode, maxPointsSortInHeap);
+    this.delegate = delegate;
+    this.maxPointsInLeafNode = maxPointsInLeafNode;
+    this.maxPointsSortInHeap = maxPointsSortInHeap;
+    this.state = state;
+    String datFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BKDTreeDocValuesFormat.DATA_EXTENSION);
+    out = state.directory.createOutput(datFileName, state.context);
+    CodecUtil.writeIndexHeader(out, BKDTreeDocValuesFormat.DATA_CODEC_NAME, BKDTreeDocValuesFormat.DATA_VERSION_CURRENT,
+                               state.segmentInfo.getId(), state.segmentSuffix);
+  }
+
+  @Override
+  public void close() throws IOException {
+    boolean success = false;
+    try {
+      CodecUtil.writeFooter(out);
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(delegate, out);
+      } else {
+        IOUtils.closeWhileHandlingException(delegate, out);
+      }
+    }
+    
+    String metaFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BKDTreeDocValuesFormat.META_EXTENSION);
+    IndexOutput metaOut = state.directory.createOutput(metaFileName, state.context);
+    success = false;
+    try {
+      CodecUtil.writeIndexHeader(metaOut, BKDTreeDocValuesFormat.META_CODEC_NAME, BKDTreeDocValuesFormat.META_VERSION_CURRENT,
+                                 state.segmentInfo.getId(), state.segmentSuffix);
+      metaOut.writeVInt(fieldIndexFPs.size());
+      for(Map.Entry<Integer,Long> ent : fieldIndexFPs.entrySet()) {       
+        metaOut.writeVInt(ent.getKey());
+        metaOut.writeVLong(ent.getValue());
+      }
+      CodecUtil.writeFooter(metaOut);
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(metaOut);
+      } else {
+        IOUtils.closeWhileHandlingException(metaOut);
+      }
+    }
+  }
+
+  @Override
+  public void addSortedNumericField(FieldInfo field, Iterable<Number> docToValueCount, Iterable<Number> values) throws IOException {
+    delegate.addSortedNumericField(field, docToValueCount, values);
+    BKDTreeWriter writer = new BKDTreeWriter(maxPointsInLeafNode, maxPointsSortInHeap);
+    Iterator<Number> valueIt = values.iterator();
+    Iterator<Number> valueCountIt = docToValueCount.iterator();
+    for (int docID=0;docID<state.segmentInfo.maxDoc();docID++) {
+      assert valueCountIt.hasNext();
+      int count = valueCountIt.next().intValue();
+      for(int i=0;i<count;i++) {
+        assert valueIt.hasNext();
+        long value = valueIt.next().longValue();
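+        // BKDPointField packs the encoded lat into the upper 32 bits and the encoded lon into the lower 32 bits of this value: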
+        int latEnc = (int) (value >> 32);
+        int lonEnc = (int) (value & 0xffffffff);
+        writer.add(latEnc, lonEnc, docID);
+      }
+    }
+
+    long indexStartFP = writer.finish(out);
+
+    fieldIndexFPs.put(field.number, indexStartFP);
+  }
+
+  @Override
+  public void addNumericField(FieldInfo field, Iterable<Number> values) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addBinaryField(FieldInfo field, Iterable<BytesRef> values) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addSortedField(FieldInfo field, Iterable<BytesRef> values, Iterable<Number> docToOrd) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addSortedSetField(FieldInfo field, Iterable<BytesRef> values, Iterable<Number> docToOrdCount, Iterable<Number> ords) {
+    throw new UnsupportedOperationException();
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesFormat.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesFormat.java
new file mode 100644
index 0000000..6333716
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesFormat.java
@@ -0,0 +1,109 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.DocValuesProducer;
+import org.apache.lucene.codecs.lucene50.Lucene50DocValuesFormat;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+
+/**
+ * A {@link DocValuesFormat} to efficiently index geo-spatial lat/lon points
+ * from {@link BKDPointField} for fast bounding-box ({@link BKDPointInBBoxQuery})
+ * and polygon ({@link BKDPointInPolygonQuery}) queries.
+ *
+ * <p>This wraps {@link Lucene50DocValuesFormat}, but saves its own BKD tree
+ * structures to disk for fast query-time intersection. See <a
+ * href="https://www.cs.duke.edu/~pankaj/publications/papers/bkd-sstd.pdf">this paper</a>
+ * for details.
+ *
+ * <p>The BKD tree slices up 2D (lat/lon) space into smaller and
+ * smaller rectangles, until the smallest rectangles have approximately
+ * between X/2 and X (X default is 1024) points in them, at which point
+ * such leaf cells are written as a block to disk, while the index tree
+ * structure recording how space was sub-divided is loaded into HEAP
+ * at search time.  The tree is then recursed based on whether the
+ * left or right child overlaps the query shape, and once
+ * a leaf block is reached, all documents in that leaf block are collected
+ * if the cell is fully enclosed by the query shape, or filtered and then
+ * collected, if not.
+ *
+ * <p>The index is also quite compact, because docs only appear once in
+ * the tree (no "prefix terms").
+ *
+ * <p>In addition to the files written by {@link Lucene50DocValuesFormat}, this format writes:
+ * <ol>
+ *   <li><tt>.kdd</tt>: BKD leaf data and index</li>
+ *   <li><tt>.kdm</tt>: BKD metadata</li>
+ * </ol>
+ *
+ * <p>The disk format is experimental and free to change suddenly, and this code likely has new and exciting bugs!
+ *
+ * @lucene.experimental */
+
+public class BKDTreeDocValuesFormat extends DocValuesFormat {
+
+  static final String DATA_CODEC_NAME = "BKDData";
+  static final int DATA_VERSION_START = 0;
+  static final int DATA_VERSION_CURRENT = DATA_VERSION_START;
+  static final String DATA_EXTENSION = "kdd";
+
+  static final String META_CODEC_NAME = "BKDMeta";
+  static final int META_VERSION_START = 0;
+  static final int META_VERSION_CURRENT = META_VERSION_START;
+  static final String META_EXTENSION = "kdm";
+
+  private final int maxPointsInLeafNode;
+  private final int maxPointsSortInHeap;
+  
+  private final DocValuesFormat delegate = new Lucene50DocValuesFormat();
+
+  /** Default constructor */
+  public BKDTreeDocValuesFormat() {
+    this(BKDTreeWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE, BKDTreeWriter.DEFAULT_MAX_POINTS_SORT_IN_HEAP);
+  }
+
+  /** Creates this with custom configuration.
+   *
+   * @param maxPointsInLeafNode Maximum number of points in each leaf cell.  Smaller values create a deeper tree with larger in-heap index and possibly
+   *    faster searching.  The default is 1024.
+   * @param maxPointsSortInHeap Maximum number of points where in-heap sort can be used.  When the number of points exceeds this, a (slower)
+   *    offline sort is used.  The default is 128 * 1024.
+   *
+   * @lucene.experimental */
+  public BKDTreeDocValuesFormat(int maxPointsInLeafNode, int maxPointsSortInHeap) {
+    super("BKDTree");
+    BKDTreeWriter.verifyParams(maxPointsInLeafNode, maxPointsSortInHeap);
+    this.maxPointsInLeafNode = maxPointsInLeafNode;
+    this.maxPointsSortInHeap = maxPointsSortInHeap;
+  }
+
+  @Override
+  public DocValuesConsumer fieldsConsumer(final SegmentWriteState state) throws IOException {
+    return new BKDTreeDocValuesConsumer(delegate.fieldsConsumer(state), state, maxPointsInLeafNode, maxPointsSortInHeap);
+  }
+
+  @Override
+  public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException {
+    return new BKDTreeDocValuesProducer(delegate.fieldsProducer(state), state);
+  }
+}
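
For completeness, a hedged sketch of routing one field to this format through a per-field override; subclassing Lucene50Codec as shown is an assumption about the surrounding 5.x codec plumbing and is not part of this patch:

    import org.apache.lucene.bkdtree.BKDTreeDocValuesFormat;
    import org.apache.lucene.codecs.DocValuesFormat;
    import org.apache.lucene.codecs.lucene50.Lucene50Codec;
    import org.apache.lucene.index.IndexWriterConfig;

    class BKDCodecSketch {
      static IndexWriterConfig withBKDFormat(IndexWriterConfig iwc) {
        // smaller leaf cells (512 points) and the default 128*1024 in-heap sort budget from the javadoc above
        final DocValuesFormat bkd = new BKDTreeDocValuesFormat(512, 128 * 1024);
        iwc.setCodec(new Lucene50Codec() {
          @Override
          public DocValuesFormat getDocValuesFormatForField(String field) {
            // only the point field uses the BKD format; everything else keeps the default
            return "location".equals(field) ? bkd : super.getDocValuesFormatForField(field);
          }
        });
        return iwc;
      }
    }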
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesProducer.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesProducer.java
new file mode 100644
index 0000000..0283a72
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeDocValuesProducer.java
@@ -0,0 +1,169 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.DocValuesProducer;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.Accountables;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.RamUsageEstimator;
+
+class BKDTreeDocValuesProducer extends DocValuesProducer {
+
+  private final Map<String,BKDTreeReader> treeReaders = new HashMap<>();
+  private final Map<Integer,Long> fieldToIndexFPs = new HashMap<>();
+
+  private final IndexInput datIn;
+  private final AtomicLong ramBytesUsed;
+  private final int maxDoc;
+  private final DocValuesProducer delegate;
+  private final boolean merging;
+
+  public BKDTreeDocValuesProducer(DocValuesProducer delegate, SegmentReadState state) throws IOException {
+    String metaFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BKDTreeDocValuesFormat.META_EXTENSION);
+    ChecksumIndexInput metaIn = state.directory.openChecksumInput(metaFileName, state.context);
+    CodecUtil.checkIndexHeader(metaIn, BKDTreeDocValuesFormat.META_CODEC_NAME, BKDTreeDocValuesFormat.META_VERSION_START, BKDTreeDocValuesFormat.META_VERSION_CURRENT,
+                               state.segmentInfo.getId(), state.segmentSuffix);
+    int fieldCount = metaIn.readVInt();
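+    // Each entry maps a field number to the file pointer of that field's BKD index within the data file: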
+    for(int i=0;i<fieldCount;i++) {
+      int fieldNumber = metaIn.readVInt();
+      long indexFP = metaIn.readVLong();
+      fieldToIndexFPs.put(fieldNumber, indexFP);
+    }
+    CodecUtil.checkFooter(metaIn);
+    metaIn.close();
+
+    String datFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BKDTreeDocValuesFormat.DATA_EXTENSION);
+    datIn = state.directory.openInput(datFileName, state.context);
+    CodecUtil.checkIndexHeader(datIn, BKDTreeDocValuesFormat.DATA_CODEC_NAME, BKDTreeDocValuesFormat.DATA_VERSION_START, BKDTreeDocValuesFormat.DATA_VERSION_CURRENT,
+                               state.segmentInfo.getId(), state.segmentSuffix);
+    ramBytesUsed = new AtomicLong(RamUsageEstimator.shallowSizeOfInstance(getClass()));
+    maxDoc = state.segmentInfo.maxDoc();
+    this.delegate = delegate;
+    merging = false;
+  }
+
+  // clone for merge: we don't hang onto the BKDTrees we load
+  BKDTreeDocValuesProducer(BKDTreeDocValuesProducer orig) throws IOException {
+    assert Thread.holdsLock(orig);
+    datIn = orig.datIn.clone();
+    ramBytesUsed = new AtomicLong(orig.ramBytesUsed.get());
+    delegate = orig.delegate.getMergeInstance();
+    fieldToIndexFPs.putAll(orig.fieldToIndexFPs);
+    treeReaders.putAll(orig.treeReaders);
+    merging = true;
+    maxDoc = orig.maxDoc;
+  }
+
+  @Override
+  public synchronized SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
+    BKDTreeReader treeReader = treeReaders.get(field.name);
+    if (treeReader == null) {
+      // Lazy load
+      Long fp = fieldToIndexFPs.get(field.number);
+      if (fp == null) {
+        throw new IllegalArgumentException("this field was not indexed as a BKDPointField");
+      }
+      datIn.seek(fp);
+      treeReader = new BKDTreeReader(datIn, maxDoc);
+
+      // Only hang onto the reader when we are not merging:
+      if (merging == false) {
+        treeReaders.put(field.name, treeReader);
+        ramBytesUsed.addAndGet(treeReader.ramBytesUsed());
+      }
+    }
+
+    return new BKDTreeSortedNumericDocValues(treeReader, delegate.getSortedNumeric(field));
+  }
+
+  @Override
+  public void close() throws IOException {
+    IOUtils.close(datIn, delegate);
+  }
+
+  @Override
+  public void checkIntegrity() throws IOException {
+    CodecUtil.checksumEntireFile(datIn);
+  }
+
+  @Override
+  public NumericDocValues getNumeric(FieldInfo field) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public BinaryDocValues getBinary(FieldInfo field) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public SortedDocValues getSorted(FieldInfo field) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public SortedSetDocValues getSortedSet(FieldInfo field) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Bits getDocsWithField(FieldInfo field) throws IOException {
+    return delegate.getDocsWithField(field);
+  }
+
+  @Override
+  public synchronized Collection<Accountable> getChildResources() {
+    List<Accountable> resources = new ArrayList<>();
+    for(Map.Entry<String,BKDTreeReader> ent : treeReaders.entrySet()) {
+      resources.add(Accountables.namedAccountable("field " + ent.getKey(), ent.getValue()));
+    }
+
+    return resources;
+  }
+
+  @Override
+  public synchronized DocValuesProducer getMergeInstance() throws IOException {
+    return new BKDTreeDocValuesProducer(this);
+  }
+
+  @Override
+  public long ramBytesUsed() {
+    return ramBytesUsed.get();
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeReader.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeReader.java
new file mode 100644
index 0000000..59f9472
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeReader.java
@@ -0,0 +1,385 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/** Handles intersection of a shape with a BKD tree previously written with {@link BKDTreeWriter}.
+ *
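+ * <p>A minimal query sketch (the {@code reader}, {@code acceptDocs}, {@code sndv} and the
+ * bounding-box values below are placeholders, not part of this class):
+ * <pre>
+ *   // collect all docs whose point(s) fall inside the lat/lon box:
+ *   DocIdSet hits = reader.intersect(acceptDocs, 51.0, 52.0, -1.0, 1.0, sndv);
+ * </pre>
+ *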
+ * @lucene.experimental */
+
+final class BKDTreeReader implements Accountable {
+  private final int[] splitValues;
+  private final int leafNodeOffset;
+  private final long[] leafBlockFPs;
+  final int maxDoc;
+  final IndexInput in;
+
+  interface LatLonFilter {
+    boolean accept(double lat, double lon);
+  }
+
+  public BKDTreeReader(IndexInput in, int maxDoc) throws IOException {
+
+    // Read index:
+    int numLeaves = in.readVInt();
+    leafNodeOffset = numLeaves;
+
+    // The tree is a fully balanced binary tree, so there are numLeaves-1 inner nodes; since nodeIDs are 1-based, we allocate numLeaves entries and splitValues[0] is unused:
+    splitValues = new int[numLeaves];
+    for(int i=0;i<numLeaves;i++) {
+      splitValues[i] = in.readInt();
+    }
+    leafBlockFPs = new long[numLeaves];
+    for(int i=0;i<numLeaves;i++) {
+      leafBlockFPs[i] = in.readVLong();
+    }
+
+    this.maxDoc = maxDoc;
+    this.in = in;
+  }
+
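+  // Per-query state: intersect clones the IndexInput and allocates fresh bits, so a single
+  // BKDTreeReader instance can serve multiple queries without sharing a file pointer.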
+  private static final class QueryState {
+    final IndexInput in;
+    byte[] scratch = new byte[16];
+    final ByteArrayDataInput scratchReader = new ByteArrayDataInput(scratch);
+    final FixedBitSet bits;
+    final int latMinEnc;
+    final int latMaxEnc;
+    final int lonMinEnc;
+    final int lonMaxEnc;
+    final LatLonFilter latLonFilter;
+    final SortedNumericDocValues sndv;
+
+    public QueryState(IndexInput in, int maxDoc,
+                      int latMinEnc, int latMaxEnc,
+                      int lonMinEnc, int lonMaxEnc,
+                      LatLonFilter latLonFilter,
+                      SortedNumericDocValues sndv) {
+      this.in = in;
+      this.bits = new FixedBitSet(maxDoc);
+      this.latMinEnc = latMinEnc;
+      this.latMaxEnc = latMaxEnc;
+      this.lonMinEnc = lonMinEnc;
+      this.lonMaxEnc = lonMaxEnc;
+      this.latLonFilter = latLonFilter;
+      this.sndv = sndv;
+    }
+  }
+
+  public DocIdSet intersect(Bits acceptDocs, double latMin, double latMax, double lonMin, double lonMax, SortedNumericDocValues sndv) throws IOException {
+    return intersect(acceptDocs, latMin, latMax, lonMin, lonMax, null, sndv);
+  }
+
+  public DocIdSet intersect(Bits acceptDocs, double latMin, double latMax, double lonMin, double lonMax, LatLonFilter filter, SortedNumericDocValues sndv) throws IOException {
+    if (BKDTreeWriter.validLat(latMin) == false) {
+      throw new IllegalArgumentException("invalid latMin: " + latMin);
+    }
+    if (BKDTreeWriter.validLat(latMax) == false) {
+      throw new IllegalArgumentException("invalid latMax: " + latMax);
+    }
+    if (BKDTreeWriter.validLon(lonMin) == false) {
+      throw new IllegalArgumentException("invalid lonMin: " + lonMin);
+    }
+    if (BKDTreeWriter.validLon(lonMax) == false) {
+      throw new IllegalArgumentException("invalid lonMax: " + lonMax);
+    }
+
+    int latMinEnc = BKDTreeWriter.encodeLat(latMin);
+    int latMaxEnc = BKDTreeWriter.encodeLat(latMax);
+    int lonMinEnc = BKDTreeWriter.encodeLon(lonMin);
+    int lonMaxEnc = BKDTreeWriter.encodeLon(lonMax);
+
+    // TODO: we should use a sparse bit collector here, but BitDocIdSet.Builder is 2.4X slower than straight FixedBitSet.
+    // Maybe we should use a simple int[] (not de-duping) up until size X, then cut over.  Or maybe SentinelIntSet when it's
+    // small.
+
+    QueryState state = new QueryState(in.clone(), maxDoc,
+                                      latMinEnc, latMaxEnc,
+                                      lonMinEnc, lonMaxEnc,
+                                      filter,
+                                      sndv);
+
+    int hitCount = intersect(acceptDocs, state, 1,
+                             BKDTreeWriter.encodeLat(-90.0),
+                             BKDTreeWriter.encodeLat(Math.nextAfter(90.0, Double.POSITIVE_INFINITY)),
+                             BKDTreeWriter.encodeLon(-180.0),
+                             BKDTreeWriter.encodeLon(Math.nextAfter(180.0, Double.POSITIVE_INFINITY)));
+
+    // NOTE: hitCount is an over-estimate in the multi-valued case, and also when acceptDocs prunes docs on the addAll path:
+    return new BitDocIdSet(state.bits, hitCount);
+  }
+
+  /** Fast path: this is called when the query rect fully encompasses all cells under this node. */
+  private int addAll(Bits acceptDocs, QueryState state, int nodeID) throws IOException {
+    if (nodeID >= leafNodeOffset) {
+      // Leaf node
+      long fp = leafBlockFPs[nodeID-leafNodeOffset];
+      //System.out.println("    leaf nodeID=" + nodeID + " vs leafNodeOffset=" + leafNodeOffset + " fp=" + fp);
+      if (fp == 0) {
+        // Dead end node (adversary case):
+        return 0;
+      }
+      //IndexInput in = leafDISI.in;
+      state.in.seek(fp);
+      //allLeafDISI.reset(fp);
+      
+      //System.out.println("    seek to leafFP=" + fp);
+      // How many points are stored in this leaf cell:
+      int count = state.in.readVInt();
+      if (state.latLonFilter != null) {
+        // Handle this differently since we must also look up lat/lon:
+
+        int hitCount = 0;
+        for(int i=0;i<count;i++) {
+
+          int docID = state.in.readInt();
+          
+          if (acceptDocs == null || acceptDocs.get(docID)) {
+
+            state.sndv.setDocument(docID);
+
+            // How many values this doc has:
+            int docValueCount = state.sndv.count();
+            for(int j=0;j<docValueCount;j++) {
+              long enc = state.sndv.valueAt(j);
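+              // The sorted-numeric value packs the encoded lat in the high 32 bits and the encoded lon in the low 32 bits: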
+              int latEnc = (int) ((enc>>32) & 0xffffffffL);
+              int lonEnc = (int) (enc & 0xffffffffL);
+
+              // TODO: maybe we can fix LatLonFilter to operate on encoded forms?
+              if (state.latLonFilter.accept(BKDTreeWriter.decodeLat(latEnc), BKDTreeWriter.decodeLon(lonEnc))) {
+                state.bits.set(docID);
+                hitCount++;
+
+                // Stop processing values for this doc since it's now accepted:
+                break;
+              }
+            }
+          }
+        }
+
+        return hitCount;
+
+      } else if (acceptDocs != null) {
+        for(int i=0;i<count;i++) {
+          int docID = state.in.readInt();
+          if (acceptDocs.get(docID)) {
+            state.bits.set(docID);
+          }
+        }
+      } else {
+        for(int i=0;i<count;i++) {
+          int docID = state.in.readInt();
+          state.bits.set(docID);
+        }
+      }
+
+      //bits.or(allLeafDISI);
+      //return allLeafDISI.getHitCount();
+      return count;
+    } else {
+      int splitValue = splitValues[nodeID];
+
+      if (splitValue == Integer.MAX_VALUE) {
+        // Dead end node (adversary case):
+        return 0;
+      }
+
+      //System.out.println("  splitValue=" + splitValue);
+
+      //System.out.println("  addAll: inner");
+      int count = 0;
+      count += addAll(acceptDocs, state, 2*nodeID);
+      count += addAll(acceptDocs, state, 2*nodeID+1);
+      //System.out.println("  addAll: return count=" + count);
+      return count;
+    }
+  }
+
+  private int intersect(Bits acceptDocs, QueryState state,
+                        int nodeID,
+                        int cellLatMinEnc, int cellLatMaxEnc, int cellLonMinEnc, int cellLonMaxEnc)
+    throws IOException {
+
+    // 2.06 sec -> 1.52 sec for 225 OSM London queries:
+    if (state.latMinEnc <= cellLatMinEnc && state.latMaxEnc >= cellLatMaxEnc && state.lonMinEnc <= cellLonMinEnc && state.lonMaxEnc >= cellLonMaxEnc) {
+      // Optimize the case when the query fully contains this cell: we can
+      // recursively add all points without checking if they match the query:
+
+      /*
+      System.out.println("A: " + BKDTreeWriter.decodeLat(cellLatMinEnc)
+                         + " " + BKDTreeWriter.decodeLat(cellLatMaxEnc)
+                         + " " + BKDTreeWriter.decodeLon(cellLonMinEnc)
+                         + " " + BKDTreeWriter.decodeLon(cellLonMaxEnc));
+      */
+
+      return addAll(acceptDocs, state, nodeID);
+    }
+
+    long latRange = (long) cellLatMaxEnc - (long) cellLatMinEnc;
+    long lonRange = (long) cellLonMaxEnc - (long) cellLonMinEnc;
+
+    int dim;
+    if (latRange >= lonRange) {
+      dim = 0;
+    } else {
+      dim = 1;
+    }
+
+    //System.out.println("\nintersect node=" + nodeID + " vs " + leafNodeOffset);
+
+    if (nodeID >= leafNodeOffset) {
+      // Leaf node; scan and filter all points in this block:
+      //System.out.println("    intersect leaf nodeID=" + nodeID + " vs leafNodeOffset=" + leafNodeOffset + " fp=" + leafBlockFPs[nodeID-leafNodeOffset]);
+      int hitCount = 0;
+
+      //IndexInput in = leafDISI.in;
+      long fp = leafBlockFPs[nodeID-leafNodeOffset];
+      if (fp == 0) {
+        // Dead end node (adversary case):
+        //System.out.println("    dead-end leaf");
+        return 0;
+      }
+
+      /*
+      System.out.println("I: " + BKDTreeWriter.decodeLat(cellLatMinEnc)
+                         + " " + BKDTreeWriter.decodeLat(cellLatMaxEnc)
+                         + " " + BKDTreeWriter.decodeLon(cellLonMinEnc)
+                         + " " + BKDTreeWriter.decodeLon(cellLonMaxEnc));
+      */
+
+      state.in.seek(fp);
+
+      // How many points are stored in this leaf cell:
+      int count = state.in.readVInt();
+
+      for(int i=0;i<count;i++) {
+        int docID = state.in.readInt();
+        if (acceptDocs == null || acceptDocs.get(docID)) {
+          state.sndv.setDocument(docID);
+          // How many values this doc has:
+          int docValueCount = state.sndv.count();
+          for(int j=0;j<docValueCount;j++) {
+            long enc = state.sndv.valueAt(j);
+
+            int latEnc = (int) ((enc>>32) & 0xffffffffL);
+            int lonEnc = (int) (enc & 0xffffffffL);
+
+            if (latEnc >= state.latMinEnc &&
+                latEnc < state.latMaxEnc &&
+                lonEnc >= state.lonMinEnc &&
+                lonEnc < state.lonMaxEnc &&
+                (state.latLonFilter == null ||
+                 state.latLonFilter.accept(BKDTreeWriter.decodeLat(latEnc), BKDTreeWriter.decodeLon(lonEnc)))) {
+              state.bits.set(docID);
+              hitCount++;
+
+              // Stop processing values for this doc:
+              break;
+            }
+          }
+        }
+      }
+
+      return hitCount;
+
+      // this (using BitDocIdSet.Builder) is 3.4X slower!
+      /*
+      //bits.or(leafDISI);
+      //return leafDISI.getHitCount();
+      */
+
+    } else {
+
+      int splitValue = splitValues[nodeID];
+
+      if (splitValue == Integer.MAX_VALUE) {
+        // Dead end node (adversary case):
+        //System.out.println("    dead-end sub-tree");
+        return 0;
+      }
+
+      //System.out.println("  splitValue=" + splitValue);
+
+      int count = 0;
+
+      if (dim == 0) {
+
+        //System.out.println("  split on lat=" + splitValue);
+
+        // Inner node split on lat:
+
+        // Left node:
+        if (state.latMinEnc < splitValue) {
+          //System.out.println("  recurse left");
+          count += intersect(acceptDocs, state,
+                             2*nodeID,
+                             cellLatMinEnc, splitValue, cellLonMinEnc, cellLonMaxEnc);
+        }
+
+        // Right node:
+        if (state.latMaxEnc >= splitValue) {
+          //System.out.println("  recurse right");
+          count += intersect(acceptDocs, state,
+                             2*nodeID+1,
+                             splitValue, cellLatMaxEnc, cellLonMinEnc, cellLonMaxEnc);
+        }
+
+      } else {
+        // Inner node split on lon:
+        assert dim == 1;
+
+        // System.out.println("  split on lon=" + splitValue);
+
+        // Left node:
+        if (state.lonMinEnc < splitValue) {
+          // System.out.println("  recurse left");
+          count += intersect(acceptDocs, state,
+                             2*nodeID,
+                             cellLatMinEnc, cellLatMaxEnc, cellLonMinEnc, splitValue);
+        }
+
+        // Right node:
+        if (state.lonMaxEnc >= splitValue) {
+          // System.out.println("  recurse right");
+          count += intersect(acceptDocs, state,
+                             2*nodeID+1,
+                             cellLatMinEnc, cellLatMaxEnc, splitValue, cellLonMaxEnc);
+        }
+      }
+
+      return count;
+    }
+  }
+
+  @Override
+  public long ramBytesUsed() {
+    return splitValues.length * RamUsageEstimator.NUM_BYTES_INT + 
+      leafBlockFPs.length * RamUsageEstimator.NUM_BYTES_LONG;
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeSortedNumericDocValues.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeSortedNumericDocValues.java
new file mode 100644
index 0000000..1a2c179
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeSortedNumericDocValues.java
@@ -0,0 +1,49 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.SortedNumericDocValues;
+
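+/** Delegates all {@link SortedNumericDocValues} methods to the wrapped instance, while also
+ *  exposing the {@link BKDTreeReader} for this field. */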
+class BKDTreeSortedNumericDocValues extends SortedNumericDocValues {
+  final BKDTreeReader bkdTreeReader;
+  final SortedNumericDocValues delegate;
+
+  public BKDTreeSortedNumericDocValues(BKDTreeReader bkdTreeReader, SortedNumericDocValues delegate) {
+    this.bkdTreeReader = bkdTreeReader;
+    this.delegate = delegate;
+  }
+
+  public BKDTreeReader getBKDTreeReader() {
+    return bkdTreeReader;
+  }
+
+  @Override
+  public void setDocument(int doc) {
+    delegate.setDocument(doc);
+  }
+
+  @Override
+  public long valueAt(int index) {
+    return delegate.valueAt(index);
+  }
+
+  @Override
+  public int count() {
+    return delegate.count();
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeWriter.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeWriter.java
new file mode 100644
index 0000000..2d02ee9
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/BKDTreeWriter.java
@@ -0,0 +1,897 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.InPlaceMergeSorter;
+import org.apache.lucene.util.LongBitSet;
+import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
+import org.apache.lucene.util.OfflineSorter;
+import org.apache.lucene.util.RamUsageEstimator;
+
+// TODO
+//   - could we just "use postings" to map leaf -> docIDs?
+//   - the polygon query really should be 2-phase
+//   - if we could merge trees, we could drop delegating to wrapped DV?
+//   - we could also index "auto-prefix terms" here, and use better compression, and maybe only use for the "fully contained" case so we'd
+//     only index docIDs
+//   - the index could be efficiently encoded as an FST, so we don't have wasteful
+//     (monotonic) long[] leafBlockFPs; or we could use MonotonicLongValues ... but then
+//     the index is already plenty small: 60M OSM points --> 1.1 MB with 128 points
+//     per leaf, and you can reduce that by putting more points per leaf
+//   - we can quantize the split values to 2 bytes (short): http://people.csail.mit.edu/tmertens/papers/qkdtree.pdf
+//   - we could use threads while building; the higher nodes are very parallelizable
+//   - generalize to N dimensions? I think there are reasonable use cases here, e.g.
+//     2 dimensional points to store houses, plus e.g. 3rd dimension for "household income"
+//   - geo3d integration should be straightforward?  better accuracy, faster performance for small-poly-with-bbox cases?  right now the poly
+//     check is very costly...
+
+/** Recursively builds a BKD tree to assign all incoming points to smaller
+ *  and smaller rectangles until the number of points in a given
+ *  rectangle is &lt;= <code>maxPointsInLeafNode</code>.  The tree is
+ *  fully balanced, which means the leaf nodes will have between 50% and 100% of
+ *  the requested <code>maxPointsInLeafNode</code>, except for the adversarial case
+ *  of indexing exactly the same point many times.
+ *
+ *  <p>
+ *  See <a href="https://www.cs.duke.edu/~pankaj/publications/papers/bkd-sstd.pdf">this paper</a> for details.
+ *
+ *  <p>This consumes heap during writing: it allocates a <code>LongBitSet(numPoints)</code>, 
+ *  and for any node with fewer than <code>maxPointsSortInHeap</code> points, it holds
+ *  the points in memory as simple java arrays.
+ *
+ *  <p>
+ *  <b>NOTE</b>: This can write at most Integer.MAX_VALUE * <code>maxPointsInLeafNode</code> total points.
+ *
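+ *  <p>
+ *  A minimal usage sketch (the docID and the {@code out} {@link IndexOutput} below are placeholders):
+ *  <pre>
+ *    BKDTreeWriter writer = new BKDTreeWriter();
+ *    writer.add(40.7143528, -74.0059731, 42);   // lat, lon, docID
+ *    long indexFP = writer.finish(out);         // returns the file offset where the index was written
+ *  </pre>
+ *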
+ * @lucene.experimental */
+
+class BKDTreeWriter {
+
+  // latEnc (int) + lonEnc (int) + ord (long) + docID (int)
+  static final int BYTES_PER_DOC = RamUsageEstimator.NUM_BYTES_LONG + 3 * RamUsageEstimator.NUM_BYTES_INT;
+
+  //static final boolean DEBUG = false;
+
+  public static final int DEFAULT_MAX_POINTS_IN_LEAF_NODE = 1024;
+
+  /** This works out to max of ~10 MB peak heap tied up during writing: */
+  public static final int DEFAULT_MAX_POINTS_SORT_IN_HEAP = 128*1024;
+
+  private final byte[] scratchBytes = new byte[BYTES_PER_DOC];
+  private final ByteArrayDataOutput scratchBytesOutput = new ByteArrayDataOutput(scratchBytes);
+
+  private OfflineSorter.ByteSequencesWriter writer;
+  private GrowingHeapLatLonWriter heapWriter;
+
+  private Path tempInput;
+  private Path tempDir;
+  private final int maxPointsInLeafNode;
+  private final int maxPointsSortInHeap;
+
+  private long pointCount;
+
+  public BKDTreeWriter() throws IOException {
+    this(DEFAULT_MAX_POINTS_IN_LEAF_NODE, DEFAULT_MAX_POINTS_SORT_IN_HEAP);
+  }
+
+  // TODO: instead of maxPointsSortInHeap, change to maxMBHeap ... the mapping is non-obvious:
+  public BKDTreeWriter(int maxPointsInLeafNode, int maxPointsSortInHeap) throws IOException {
+    verifyParams(maxPointsInLeafNode, maxPointsSortInHeap);
+    this.maxPointsInLeafNode = maxPointsInLeafNode;
+    this.maxPointsSortInHeap = maxPointsSortInHeap;
+
+    // We buffer the first maxPointsSortInHeap points in heap, then cut over to offline sorting for additional points:
+    heapWriter = new GrowingHeapLatLonWriter(maxPointsSortInHeap);
+  }
+
+  public static void verifyParams(int maxPointsInLeafNode, int maxPointsSortInHeap) {
+    if (maxPointsInLeafNode <= 0) {
+      throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
+    }
+    if (maxPointsInLeafNode > ArrayUtil.MAX_ARRAY_LENGTH) {
+      throw new IllegalArgumentException("maxPointsInLeafNode must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " + ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsInLeafNode);
+    }
+    if (maxPointsSortInHeap < maxPointsInLeafNode) {
+      throw new IllegalArgumentException("maxPointsSortInHeap must be >= maxPointsInLeafNode; got " + maxPointsSortInHeap + " vs maxPointsInLeafNode="+ maxPointsInLeafNode);
+    }
+    if (maxPointsSortInHeap > ArrayUtil.MAX_ARRAY_LENGTH) {
+      throw new IllegalArgumentException("maxPointsSortInHeap must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " + ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsSortInHeap);
+    }
+  }
+
+  public void add(double lat, double lon, int docID) throws IOException {
+
+    if (validLat(lat) == false) {
+      throw new IllegalArgumentException("invalid lat: " + lat);
+    }
+    if (validLon(lon) == false) {
+      throw new IllegalArgumentException("invalid lon: " + lon);
+    }
+
+    // Quantize to 32-bit precision, which is plenty: ~0.0093 meter precision (longitude) at the equator
+    add(encodeLat(lat), encodeLon(lon), docID);
+  }
+
+  /** If the current segment has too many points then we switch over to temp files / offline sort. */
+  private void switchToOffline() throws IOException {
+
+    // OfflineSorter isn't thread safe, but our own private tempDir works around this:
+    tempDir = Files.createTempDirectory(OfflineSorter.defaultTempDir(), BKDTreeWriter.class.getSimpleName());
+
+    // For each .add we just append to this input file, then in .finish we sort this input and recursively build the tree:
+    tempInput = tempDir.resolve("in");
+    writer = new OfflineSorter.ByteSequencesWriter(tempInput);
+    for(int i=0;i<pointCount;i++) {
+      scratchBytesOutput.reset(scratchBytes);
+      scratchBytesOutput.writeInt(heapWriter.latEncs[i]);
+      scratchBytesOutput.writeInt(heapWriter.lonEncs[i]);
+      scratchBytesOutput.writeVInt(heapWriter.docIDs[i]);
+      scratchBytesOutput.writeVLong(i);
+      // TODO: can/should OfflineSorter optimize the fixed-width case?
+      writer.write(scratchBytes, 0, scratchBytes.length);
+    }
+
+    heapWriter = null;
+  }
+
+  void add(int latEnc, int lonEnc, int docID) throws IOException {
+    assert latEnc > Integer.MIN_VALUE;
+    assert latEnc < Integer.MAX_VALUE;
+    assert lonEnc > Integer.MIN_VALUE;
+    assert lonEnc < Integer.MAX_VALUE;
+
+    if (pointCount >= maxPointsSortInHeap) {
+      if (writer == null) {
+        switchToOffline();
+      }
+      scratchBytesOutput.reset(scratchBytes);
+      scratchBytesOutput.writeInt(latEnc);
+      scratchBytesOutput.writeInt(lonEnc);
+      scratchBytesOutput.writeVInt(docID);
+      scratchBytesOutput.writeVLong(pointCount);
+      writer.write(scratchBytes, 0, scratchBytes.length);
+    } else {
+      // Not too many points added yet, continue using heap:
+      heapWriter.append(latEnc, lonEnc, pointCount, docID);
+    }
+
+    pointCount++;
+  }
+
+  /** Converts the incoming {@link ByteSequencesWriter} file to a fixed-width-per-entry file, because we need to be able to slice
+   *  as we recurse in {@link #build}. */
+  private LatLonWriter convertToFixedWidth(Path in) throws IOException {
+    BytesRefBuilder scratch = new BytesRefBuilder();
+    scratch.grow(BYTES_PER_DOC);
+    BytesRef bytes = scratch.get();
+    ByteArrayDataInput dataReader = new ByteArrayDataInput();
+
+    OfflineSorter.ByteSequencesReader reader = null;
+    LatLonWriter sortedWriter = null;
+    boolean success = false;
+    try {
+      reader = new OfflineSorter.ByteSequencesReader(in);
+      sortedWriter = getWriter(pointCount);
+      for (long i=0;i<pointCount;i++) {
+        boolean result = reader.read(scratch);
+        assert result;
+        dataReader.reset(bytes.bytes, bytes.offset, bytes.length);
+        int latEnc = dataReader.readInt();
+        int lonEnc = dataReader.readInt();
+        int docID = dataReader.readVInt();
+        long ord = dataReader.readVLong();
+        assert docID >= 0: "docID=" + docID;
+        assert latEnc > Integer.MIN_VALUE;
+        assert latEnc < Integer.MAX_VALUE;
+        assert lonEnc > Integer.MIN_VALUE;
+        assert lonEnc < Integer.MAX_VALUE;
+        sortedWriter.append(latEnc, lonEnc, ord, docID);
+      }
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(sortedWriter, reader);
+      } else {
+        IOUtils.closeWhileHandlingException(reader);
+        try {
+          sortedWriter.destroy();
+        } catch (Throwable t) {
+          // Suppress to keep throwing original exc
+        }
+      }
+    }
+
+    return sortedWriter;
+  }
+
+  private LatLonWriter sort(boolean lon) throws IOException {
+    if (heapWriter != null) {
+
+      assert pointCount < Integer.MAX_VALUE;
+
+      // All buffered points are still in heap
+      new InPlaceMergeSorter() {
+        @Override
+        protected void swap(int i, int j) {
+          int docID = heapWriter.docIDs[i];
+          heapWriter.docIDs[i] = heapWriter.docIDs[j];
+          heapWriter.docIDs[j] = docID;
+
+          long ord = heapWriter.ords[i];
+          heapWriter.ords[i] = heapWriter.ords[j];
+          heapWriter.ords[j] = ord;
+
+          int latEnc = heapWriter.latEncs[i];
+          heapWriter.latEncs[i] = heapWriter.latEncs[j];
+          heapWriter.latEncs[j] = latEnc;
+
+          int lonEnc = heapWriter.lonEncs[i];
+          heapWriter.lonEncs[i] = heapWriter.lonEncs[j];
+          heapWriter.lonEncs[j] = lonEnc;
+        }
+
+        @Override
+        protected int compare(int i, int j) {
+          int cmp;
+          if (lon) {
+            cmp = Integer.compare(heapWriter.lonEncs[i], heapWriter.lonEncs[j]);
+          } else {
+            cmp = Integer.compare(heapWriter.latEncs[i], heapWriter.latEncs[j]);
+          }
+          if (cmp != 0) {
+            return cmp;
+          }
+
+          // Tie-break
+          cmp = Integer.compare(heapWriter.docIDs[i], heapWriter.docIDs[j]);
+          if (cmp != 0) {
+            return cmp;
+          }
+
+          return Long.compare(heapWriter.ords[i], heapWriter.ords[j]);
+        }
+      }.sort(0, (int) pointCount);
+
+      HeapLatLonWriter sorted = new HeapLatLonWriter((int) pointCount);
+      for(int i=0;i<pointCount;i++) {
+        sorted.append(heapWriter.latEncs[i],
+                      heapWriter.lonEncs[i],
+                      heapWriter.ords[i],
+                      heapWriter.docIDs[i]);
+      }
+
+      return sorted;
+    } else {
+
+      // Offline sort:
+      assert tempDir != null;
+
+      final ByteArrayDataInput reader = new ByteArrayDataInput();
+      Comparator<BytesRef> cmp = new Comparator<BytesRef>() {
+        private final ByteArrayDataInput readerB = new ByteArrayDataInput();
+
+        @Override
+        public int compare(BytesRef a, BytesRef b) {
+          reader.reset(a.bytes, a.offset, a.length);
+          final int latAEnc = reader.readInt();
+          final int lonAEnc = reader.readInt();
+          final int docIDA = reader.readVInt();
+          final long ordA = reader.readVLong();
+
+          reader.reset(b.bytes, b.offset, b.length);
+          final int latBEnc = reader.readInt();
+          final int lonBEnc = reader.readInt();
+          final int docIDB = reader.readVInt();
+          final long ordB = reader.readVLong();
+
+          int cmp;
+          if (lon) {
+            cmp = Integer.compare(lonAEnc, lonBEnc);
+          } else {
+            cmp = Integer.compare(latAEnc, latBEnc);
+          }
+          if (cmp != 0) {
+            return cmp;
+          }
+
+          // Tie-break
+          cmp = Integer.compare(docIDA, docIDB);
+          if (cmp != 0) {
+            return cmp;
+          }
+
+          return Long.compare(ordA, ordB);
+        }
+      };
+
+      Path sorted = tempDir.resolve("sorted");
+      boolean success = false;
+      try {
+        OfflineSorter latSorter = new OfflineSorter(cmp, OfflineSorter.BufferSize.automatic(), tempDir, OfflineSorter.MAX_TEMPFILES);
+        latSorter.sort(tempInput, sorted);
+        LatLonWriter writer = convertToFixedWidth(sorted);
+        success = true;
+        return writer;
+      } finally {
+        if (success) {
+          IOUtils.rm(sorted);
+        } else {
+          IOUtils.deleteFilesIgnoringExceptions(sorted);
+        }
+      }
+    }
+  }
+
+  /** Writes the BKD tree to the provided {@link IndexOutput} and returns the file offset where index was written. */
+  public long finish(IndexOutput out) throws IOException {
+    //System.out.println("\nBKDTreeWriter.finish pointCount=" + pointCount + " out=" + out + " heapWriter=" + heapWriter);
+
+    if (writer != null) {
+      writer.close();
+    }
+
+    LongBitSet bitSet = new LongBitSet(pointCount);
+
+    // TODO: we should use in-memory sort here, if number of points is small enough:
+
+    long countPerLeaf = pointCount;
+    long innerNodeCount = 1;
+
+    while (countPerLeaf > maxPointsInLeafNode) {
+      countPerLeaf /= 2;
+      innerNodeCount *= 2;
+    }
+
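+    // e.g. with pointCount=1000 and maxPointsInLeafNode=300, the loop above yields countPerLeaf=250
+    // and innerNodeCount=4, i.e. 4 leaves holding ~250 points each: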
+    //System.out.println("innerNodeCount=" + innerNodeCount);
+
+    if (1+2*innerNodeCount >= Integer.MAX_VALUE) {
+      throw new IllegalStateException("too many nodes; increase maxPointsInLeafNode (currently " + maxPointsInLeafNode + ") and reindex");
+    }
+
+    innerNodeCount--;
+
+    int numLeaves = (int) (innerNodeCount+1);
+
+    // Indexed by nodeID, but first (root) nodeID is 1
+    int[] splitValues = new int[numLeaves];
+
+    // There are innerNodeCount+1 leaves, because leaf count is a power of 2 (e.g. 8) while innerNodeCount is a power of 2 minus 1 (e.g. 7)
+    long[] leafBlockFPs = new long[numLeaves];
+
+    // Make sure the math above "worked":
+    assert pointCount / splitValues.length <= maxPointsInLeafNode: "pointCount=" + pointCount + " splitValues.length=" + splitValues.length + " maxPointsInLeafNode=" + maxPointsInLeafNode;
+    //System.out.println("  avg pointsPerLeaf=" + (pointCount/splitValues.length));
+
+    // Sort all docs once by lat, once by lon:
+    LatLonWriter latSortedWriter = null;
+    LatLonWriter lonSortedWriter = null;
+
+    boolean success = false;
+    try {
+      lonSortedWriter = sort(true);
+      latSortedWriter = sort(false);
+      heapWriter = null;
+
+      build(1, numLeaves, new PathSlice(latSortedWriter, 0, pointCount),
+            new PathSlice(lonSortedWriter, 0, pointCount),
+            bitSet, out,
+            Integer.MIN_VALUE, Integer.MAX_VALUE,
+            Integer.MIN_VALUE, Integer.MAX_VALUE,
+            //encodeLat(-90.0), encodeLat(Math.nextAfter(90.0, Double.POSITIVE_INFINITY)),
+            //encodeLon(-180.0), encodeLon(Math.nextAfter(180.0, Double.POSITIVE_INFINITY)),
+            splitValues,
+            leafBlockFPs);
+      success = true;
+    } finally {
+      if (success) {
+        latSortedWriter.destroy();
+        lonSortedWriter.destroy();
+        IOUtils.rm(tempInput);
+      } else {
+        try {
+          latSortedWriter.destroy();
+        } catch (Throwable t) {
+          // Suppress to keep throwing original exc
+        }
+        try {
+          lonSortedWriter.destroy();
+        } catch (Throwable t) {
+          // Suppress to keep throwing original exc
+        }
+        IOUtils.deleteFilesIgnoringExceptions(tempInput);
+      }
+    }
+
+    //System.out.println("Total nodes: " + innerNodeCount);
+
+    // Write index:
+    long indexFP = out.getFilePointer();
+    out.writeVInt(numLeaves);
+
+    // NOTE: splitValues[0] is unused, because nodeID is 1-based:
+    for (int i=0;i<splitValues.length;i++) {
+      out.writeInt(splitValues[i]);
+    }
+    for (int i=0;i<leafBlockFPs.length;i++) {
+      out.writeVLong(leafBlockFPs[i]);
+    }
+
+    if (tempDir != null) {
+      // If we had to go offline, we should have removed all temp files we wrote:
+      assert directoryIsEmpty(tempDir);
+      IOUtils.rm(tempDir);
+    }
+
+    return indexFP;
+  }
+
+  // Called only from assert
+  private boolean directoryIsEmpty(Path in) {
+    try (DirectoryStream<Path> dir = Files.newDirectoryStream(in)) {
+      for (Path path : dir) {
+        assert false: "dir=" + in + " still has file=" + path;
+        return false;
+      }
+    } catch (IOException ioe) {
+      // Just ignore: we are only called from assert
+    }
+    return true;
+  }
+
+  /** Sliced reference to points in an OfflineSorter.ByteSequencesWriter file. */
+  private static final class PathSlice {
+    final LatLonWriter writer;
+    final long start;
+    final long count;
+
+    public PathSlice(LatLonWriter writer, long start, long count) {
+      this.writer = writer;
+      this.start = start;
+      this.count = count;
+    }
+
+    @Override
+    public String toString() {
+      return "PathSlice(start=" + start + " count=" + count + " writer=" + writer + ")";
+    }
+  }
+
+  /** Marks bits for the ords (points) that belong in the left sub tree. */
+  private long markLeftTree(int splitDim, PathSlice source, LongBitSet bitSet, int[] splitValueRet,
+                            int minLatEnc, int maxLatEnc, int minLonEnc, int maxLonEnc) throws IOException {
+
+    // This is the initial size of our left tree, but we may lower it below for the == case:
+    long leftCount = source.count / 2;
+
+    // Read the split value:
+    //if (DEBUG) System.out.println("  leftCount=" + leftCount + " vs " + source.count);
+    LatLonReader reader = source.writer.getReader(source.start + leftCount);
+    boolean success = false;
+    int splitValue;
+    try {
+      boolean result = reader.next();
+      assert result;
+
+      int latSplitEnc = reader.latEnc();
+      assert latSplitEnc >= minLatEnc && latSplitEnc < maxLatEnc: "latSplitEnc=" + latSplitEnc + " minLatEnc=" + minLatEnc + " maxLatEnc=" + maxLatEnc;
+
+      int lonSplitEnc = reader.lonEnc();
+      assert lonSplitEnc >= minLonEnc && lonSplitEnc < maxLonEnc: "lonSplitEnc=" + lonSplitEnc + " minLonEnc=" + minLonEnc + " maxLonEnc=" + maxLonEnc;
+
+      if (splitDim == 0) {
+        splitValue = latSplitEnc;
+        //if (DEBUG) System.out.println("  splitValue=" + decodeLat(splitValue));
+      } else {
+        splitValue = lonSplitEnc;
+        //if (DEBUG) System.out.println("  splitValue=" + decodeLon(splitValue));
+      }
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(reader);
+      } else {
+        IOUtils.closeWhileHandlingException(reader);
+      }
+    }
+
+    splitValueRet[0] = splitValue;
+
+    // Mark ords that fall into the left half, and also handle the == boundary case:
+    assert bitSet.cardinality() == 0: "cardinality=" + bitSet.cardinality();
+
+    success = false;
+    reader = source.writer.getReader(source.start);
+    try {
+      int lastValue = Integer.MIN_VALUE;
+      for (int i=0;i<leftCount;i++) {
+        boolean result = reader.next();
+        assert result;
+        int latEnc = reader.latEnc();
+        int lonEnc = reader.lonEnc();
+
+        int value;
+        if (splitDim == 0) {
+          value = latEnc;
+        } else {
+          value = lonEnc;
+        }
+
+        // Our input source is supposed to be sorted on the incoming dimension:
+        assert value >= lastValue;
+        lastValue = value;
+
+        if (value == splitValue) {
+          // If we have identical points at the split, we move the count back to before the identical points:
+          leftCount = i;
+          break;
+        }
+        assert value < splitValue: "i=" + i + " value=" + value + " vs splitValue=" + splitValue;
+        long ord = reader.ord();
+        int docID = reader.docID();
+        assert docID >= 0: "docID=" + docID + " reader=" + reader;
+
+        // We should never see dup ords:
+        assert bitSet.get(ord) == false;
+        bitSet.set(ord);
+      }
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(reader);
+      } else {
+        IOUtils.closeWhileHandlingException(reader);
+      }
+    }
+
+    assert leftCount == bitSet.cardinality(): "leftCount=" + leftCount + " cardinality=" + bitSet.cardinality();
+
+    return leftCount;
+  }
+
+  /** dim=0 means we split on lat, dim=1 means lon.  The incoming PathSlice for the dim we will split is already partitioned/sorted. */
+  private void build(int nodeID, int leafNodeOffset,
+                     PathSlice lastLatSorted,
+                     PathSlice lastLonSorted,
+                     LongBitSet bitSet,
+                     IndexOutput out,
+                     int minLatEnc, int maxLatEnc, int minLonEnc, int maxLonEnc,
+                     int[] splitValues,
+                     long[] leafBlockFPs) throws IOException {
+
+    PathSlice source;
+    PathSlice nextSource;
+
+    long latRange = (long) maxLatEnc - (long) minLatEnc;
+    long lonRange = (long) maxLonEnc - (long) minLonEnc;
+
+    int splitDim;
+    if (latRange >= lonRange) {
+      // Split by lat:
+      splitDim = 0;
+      source = lastLatSorted;
+      nextSource = lastLonSorted;
+    } else {
+      // Split by lon:
+      splitDim = 1;
+      source = lastLonSorted;
+      nextSource = lastLatSorted;
+    }
+
+    long count = source.count;
+
+    //if (DEBUG) System.out.println("\nBUILD: nodeID=" + nodeID + " leafNodeOffset=" + leafNodeOffset + " splitDim=" + splitDim + "\n  lastLatSorted=" + lastLatSorted + "\n  lastLonSorted=" + lastLonSorted + "\n  count=" + count + " lat=" + decodeLat(minLatEnc) + " TO " + decodeLat(maxLatEnc) + " lon=" + decodeLon(minLonEnc) + " TO " + decodeLon(maxLonEnc));
+
+    if (count == 0) {
+      // Dead end in the tree, due to adversary cases, e.g. many identical points:
+      if (nodeID < splitValues.length) {
+        // Sentinel used to mark that the tree is dead under here:
+        splitValues[nodeID] = Integer.MAX_VALUE;
+      }
+      //if (DEBUG) System.out.println("  dead-end sub-tree");
+      return;
+    }
+
+    if (nodeID >= leafNodeOffset) {
+      // Leaf node: write block
+      //if (DEBUG) System.out.println("  leaf");
+      assert maxLatEnc > minLatEnc;
+      assert maxLonEnc > minLonEnc;
+
+      //System.out.println("\nleaf:\n  lat range: " + ((long) maxLatEnc-minLatEnc));
+      //System.out.println("  lon range: " + ((long) maxLonEnc-minLonEnc));
+
+      assert count == source.count: "count=" + count + " vs source.count=" + source.count;
+
+      // Sort by docID in the leaf so we can .or(DISI) at search time:
+      LatLonReader reader = source.writer.getReader(source.start);
+
+      int[] docIDs = new int[(int) count];
+
+      boolean success = false;
+      try {
+        for (int i=0;i<source.count;i++) {
+
+          // NOTE: we discard ord at this point; we only needed it temporarily
+          // during building to uniquely identify each point to properly handle
+          // the multi-valued case (one docID having multiple values):
+
+          // We also discard lat/lon, since at search time, we reside on the
+          // wrapped doc values for this:
+
+          boolean result = reader.next();
+          assert result;
+          docIDs[i] = reader.docID();
+        }
+        success = true;
+      } finally {
+        if (success) {
+          IOUtils.close(reader);
+        } else {
+          IOUtils.closeWhileHandlingException(reader);
+        }
+      }
+
+      Arrays.sort(docIDs);
+
+      // Dedup docIDs: for the multi-valued case where more than one value for the doc
+      // wound up in this leaf cell, we only need to store the docID once:
+      int lastDocID = -1;
+      int uniqueCount = 0;
+      for(int i=0;i<docIDs.length;i++) {
+        int docID = docIDs[i];
+        if (docID != lastDocID) {
+          uniqueCount++;
+          lastDocID = docID;
+        }
+      }
+      assert uniqueCount <= count;
+
+      long startFP = out.getFilePointer();
+      out.writeVInt(uniqueCount);
+
+      // Save the block file pointer:
+      leafBlockFPs[nodeID - leafNodeOffset] = startFP;
+      //System.out.println("    leafFP=" + startFP);
+
+      lastDocID = -1;
+      for (int i=0;i<docIDs.length;i++) {
+        // Absolute int encode; with "vInt of deltas" encoding, the .kdd size dropped from
+        // 697 MB -> 539 MB, but query time for 225 queries went from 1.65 sec -> 2.64 sec.
+        // I think if we also indexed prefix terms here we could do less costly compression
+        // on those lists:
+        int docID = docIDs[i];
+        if (docID != lastDocID) {
+          out.writeInt(docID);
+          lastDocID = docID;
+        }
+      }
+      //long endFP = out.getFilePointer();
+      //System.out.println("  bytes/doc: " + ((endFP - startFP) / count));
+    } else {
+      // Inner node: sort, partition/recurse
+
+      assert nodeID < splitValues.length: "nodeID=" + nodeID + " splitValues.length=" + splitValues.length;
+
+      int[] splitValueArray = new int[1];
+
+      assert source.count == count;
+      long leftCount = markLeftTree(splitDim, source, bitSet, splitValueArray,
+                                    minLatEnc, maxLatEnc, minLonEnc, maxLonEnc);
+      int splitValue = splitValueArray[0];
+
+      // TODO: we could save split value in here so we don't have to re-open file later:
+
+      // Partition nextSource into sorted left and right sets, so we can recurse.  This is somewhat hairy: we partition the next lon set
+      // according to how we had just partitioned the lat set, and vice/versa:
+
+      LatLonWriter leftWriter = null;
+      LatLonWriter rightWriter = null;
+      LatLonReader reader = null;
+
+      boolean success = false;
+
+      int nextLeftCount = 0;
+
+      try {
+        leftWriter = getWriter(leftCount);
+        rightWriter = getWriter(nextSource.count - leftCount);
+
+        //if (DEBUG) System.out.println("  partition:\n    splitValueEnc=" + splitValue + "\n    " + nextSource + "\n      --> leftSorted=" + leftWriter + "\n      --> rightSorted=" + rightWriter + ")");
+        assert nextSource.count == count;
+        reader = nextSource.writer.getReader(nextSource.start);
+
+        // TODO: we could compute the split value here for each sub-tree and save an O(N) pass on recursion, but makes code hairier and only
+        // changes the constant factor of building, not the big-oh:
+        for (int i=0;i<nextSource.count;i++) {
+          boolean result = reader.next();
+          assert result;
+          int latEnc = reader.latEnc();
+          int lonEnc = reader.lonEnc();
+          long ord = reader.ord();
+          int docID = reader.docID();
+          assert docID >= 0: "docID=" + docID + " reader=" + reader;
+          if (bitSet.get(ord)) {
+            if (splitDim == 0) {
+              assert latEnc < splitValue: "latEnc=" + latEnc + " splitValue=" + splitValue;
+            } else {
+              assert lonEnc < splitValue: "lonEnc=" + lonEnc + " splitValue=" + splitValue;
+            }
+            leftWriter.append(latEnc, lonEnc, ord, docID);
+            nextLeftCount++;
+          } else {
+            if (splitDim == 0) {
+              assert latEnc >= splitValue: "latEnc=" + latEnc + " splitValue=" + splitValue;
+            } else {
+              assert lonEnc >= splitValue: "lonEnc=" + lonEnc + " splitValue=" + splitValue;
+            }
+            rightWriter.append(latEnc, lonEnc, ord, docID);
+          }
+        }
+        bitSet.clear(0, pointCount);
+        success = true;
+      } finally {
+        if (success) {
+          IOUtils.close(reader, leftWriter, rightWriter);
+        } else {
+          IOUtils.closeWhileHandlingException(reader, leftWriter, rightWriter);
+        }
+      }
+
+      assert leftCount == nextLeftCount: "leftCount=" + leftCount + " nextLeftCount=" + nextLeftCount;
+      assert count == nextSource.count: "count=" + count + " nextSource.count=" + nextSource.count;
+
+      success = false;
+      try {
+        if (splitDim == 0) {
+          //if (DEBUG) System.out.println("  recurse left");
+          build(2*nodeID, leafNodeOffset,
+                new PathSlice(source.writer, source.start, leftCount),
+                new PathSlice(leftWriter, 0, leftCount),
+                bitSet,
+                out,
+                minLatEnc, splitValue, minLonEnc, maxLonEnc,
+                splitValues, leafBlockFPs);
+          leftWriter.destroy();
+
+          //if (DEBUG) System.out.println("  recurse right");
+          build(2*nodeID+1, leafNodeOffset,
+                new PathSlice(source.writer, source.start+leftCount, count-leftCount),
+                new PathSlice(rightWriter, 0, count - leftCount),
+                bitSet,
+                out,
+                splitValue, maxLatEnc, minLonEnc, maxLonEnc,
+                splitValues, leafBlockFPs);
+          rightWriter.destroy();
+        } else {
+          //if (DEBUG) System.out.println("  recurse left");
+          build(2*nodeID, leafNodeOffset,
+                new PathSlice(leftWriter, 0, leftCount),
+                new PathSlice(source.writer, source.start, leftCount),
+                bitSet,
+                out,
+                minLatEnc, maxLatEnc, minLonEnc, splitValue,
+                splitValues, leafBlockFPs);
+
+          leftWriter.destroy();
+
+          //if (DEBUG) System.out.println("  recurse right");
+          build(2*nodeID+1, leafNodeOffset,
+                new PathSlice(rightWriter, 0, count-leftCount),
+                new PathSlice(source.writer, source.start+leftCount, count-leftCount),    
+                bitSet,
+                out,
+                minLatEnc, maxLatEnc, splitValue, maxLonEnc,
+                splitValues, leafBlockFPs);
+          rightWriter.destroy();
+        }
+        success = true;
+      } finally {
+        if (success == false) {
+          try {
+            leftWriter.destroy();
+          } catch (Throwable t) {
+            // Suppress to keep throwing original exc
+          }
+          try {
+            rightWriter.destroy();
+          } catch (Throwable t) {
+            // Suppress to keep throwing original exc
+          }
+        }
+      }
+
+      splitValues[nodeID] = splitValue;
+    }
+  }
+
+  LatLonWriter getWriter(long count) throws IOException {
+    if (count < maxPointsSortInHeap) {
+      return new HeapLatLonWriter((int) count);
+    } else {
+      return new OfflineLatLonWriter(tempDir, count);
+    }
+  }
+
+  // TODO: move/share all this into GeoUtils
+
+  // We allow one iota over the true max:
+  static final double MAX_LAT_INCL = Math.nextAfter(90.0D, Double.POSITIVE_INFINITY);
+  static final double MAX_LON_INCL = Math.nextAfter(180.0D, Double.POSITIVE_INFINITY);
+  static final double MIN_LAT_INCL = -90.0D;
+  static final double MIN_LON_INCL = -180.0D;
+
+  static boolean validLat(double lat) {
+    return Double.isNaN(lat) == false && lat >= MIN_LAT_INCL && lat <= MAX_LAT_INCL;
+  }
+
+  static boolean validLon(double lon) {
+    return Double.isNaN(lon) == false && lon >= MIN_LON_INCL && lon <= MAX_LON_INCL;
+  }
+
+  private static final int BITS = 32;
+
+  // -3 so valid lat/lon never hit the Integer.MIN_VALUE nor Integer.MAX_VALUE:
+  private static final double LON_SCALE = ((0x1L<<BITS)-3)/360.0D;
+  private static final double LAT_SCALE = ((0x1L<<BITS)-3)/180.0D;
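+  // e.g. encodeLat(90.0) maps to Integer.MAX_VALUE-1 (2147483646), leaving Integer.MAX_VALUE free as the dead-node sentinel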
+
+  /** Max quantization error for both lat and lon when encoding/decoding into 32 bits */
+  public static final double TOLERANCE = 1E-7;
+
+  /** Quantizes double (64 bit) latitude into 32 bits */
+  static int encodeLat(double lat) {
+    assert validLat(lat): "lat=" + lat;
+    long x = (long) (lat * LAT_SCALE);
+    // We use Integer.MAX_VALUE as a sentinel:
+    assert x < Integer.MAX_VALUE: "lat=" + lat + " mapped to Integer.MAX_VALUE + " + (x - Integer.MAX_VALUE);
+    assert x > Integer.MIN_VALUE: "lat=" + lat + " mapped to Integer.MIN_VALUE";
+    return (int) x;
+  }
+
+  /** Quantizes double (64 bit) longitude into 32 bits */
+  static int encodeLon(double lon) {
+    assert validLon(lon): "lon=" + lon;
+    long x = (long) (lon * LON_SCALE);
+    // We use Integer.MAX_VALUE as a sentinel:
+    assert x < Integer.MAX_VALUE;
+    assert x > Integer.MIN_VALUE;
+    return (int) x;
+  }
+
+  /** Turns quantized value from {@link #encodeLat} back into a double. */
+  static double decodeLat(int x) {
+    return x / LAT_SCALE;
+  }
+
+  /** Turns quantized value from {@link #encodeLon} back into a double. */
+  static double decodeLon(int x) {
+    return x / LON_SCALE;
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/GrowingHeapLatLonWriter.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/GrowingHeapLatLonWriter.java
new file mode 100644
index 0000000..742fc4f
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/GrowingHeapLatLonWriter.java
@@ -0,0 +1,88 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+
+final class GrowingHeapLatLonWriter implements LatLonWriter {
+  int[] latEncs;
+  int[] lonEncs;
+  int[] docIDs;
+  long[] ords;
+  private int nextWrite;
+  final int maxSize;
+
+  public GrowingHeapLatLonWriter(int maxSize) {
+    latEncs = new int[16];
+    lonEncs = new int[16];
+    docIDs = new int[16];
+    ords = new long[16];
+    this.maxSize = maxSize;
+  }
+
+  private int[] growExact(int[] arr, int size) {
+    assert size > arr.length;
+    int[] newArr = new int[size];
+    System.arraycopy(arr, 0, newArr, 0, arr.length);
+    return newArr;
+  }
+
+  private long[] growExact(long[] arr, int size) {
+    assert size > arr.length;
+    long[] newArr = new long[size];
+    System.arraycopy(arr, 0, newArr, 0, arr.length);
+    return newArr;
+  }
+
+  @Override
+  public void append(int latEnc, int lonEnc, long ord, int docID) {
+    assert ord == nextWrite;
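+    // Grow all four parallel arrays together, but never beyond maxSize: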
+    if (latEncs.length == nextWrite) {
+      int nextSize = Math.min(maxSize, ArrayUtil.oversize(nextWrite+1, RamUsageEstimator.NUM_BYTES_INT));
+      assert nextSize > nextWrite: "nextSize=" + nextSize + " vs nextWrite=" + nextWrite;
+      latEncs = growExact(latEncs, nextSize);
+      lonEncs = growExact(lonEncs, nextSize);
+      ords = growExact(ords, nextSize);
+      docIDs = growExact(docIDs, nextSize);
+    }
+    latEncs[nextWrite] = latEnc;
+    lonEncs[nextWrite] = lonEnc;
+    ords[nextWrite] = ord;
+    docIDs[nextWrite] = docID;
+    nextWrite++;
+  }
+
+  @Override
+  public LatLonReader getReader(long start) {
+    return new HeapLatLonReader(latEncs, lonEncs, ords, docIDs, (int) start, nextWrite);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public void destroy() {
+  }
+
+  @Override
+  public String toString() {
+    return "GrowingHeapLatLonWriter(count=" + nextWrite + " alloc=" + latEncs.length + ")";
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonReader.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonReader.java
new file mode 100644
index 0000000..67940f6
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonReader.java
@@ -0,0 +1,67 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class HeapLatLonReader implements LatLonReader {
+  private int curRead;
+  final int[] latEncs;
+  final int[] lonEncs;
+  final long[] ords;
+  final int[] docIDs;
+  final int end;
+
+  HeapLatLonReader(int[] latEncs, int[] lonEncs, long[] ords, int[] docIDs, int start, int end) {
+    this.latEncs = latEncs;
+    this.lonEncs = lonEncs;
+    this.ords = ords;
+    this.docIDs = docIDs;
+    curRead = start-1;
+    this.end = end;
+  }
+
+  @Override
+  public boolean next() {
+    curRead++;
+    return curRead < end;
+  }
+
+  @Override
+  public int latEnc() {
+    return latEncs[curRead];
+  }
+
+  @Override
+  public int lonEnc() {
+    return lonEncs[curRead];
+  }
+
+  @Override
+  public int docID() {
+    return docIDs[curRead];
+  }
+
+  @Override
+  public long ord() {
+    return ords[curRead];
+  }
+
+  @Override
+  public void close() {
+  }
+}
+
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonWriter.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonWriter.java
new file mode 100644
index 0000000..0bf68a2
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/HeapLatLonWriter.java
@@ -0,0 +1,63 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class HeapLatLonWriter implements LatLonWriter {
+  final int[] latEncs;
+  final int[] lonEncs;
+  final int[] docIDs;
+  final long[] ords;
+  private int nextWrite;
+
+  public HeapLatLonWriter(int count) {
+    latEncs = new int[count];
+    lonEncs = new int[count];
+    docIDs = new int[count];
+    ords = new long[count];
+  }
+
+  @Override
+  public void append(int latEnc, int lonEnc, long ord, int docID) {
+    latEncs[nextWrite] = latEnc;
+    lonEncs[nextWrite] = lonEnc;
+    ords[nextWrite] = ord;
+    docIDs[nextWrite] = docID;
+    nextWrite++;
+  }
+
+  @Override
+  public LatLonReader getReader(long start) {
+    return new HeapLatLonReader(latEncs, lonEncs, ords, docIDs, (int) start, latEncs.length);
+  }
+
+  @Override
+  public void close() {
+    if (nextWrite != latEncs.length) {
+      throw new IllegalStateException("only wrote " + nextWrite + " values, but expected " + latEncs.length);
+    }
+  }
+
+  @Override
+  public void destroy() {
+  }
+
+  @Override
+  public String toString() {
+    return "HeapLatLonWriter(count=" + latEncs.length + ")";
+  }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonReader.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonReader.java
new file mode 100644
index 0000000..aadfc7f
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonReader.java
@@ -0,0 +1,31 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/** Abstracts away whether OfflineSorter or simple arrays in heap are used. */
+interface LatLonReader extends Closeable {
+  boolean next() throws IOException;
+  int latEnc();
+  int lonEnc();
+  long ord();
+  int docID();
+}
+
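A hypothetical consumer of this interface, shown against the heap-backed implementation above (sketch only; same package as the readers, with java.io.IOException imported):

  static void dumpPoints(int[] latEncs, int[] lonEncs, long[] ords, int[] docIDs) throws IOException {
    // HeapLatLonReader iterates [start, end); start at 0 and read everything:
    try (LatLonReader reader = new HeapLatLonReader(latEncs, lonEncs, ords, docIDs, 0, latEncs.length)) {
      while (reader.next()) {
        System.out.println("latEnc=" + reader.latEnc() + " lonEnc=" + reader.lonEnc()
            + " ord=" + reader.ord() + " docID=" + reader.docID());
      }
    }
  }
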
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonWriter.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonWriter.java
new file mode 100644
index 0000000..161fe9c
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/LatLonWriter.java
@@ -0,0 +1,29 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/** Abstracts away whether OfflineSorter or simple arrays in heap are used. */
+interface LatLonWriter extends Closeable {
+  void append(int latEnc, int lonEnc, long ord, int docID) throws IOException;
+  LatLonReader getReader(long start) throws IOException;
+  void destroy() throws IOException;
+}
+
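A hypothetical factory sketch showing how a caller might pick between the two implementations in this patch (the real decision logic lives in BKDTreeWriter and may differ; same package, with java.io.IOException and java.nio.file.Path imported):

  static LatLonWriter newWriter(Path tempDir, long count, int maxPointsSortInHeap) throws IOException {
    if (count <= maxPointsSortInHeap) {
      return new HeapLatLonWriter((int) count);        // small sets: fixed-size arrays on the heap
    } else {
      return new OfflineLatLonWriter(tempDir, count);  // large sets: spill to a temp file (added later in this patch)
    }
  }
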
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonReader.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonReader.java
new file mode 100644
index 0000000..c898d38
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonReader.java
@@ -0,0 +1,89 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.lucene.store.InputStreamDataInput;
+
+final class OfflineLatLonReader implements LatLonReader {
+  final InputStreamDataInput in;
+  long countLeft;
+  private int latEnc;
+  private int lonEnc;
+  private long ord;
+  private int docID;
+
+  OfflineLatLonReader(Path tempFile, long start, long count) throws IOException {
+    InputStream fis = Files.newInputStream(tempFile);
+    long seekFP = start * BKDTreeWriter.BYTES_PER_DOC;
+    long skipped = 0;
+    while (skipped < seekFP) {
+      long inc = fis.skip(seekFP - skipped);
+      skipped += inc;
+      if (inc == 0) {
+        throw new RuntimeException("skip returned 0");
+      }
+    }
+    in = new InputStreamDataInput(new BufferedInputStream(fis));
+    this.countLeft = count;
+  }
+
+  @Override
+  public boolean next() throws IOException {
+    if (countLeft == 0) {
+      return false;
+    }
+    countLeft--;
+    latEnc = in.readInt();
+    lonEnc = in.readInt();
+    ord = in.readLong();
+    docID = in.readInt();
+    return true;
+  }
+
+  @Override
+  public int latEnc() {
+    return latEnc;
+  }
+
+  @Override
+  public int lonEnc() {
+    return lonEnc;
+  }
+
+  @Override
+  public long ord() {
+    return ord;
+  }
+
+  @Override
+  public int docID() {
+    return docID;
+  }
+
+  @Override
+  public void close() throws IOException {
+    in.close();
+  }
+}
+
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonWriter.java b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonWriter.java
new file mode 100644
index 0000000..ec22883
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/OfflineLatLonWriter.java
@@ -0,0 +1,76 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.OutputStreamDataOutput;
+import org.apache.lucene.util.IOUtils;
+
+final class OfflineLatLonWriter implements LatLonWriter {
+
+  final Path tempFile;
+  final byte[] scratchBytes = new byte[BKDTreeWriter.BYTES_PER_DOC];
+  final ByteArrayDataOutput scratchBytesOutput = new ByteArrayDataOutput(scratchBytes);      
+  final OutputStreamDataOutput out;
+  final long count;
+  private long countWritten;
+
+  public OfflineLatLonWriter(Path tempDir, long count) throws IOException {
+    tempFile = Files.createTempFile(tempDir, "size" + count + ".", "");
+    out = new OutputStreamDataOutput(new BufferedOutputStream(Files.newOutputStream(tempFile)));
+    this.count = count;
+  }
+    
+  @Override
+  public void append(int latEnc, int lonEnc, long ord, int docID) throws IOException {
+    out.writeInt(latEnc);
+    out.writeInt(lonEnc);
+    out.writeLong(ord);
+    out.writeInt(docID);
+    countWritten++;
+  }
+
+  @Override
+  public LatLonReader getReader(long start) throws IOException {
+    return new OfflineLatLonReader(tempFile, start, count-start);
+  }
+
+  @Override
+  public void close() throws IOException {
+    out.close();
+    if (count != countWritten) {
+      throw new IllegalStateException("wrote " + countWritten + " values, but expected " + count);
+    }
+  }
+
+  @Override
+  public void destroy() throws IOException {
+    IOUtils.rm(tempFile);
+  }
+
+  @Override
+  public String toString() {
+    return "OfflineLatLonWriter(count=" + count + " tempFile=" + tempFile + ")";
+  }
+}
+
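For orientation, the offline writer/reader pair relies on a fixed-size record per point: each append() writes latEnc (4 bytes) + lonEnc (4 bytes) + ord (8 bytes) + docID (4 bytes). BKDTreeWriter.BYTES_PER_DOC is defined outside this hunk, but presumably matches that layout, which is what lets OfflineLatLonReader seek straight to the requested start:

  // Hypothetical helper mirroring the seek performed in OfflineLatLonReader's constructor:
  static long recordOffset(long start) {
    return start * BKDTreeWriter.BYTES_PER_DOC;  // presumably 4 + 4 + 8 + 4 = 20 bytes per record
  }
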
diff --git a/lucene/sandbox/src/java/org/apache/lucene/bkdtree/package.html b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/package.html
new file mode 100644
index 0000000..90bf356
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/bkdtree/package.html
@@ -0,0 +1,28 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- not a package-info.java, because we already defined this package in core/ -->
+
+<html>
+<head>
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
+<body>
+This package contains a BKD spatial tree implementation for indexing lat/lon points and fast shape searching.
+</body>
+</html>
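
A hypothetical end-to-end sketch of how the new classes fit together (imports and error handling omitted; the field name "point", the path, and the bbox coordinates are illustrative, and TestBKDTree below shows the authoritative usage):

  Directory dir = FSDirectory.open(Paths.get("/path/to/index"));  // illustrative path
  IndexWriterConfig iwc = new IndexWriterConfig(analyzer);        // any Analyzer
  iwc.setCodec(new Lucene50Codec() {
    @Override
    public DocValuesFormat getDocValuesFormatForField(String field) {
      // route the spatial field to the BKD tree doc values format (no-arg constructor, i.e. its defaults):
      return "point".equals(field) ? new BKDTreeDocValuesFormat() : super.getDocValuesFormatForField(field);
    }
  });
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new BKDPointField("point", 40.7143528, -74.0059731));   // lat, lon in degrees
  w.addDocument(doc);
  w.close();

  IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
  // minLat, maxLat, minLon, maxLon:
  TopDocs hits = searcher.search(new BKDPointInBBoxQuery("point", 40.0, 41.0, -75.0, -73.0), 10);
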
diff --git a/lucene/sandbox/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat b/lucene/sandbox/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
new file mode 100644
index 0000000..49d2b2e
--- /dev/null
+++ b/lucene/sandbox/src/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
@@ -0,0 +1,17 @@
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+org.apache.lucene.bkdtree.BKDTreeDocValuesFormat
+
diff --git a/lucene/sandbox/src/test/org/apache/lucene/bkdtree/TestBKDTree.java b/lucene/sandbox/src/test/org/apache/lucene/bkdtree/TestBKDTree.java
new file mode 100644
index 0000000..4b2c3b4
--- /dev/null
+++ b/lucene/sandbox/src/test/org/apache/lucene/bkdtree/TestBKDTree.java
@@ -0,0 +1,595 @@
+package org.apache.lucene.bkdtree;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+// TODO: can test framework assert we don't leak temp files?
+
+public class TestBKDTree extends LuceneTestCase {
+
+  private static boolean smallBBox;
+
+  @BeforeClass
+  public static void beforeClass() {
+    smallBBox = random().nextBoolean();
+  }
+
+  public void testAllLatEqual() throws Exception {
+    int numPoints = atLeast(10000);
+    double lat = randomLat();
+    double[] lats = new double[numPoints];
+    double[] lons = new double[numPoints];
+
+    boolean haveRealDoc = false;
+
+    for(int docID=0;docID<numPoints;docID++) {
+      int x = random().nextInt(20);
+      if (x == 17) {
+        // Some docs don't have a point:
+        lats[docID] = Double.NaN;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " is missing");
+        }
+        continue;
+      }
+
+      if (docID > 0 && x == 14 && haveRealDoc) {
+        int oldDocID;
+        while (true) {
+          oldDocID = random().nextInt(docID);
+          if (Double.isNaN(lats[oldDocID]) == false) {
+            break;
+          }
+        }
+            
+        // Fully identical point:
+        lons[docID] = lons[oldDocID];
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " lat=" + lat + " lon=" + lons[docID] + " (same lat/lon as doc=" + oldDocID + ")");
+        }
+      } else {
+        lons[docID] = randomLon();
+        haveRealDoc = true;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " lat=" + lat + " lon=" + lons[docID]);
+        }
+      }
+      lats[docID] = lat;
+    }
+
+    verify(lats, lons);
+  }
+
+  public void testAllLonEqual() throws Exception {
+    int numPoints = atLeast(10000);
+    double theLon = randomLon();
+    double[] lats = new double[numPoints];
+    double[] lons = new double[numPoints];
+
+    boolean haveRealDoc = false;
+
+    //System.out.println("theLon=" + theLon);
+
+    for(int docID=0;docID<numPoints;docID++) {
+      int x = random().nextInt(20);
+      if (x == 17) {
+        // Some docs don't have a point:
+        lats[docID] = Double.NaN;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " is missing");
+        }
+        continue;
+      }
+
+      if (docID > 0 && x == 14 && haveRealDoc) {
+        int oldDocID;
+        while (true) {
+          oldDocID = random().nextInt(docID);
+          if (Double.isNaN(lats[oldDocID]) == false) {
+            break;
+          }
+        }
+            
+        // Fully identical point:
+        lats[docID] = lats[oldDocID];
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + theLon + " (same lat/lon as doc=" + oldDocID + ")");
+        }
+      } else {
+        lats[docID] = randomLat();
+        haveRealDoc = true;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + theLon);
+        }
+      }
+      lons[docID] = theLon;
+    }
+
+    verify(lats, lons);
+  }
+
+  public void testMultiValued() throws Exception {
+    int numPoints = atLeast(10000);
+    // Every doc has 2 points:
+    double[] lats = new double[2*numPoints];
+    double[] lons = new double[2*numPoints];
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    // We rely on docID order:
+    iwc.setMergePolicy(newLogMergePolicy());
+    int maxPointsInLeaf = TestUtil.nextInt(random(), 16, 2048);
+    int maxPointsSortInHeap = TestUtil.nextInt(random(), 1024, 1024*1024);
+    Codec codec = TestUtil.alwaysDocValuesFormat(new BKDTreeDocValuesFormat(maxPointsInLeaf, maxPointsSortInHeap));
+    iwc.setCodec(codec);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+    for (int docID=0;docID<numPoints;docID++) {
+      Document doc = new Document();
+      lats[2*docID] = randomLat();
+      lons[2*docID] = randomLon();
+      doc.add(new BKDPointField("point", lats[2*docID], lons[2*docID]));
+      lats[2*docID+1] = randomLat();
+      lons[2*docID+1] = randomLon();
+      doc.add(new BKDPointField("point", lats[2*docID+1], lons[2*docID+1]));
+      w.addDocument(doc);
+    }
+
+    if (random().nextBoolean()) {
+      w.forceMerge(1);
+    }
+    IndexReader r = w.getReader();
+    w.close();
+    // We can't wrap with "exotic" readers because the BKD query must see the BKDDVFormat:
+    IndexSearcher s = newSearcher(r, false);
+
+    int iters = atLeast(100);
+    for (int iter=0;iter<iters;iter++) {
+      double lat0 = randomLat();
+      double lat1 = randomLat();
+      double lon0 = randomLon();
+      double lon1 = randomLon();
+
+      if (lat1 < lat0) {
+        double x = lat0;
+        lat0 = lat1;
+        lat1 = x;
+      }
+
+      if (lon1 < lon0) {
+        double x = lon0;
+        lon0 = lon1;
+        lon1 = x;
+      }
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " lat=" + lat0 + " TO " + lat1 + " lon=" + lon0 + " TO " + lon1);
+      }
+
+      Query query = new BKDPointInBBoxQuery("point", lat0, lat1, lon0, lon1);
+
+      final FixedBitSet hits = new FixedBitSet(r.maxDoc());
+      s.search(query, new SimpleCollector() {
+
+          private int docBase;
+
+          @Override
+          public boolean needsScores() {
+            return false;
+          }
+
+          @Override
+          protected void doSetNextReader(LeafReaderContext context) throws IOException {
+            docBase = context.docBase;
+          }
+
+          @Override
+          public void collect(int doc) {
+            hits.set(docBase+doc);
+          }
+        });
+
+      for(int docID=0;docID<lats.length/2;docID++) {
+        double latDoc1 = lats[2*docID];
+        double lonDoc1 = lons[2*docID];
+        double latDoc2 = lats[2*docID+1];
+        double lonDoc2 = lons[2*docID+1];
+        boolean expected = rectContainsPointEnc(lat0, lat1, lon0, lon1, latDoc1, lonDoc1) ||
+          rectContainsPointEnc(lat0, lat1, lon0, lon1, latDoc2, lonDoc2);
+
+        if (hits.get(docID) != expected) {
+          fail("docID=" + docID + " latDoc1=" + latDoc1 + " lonDoc1=" + lonDoc1 + " latDoc2=" + latDoc2 + " lonDoc2=" + lonDoc2 + " expected " + expected + " but got: " + hits.get(docID));
+        }
+      }
+    }
+    r.close();
+    dir.close();
+  }
+
+  // A particularly tricky adversary:
+  public void testSamePointManyTimes() throws Exception {
+    int numPoints = atLeast(1000);
+
+    // Every doc gets the same point:
+    double theLat = randomLat();
+    double theLon = randomLon();
+
+    double[] lats = new double[numPoints];
+    Arrays.fill(lats, theLat);
+
+    double[] lons = new double[numPoints];
+    Arrays.fill(lons, theLon);
+
+    verify(lats, lons);
+  }
+
+  public void testRandomTiny() throws Exception {
+    // Make sure single-leaf-node case is OK:
+    doTestRandom(10);
+  }
+
+  public void testRandomMedium() throws Exception {
+    doTestRandom(10000);
+  }
+
+  @Nightly
+  public void testRandomBig() throws Exception {
+    doTestRandom(200000);
+  }
+
+  private void doTestRandom(int count) throws Exception {
+
+    int numPoints = atLeast(count);
+
+    if (VERBOSE) {
+      System.out.println("TEST: numPoints=" + numPoints);
+    }
+
+    double[] lats = new double[numPoints];
+    double[] lons = new double[numPoints];
+
+    boolean haveRealDoc = false;
+
+    for (int docID=0;docID<numPoints;docID++) {
+      int x = random().nextInt(20);
+      if (x == 17) {
+        // Some docs don't have a point:
+        lats[docID] = Double.NaN;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " is missing");
+        }
+        continue;
+      }
+
+      if (docID > 0 && x < 3 && haveRealDoc) {
+        int oldDocID;
+        while (true) {
+          oldDocID = random().nextInt(docID);
+          if (Double.isNaN(lats[oldDocID]) == false) {
+            break;
+          }
+        }
+            
+        if (x == 0) {
+          // Identical lat to old point
+          lats[docID] = lats[oldDocID];
+          lons[docID] = randomLon();
+          if (VERBOSE) {
+            System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + lons[docID] + " (same lat as doc=" + oldDocID + ")");
+          }
+        } else if (x == 1) {
+          // Identical lon to old point
+          lats[docID] = randomLat();
+          lons[docID] = lons[oldDocID];
+          if (VERBOSE) {
+            System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + lons[docID] + " (same lon as doc=" + oldDocID + ")");
+          }
+        } else {
+          assert x == 2;
+          // Fully identical point:
+          lats[docID] = lats[oldDocID];
+          lons[docID] = lons[oldDocID];
+          if (VERBOSE) {
+            System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + lons[docID] + " (same lat/lon as doc=" + oldDocID + ")");
+          }
+        }
+      } else {
+        lats[docID] = randomLat();
+        lons[docID] = randomLon();
+        haveRealDoc = true;
+        if (VERBOSE) {
+          System.out.println("  doc=" + docID + " lat=" + lats[docID] + " lon=" + lons[docID]);
+        }
+      }
+    }
+
+    verify(lats, lons);
+  }
+
+  private static final double TOLERANCE = 1e-7;
+
+  private static void verify(double[] lats, double[] lons) throws Exception {
+    int maxPointsInLeaf = TestUtil.nextInt(random(), 16, 2048);
+    int maxPointsSortInHeap = TestUtil.nextInt(random(), maxPointsInLeaf, 1024*1024);
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    // Else we can get O(N^2) merging:
+    int mbd = iwc.getMaxBufferedDocs();
+    if (mbd != -1 && mbd < lats.length/100) {
+      iwc.setMaxBufferedDocs(lats.length/100);
+    }
+    final DocValuesFormat dvFormat = new BKDTreeDocValuesFormat(maxPointsInLeaf, maxPointsSortInHeap);
+    Codec codec = new Lucene50Codec() {
+        @Override
+        public DocValuesFormat getDocValuesFormatForField(String field) {
+          if (field.equals("point")) {
+            return dvFormat;
+          } else {
+            return super.getDocValuesFormatForField(field);
+          }
+        }
+      };
+    iwc.setCodec(codec);
+    Directory dir;
+    if (lats.length > 100000) {
+      dir = newFSDirectory(createTempDir("TestBKDTree"));
+    } else {
+      dir = newDirectory();
+    }
+    Set<Integer> deleted = new HashSet<>();
+    // RandomIndexWriter is too slow here:
+    IndexWriter w = new IndexWriter(dir, iwc);
+    for(int id=0;id<lats.length;id++) {
+      Document doc = new Document();
+      doc.add(newStringField("id", ""+id, Field.Store.NO));
+      doc.add(new NumericDocValuesField("id", id));
+      if (Double.isNaN(lats[id]) == false) {
+        doc.add(new BKDPointField("point", lats[id], lons[id]));
+      }
+      w.addDocument(doc);
+      if (id > 0 && random().nextInt(100) == 42) {
+        int idToDelete = random().nextInt(id);
+        w.deleteDocuments(new Term("id", ""+idToDelete));
+        deleted.add(idToDelete);
+        if (VERBOSE) {
+          System.out.println("  delete id=" + idToDelete);
+        }
+      }
+    }
+    if (random().nextBoolean()) {
+      w.forceMerge(1);
+    }
+    final IndexReader r = DirectoryReader.open(w, true);
+    w.close();
+
+    // We can't wrap with "exotic" readers because the BKD query must see the BKDDVFormat:
+    IndexSearcher s = newSearcher(r, false);
+
+    int numThreads = TestUtil.nextInt(random(), 2, 5);
+
+    List<Thread> threads = new ArrayList<>();
+    final int iters = atLeast(100);
+
+    final CountDownLatch startingGun = new CountDownLatch(1);
+    final AtomicBoolean failed = new AtomicBoolean();
+
+    for(int i=0;i<numThreads;i++) {
+      Thread thread = new Thread() {
+          @Override
+          public void run() {
+            try {
+              _run();
+            } catch (Exception e) {
+              failed.set(true);
+              throw new RuntimeException(e);
+            }
+          }
+
+          private void _run() throws Exception {
+            startingGun.await();
+
+            NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
+
+            for (int iter=0;iter<iters && failed.get() == false;iter++) {
+              double lat0 = randomLat();
+              double lat1 = randomLat();
+              double lon0 = randomLon();
+              double lon1 = randomLon();
+
+              if (lat1 < lat0) {
+                double x = lat0;
+                lat0 = lat1;
+                lat1 = x;
+              }
+
+              if (lon1 < lon0) {
+                double x = lon0;
+                lon0 = lon1;
+                lon1 = x;
+              }
+
+              if (VERBOSE) {
+                System.out.println("\nTEST: iter=" + iter + " lat=" + lat0 + " TO " + lat1 + " lon=" + lon0 + " TO " + lon1);
+              }
+
+              Query query;
+              if (random().nextBoolean()) {
+                query = new BKDPointInBBoxQuery("point", lat0, lat1, lon0, lon1);
+              } else {
+                double[] lats = new double[5];
+                double[] lons = new double[5];
+                lats[0] = lat0;
+                lons[0] = lon0;
+                lats[1] = lat1;
+                lons[1] = lon0;
+                lats[2] = lat1;
+                lons[2] = lon1;
+                lats[3] = lat0;
+                lons[3] = lon1;
+                lats[4] = lat0;
+                lons[4] = lon0;
+                query = new BKDPointInPolygonQuery("point", lats, lons);
+              }
+
+              if (VERBOSE) {
+                System.out.println("  using query: " + query);
+              }
+
+              final FixedBitSet hits = new FixedBitSet(r.maxDoc());
+              s.search(query, new SimpleCollector() {
+
+                  private int docBase;
+
+                  @Override
+                  public boolean needsScores() {
+                    return false;
+                  }
+
+                  @Override
+                  protected void doSetNextReader(LeafReaderContext context) throws IOException {
+                    docBase = context.docBase;
+                  }
+
+                  @Override
+                  public void collect(int doc) {
+                    hits.set(docBase+doc);
+                  }
+                });
+
+              if (VERBOSE) {
+                System.out.println("  hitCount: " + hits.cardinality());
+              }
+      
+              for(int docID=0;docID<r.maxDoc();docID++) {
+                int id = (int) docIDToID.get(docID);
+                boolean expected = deleted.contains(id) == false && rectContainsPointEnc(lat0, lat1, lon0, lon1, lats[id], lons[id]);
+                if (hits.get(docID) != expected) {
+                  if (query instanceof BKDPointInPolygonQuery &&
+                      (Math.abs(lat0-lats[id]) < TOLERANCE ||
+                       Math.abs(lat1-lats[id]) < TOLERANCE ||
+                       Math.abs(lon0-lons[id]) < TOLERANCE ||
+                       Math.abs(lon1-lons[id]) < TOLERANCE)) {
+                    // The poly check quantizes slightly differently, so we allow for boundary cases to disagree
+                  } else {
+                    // We do exact quantized comparison so the bbox query should never disagree:
+                    fail(Thread.currentThread().getName() + ": iter=" + iter + " id=" + id + " docID=" + docID + " lat=" + lats[id] + " lon=" + lons[id] + " (bbox: lat=" + lat0 + " TO " + lat1 + " lon=" + lon0 + " TO " + lon1 + ") expected " + expected + " but got: " + hits.get(docID) + " deleted?=" + deleted.contains(id) + " query=" + query);
+                  }
+                }
+              }
+            }
+          }
+        };
+      thread.setName("T" + i);
+      thread.start();
+      threads.add(thread);
+    }
+    startingGun.countDown();
+    for(Thread thread : threads) {
+      thread.join();
+    }
+    IOUtils.close(r, dir);
+  }
+
+  private static boolean rectContainsPointEnc(double rectLatMin, double rectLatMax,
+                                              double rectLonMin, double rectLonMax,
+                                              double pointLat, double pointLon) {
+    if (Double.isNaN(pointLat)) {
+      return false;
+    }
+    int rectLatMinEnc = BKDTreeWriter.encodeLat(rectLatMin);
+    int rectLatMaxEnc = BKDTreeWriter.encodeLat(rectLatMax);
+    int rectLonMinEnc = BKDTreeWriter.encodeLon(rectLonMin);
+    int rectLonMaxEnc = BKDTreeWriter.encodeLon(rectLonMax);
+    int pointLatEnc = BKDTreeWriter.encodeLat(pointLat);
+    int pointLonEnc = BKDTreeWriter.encodeLon(pointLon);
+
+    return pointLatEnc >= rectLatMinEnc &&
+      pointLatEnc < rectLatMaxEnc &&
+      pointLonEnc >= rectLonMinEnc &&
+      pointLonEnc < rectLonMaxEnc;
+  }
+
+  private static double randomLat() {
+    if (smallBBox) {
+      return 2.0 * (random().nextDouble()-0.5);
+    } else {
+      return -90 + 180.0 * random().nextDouble();
+    }
+  }
+
+  private static double randomLon() {
+    if (smallBBox) {
+      return 2.0 * (random().nextDouble()-0.5);
+    } else {
+      return -180 + 360.0 * random().nextDouble();
+    }
+  }
+
+  public void testEncodeDecode() throws Exception {
+    int iters = atLeast(10000);
+    for(int iter=0;iter<iters;iter++) {
+      double lat = randomLat();
+      double latQuantized = BKDTreeWriter.decodeLat(BKDTreeWriter.encodeLat(lat));
+      assertEquals(lat, latQuantized, BKDTreeWriter.TOLERANCE);
+
+      double lon = randomLon();
+      double lonQuantized = BKDTreeWriter.decodeLon(BKDTreeWriter.encodeLon(lon));
+      assertEquals(lon, lonQuantized, BKDTreeWriter.TOLERANCE);
+    }
+  }
+
+  public void testEncodeDecodeMax() throws Exception {
+    int x = BKDTreeWriter.encodeLat(Math.nextAfter(90.0, Double.POSITIVE_INFINITY));
+    assertTrue(x < Integer.MAX_VALUE);
+
+    int y = BKDTreeWriter.encodeLon(Math.nextAfter(180.0, Double.POSITIVE_INFINITY));
+    assertTrue(y < Integer.MAX_VALUE);
+  }
+}
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java
index 34f2a12..b585579 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShape.java
@@ -28,6 +28,7 @@
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoAreaFactory;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoShape;
+import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;
 
 /**
  * A 3D planar geometry based Spatial4j Shape implementation.
@@ -38,17 +39,23 @@
 
   public final SpatialContext ctx;
   public final GeoShape shape;
+  public final PlanetModel planetModel;
 
   private Rectangle boundingBox = null;
 
   public final static double RADIANS_PER_DEGREE = Math.PI / 180.0;
   public final static double DEGREES_PER_RADIAN = 1.0 / RADIANS_PER_DEGREE;
 
-  public Geo3dShape(GeoShape shape, SpatialContext ctx) {
+  public Geo3dShape(final GeoShape shape, final SpatialContext ctx) {
+    this(PlanetModel.SPHERE, shape, ctx);
+  }
+  
+  public Geo3dShape(final PlanetModel planetModel, final GeoShape shape, final SpatialContext ctx) {
     if (!ctx.isGeo()) {
       throw new IllegalArgumentException("SpatialContext.isGeo() must be true");
     }
     this.ctx = ctx;
+    this.planetModel = planetModel;
     this.shape = shape;
   }
 
@@ -64,7 +71,8 @@
 
   protected SpatialRelation relate(Rectangle r) {
     // Construct the right kind of GeoArea first
-    GeoArea geoArea = GeoAreaFactory.makeGeoArea(r.getMaxY() * RADIANS_PER_DEGREE,
+    GeoArea geoArea = GeoAreaFactory.makeGeoArea(planetModel,
+        r.getMaxY() * RADIANS_PER_DEGREE,
         r.getMinY() * RADIANS_PER_DEGREE,
         r.getMinX() * RADIANS_PER_DEGREE,
         r.getMaxX() * RADIANS_PER_DEGREE);
@@ -83,7 +91,7 @@
 
   protected SpatialRelation relate(Point p) {
     // Create a GeoPoint
-    GeoPoint point = new GeoPoint(p.getY()*RADIANS_PER_DEGREE, p.getX()*RADIANS_PER_DEGREE);
+    GeoPoint point = new GeoPoint(planetModel, p.getY()*RADIANS_PER_DEGREE, p.getX()*RADIANS_PER_DEGREE);
     if (shape.isWithin(point)) {
       // Point within shape
       return SpatialRelation.CONTAINS;
@@ -91,7 +99,9 @@
     return SpatialRelation.DISJOINT;
   }
 
-  protected final double ROUNDOFF_ADJUSTMENT = 0.01;
+  // The required size of this adjustment depends on the actual planetary model chosen.
+  // This value is big enough to account for WGS84.
+  protected final double ROUNDOFF_ADJUSTMENT = 0.05;
   
   @Override
   public Rectangle getBoundingBox() {
@@ -150,7 +160,7 @@
 
   @Override
   public String toString() {
-    return "Geo3dShape{" + shape + '}';
+    return "Geo3dShape{planetmodel=" + planetModel+", shape="+shape + '}';
   }
 
   @Override
@@ -158,11 +168,11 @@
     if (!(other instanceof Geo3dShape))
       return false;
     Geo3dShape tr = (Geo3dShape)other;
-    return tr.shape.equals(shape);
+    return tr.planetModel.equals(planetModel) && tr.shape.equals(shape);
   }
 
   @Override
   public int hashCode() {
-    return shape.hashCode();
+    return planetModel.hashCode() + shape.hashCode();
   }
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Bounds.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Bounds.java
index cee58ae..82ba62c 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Bounds.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Bounds.java
@@ -254,11 +254,11 @@
     }
   }
 
-  public Bounds addPoint(Vector v) {
+  public Bounds addPoint(final Vector v) {
     return addPoint(v.x, v.y, v.z);
   }
 
-  public Bounds addPoint(double x, double y, double z) {
+  public Bounds addPoint(final double x, final double y, final double z) {
     if (!noLongitudeBound) {
       // Get a longitude value
       double longitude = Math.atan2(y, x);
@@ -267,7 +267,7 @@
     }
     if (!noTopLatitudeBound || !noBottomLatitudeBound) {
       // Compute a latitude value
-      double latitude = Math.asin(z);
+      double latitude = Math.asin(z/Math.sqrt(z * z + x * x + y * y));
       addLatitudeBound(latitude);
     }
     return this;
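
The asin change above generalizes the latitude computation from unit-sphere vectors, where latitude = asin(z), to vectors of arbitrary magnitude as produced by non-spherical planet models. A small illustrative check:

  // For a unit vector the new formula reduces to the old one: asin(z / 1) == asin(z).
  // For a scaled vector, only the direction matters:
  double x = 0.0, y = 0.0, z = 2.0;                                   // magnitude-2 vector along the polar axis
  double latitude = Math.asin(z / Math.sqrt(x * x + y * y + z * z));  // == asin(1.0) == PI/2
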
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoAreaFactory.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoAreaFactory.java
index ab49cad..c273e9e 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoAreaFactory.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoAreaFactory.java
@@ -35,8 +35,8 @@
    * @param rightLon  is the right longitude
    * @return a GeoArea corresponding to what was specified.
    */
-  public static GeoArea makeGeoArea(double topLat, double bottomLat, double leftLon, double rightLon) {
-    return GeoBBoxFactory.makeGeoBBox(topLat, bottomLat, leftLon, rightLon);
+  public static GeoArea makeGeoArea(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, final double rightLon) {
+    return GeoBBoxFactory.makeGeoBBox(planetModel, topLat, bottomLat, leftLon, rightLon);
   }
 
 }
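
For reference, a hypothetical call into the updated factory (all angles in radians, as elsewhere in geo3d; the coordinates are illustrative):

  // Build a GeoArea for a bounding box on the default spherical model; GeoBBoxFactory picks the
  // appropriate specialized implementation (world, latitude zone, longitude slice, rectangle, ...):
  GeoArea area = GeoAreaFactory.makeGeoArea(PlanetModel.SPHERE,
      41.0 * Math.PI / 180.0,    // topLat
      40.0 * Math.PI / 180.0,    // bottomLat
      -75.0 * Math.PI / 180.0,   // leftLon
      -73.0 * Math.PI / 180.0);  // rightLon
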
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxFactory.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxFactory.java
index 8e2b8df..863aca5 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxFactory.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxFactory.java
@@ -29,13 +29,14 @@
   /**
    * Create a geobbox of the right kind given the specified bounds.
    *
+   * @param planetModel is the planet model
    * @param topLat    is the top latitude
    * @param bottomLat is the bottom latitude
    * @param leftLon   is the left longitude
    * @param rightLon  is the right longitude
    * @return a GeoBBox corresponding to what was specified.
    */
-  public static GeoBBox makeGeoBBox(double topLat, double bottomLat, double leftLon, double rightLon) {
+  public static GeoBBox makeGeoBBox(final PlanetModel planetModel, double topLat, double bottomLat, double leftLon, double rightLon) {
     //System.err.println("Making rectangle for topLat="+topLat*180.0/Math.PI+", bottomLat="+bottomLat*180.0/Math.PI+", leftLon="+leftLon*180.0/Math.PI+", rightlon="+rightLon*180.0/Math.PI);
     if (topLat > Math.PI * 0.5)
       topLat = Math.PI * 0.5;
@@ -47,17 +48,17 @@
       rightLon = Math.PI;
     if (Math.abs(leftLon + Math.PI) < Vector.MINIMUM_RESOLUTION && Math.abs(rightLon - Math.PI) < Vector.MINIMUM_RESOLUTION) {
       if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION && Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION)
-        return new GeoWorld();
+        return new GeoWorld(planetModel);
       if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_RESOLUTION) {
         if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION || Math.abs(topLat + Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION)
-          return new GeoDegeneratePoint(topLat, 0.0);
-        return new GeoDegenerateLatitudeZone(topLat);
+          return new GeoDegeneratePoint(planetModel, topLat, 0.0);
+        return new GeoDegenerateLatitudeZone(planetModel, topLat);
       }
       if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION)
-        return new GeoNorthLatitudeZone(bottomLat);
+        return new GeoNorthLatitudeZone(planetModel, bottomLat);
       else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION)
-        return new GeoSouthLatitudeZone(topLat);
-      return new GeoLatitudeZone(topLat, bottomLat);
+        return new GeoSouthLatitudeZone(planetModel, topLat);
+      return new GeoLatitudeZone(planetModel, topLat, bottomLat);
     }
     //System.err.println(" not latitude zone");
     double extent = rightLon - leftLon;
@@ -65,47 +66,47 @@
       extent += Math.PI * 2.0;
     if (topLat == Math.PI * 0.5 && bottomLat == -Math.PI * 0.5) {
       if (Math.abs(leftLon - rightLon) < Vector.MINIMUM_RESOLUTION)
-        return new GeoDegenerateLongitudeSlice(leftLon);
+        return new GeoDegenerateLongitudeSlice(planetModel, leftLon);
 
       if (extent >= Math.PI)
-        return new GeoWideLongitudeSlice(leftLon, rightLon);
+        return new GeoWideLongitudeSlice(planetModel, leftLon, rightLon);
 
-      return new GeoLongitudeSlice(leftLon, rightLon);
+      return new GeoLongitudeSlice(planetModel, leftLon, rightLon);
     }
     //System.err.println(" not longitude slice");
     if (Math.abs(leftLon - rightLon) < Vector.MINIMUM_RESOLUTION) {
       if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_RESOLUTION)
-        return new GeoDegeneratePoint(topLat, leftLon);
-      return new GeoDegenerateVerticalLine(topLat, bottomLat, leftLon);
+        return new GeoDegeneratePoint(planetModel, topLat, leftLon);
+      return new GeoDegenerateVerticalLine(planetModel, topLat, bottomLat, leftLon);
     }
     //System.err.println(" not vertical line");
     if (extent >= Math.PI) {
       if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_RESOLUTION) {
         //System.err.println(" wide degenerate line");
-        return new GeoWideDegenerateHorizontalLine(topLat, leftLon, rightLon);
+        return new GeoWideDegenerateHorizontalLine(planetModel, topLat, leftLon, rightLon);
       }
       if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION) {
-        return new GeoWideNorthRectangle(bottomLat, leftLon, rightLon);
+        return new GeoWideNorthRectangle(planetModel, bottomLat, leftLon, rightLon);
       } else if (Math.abs(bottomLat + Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION) {
-        return new GeoWideSouthRectangle(topLat, leftLon, rightLon);
+        return new GeoWideSouthRectangle(planetModel, topLat, leftLon, rightLon);
       }
       //System.err.println(" wide rect");
-      return new GeoWideRectangle(topLat, bottomLat, leftLon, rightLon);
+      return new GeoWideRectangle(planetModel, topLat, bottomLat, leftLon, rightLon);
     }
     if (Math.abs(topLat - bottomLat) < Vector.MINIMUM_RESOLUTION) {
       if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION || Math.abs(topLat + Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION) {
-        return new GeoDegeneratePoint(topLat, 0.0);
+        return new GeoDegeneratePoint(planetModel, topLat, 0.0);
       }
       //System.err.println(" horizontal line");
-      return new GeoDegenerateHorizontalLine(topLat, leftLon, rightLon);
+      return new GeoDegenerateHorizontalLine(planetModel, topLat, leftLon, rightLon);
     }
     if (Math.abs(topLat - Math.PI * 0.5) < Vector.MINIMUM_RESOLUTION) {
-      return new GeoNorthRectangle(bottomLat, leftLon, rightLon);
+      return new GeoNorthRectangle(planetModel, bottomLat, leftLon, rightLon);
     } else if (Math.abs(bottomLat + Math.PI * 0.5) <  Vector.MINIMUM_RESOLUTION) {
-      return new GeoSouthRectangle(topLat, leftLon, rightLon);
+      return new GeoSouthRectangle(planetModel, topLat, leftLon, rightLon);
     }
     //System.err.println(" rectangle");
-    return new GeoRectangle(topLat, bottomLat, leftLon, rightLon);
+    return new GeoRectangle(planetModel, topLat, bottomLat, leftLon, rightLon);
   }
 
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxBase.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseBBox.java
similarity index 81%
rename from lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxBase.java
rename to lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseBBox.java
index 1e1a603..1e52aaf 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxBase.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseBBox.java
@@ -23,11 +23,12 @@
  *
  * @lucene.internal
  */
-public abstract class GeoBBoxBase implements GeoBBox {
+public abstract class GeoBaseBBox extends GeoBaseShape implements GeoBBox {
 
-  protected final static GeoPoint NORTH_POLE = new GeoPoint(0.0, 0.0, 1.0);
-  protected final static GeoPoint SOUTH_POLE = new GeoPoint(0.0, 0.0, -1.0);
-
+  public GeoBaseBBox(final PlanetModel planetModel) {
+    super(planetModel);
+  }
+  
   @Override
   public abstract boolean isWithin(final Vector point);
 
@@ -45,6 +46,9 @@
       } else {
         foundOutside = true;
       }
+      if (foundInside && foundOutside) {
+        return SOME_INSIDE;
+      }
     }
     if (!foundInside && !foundOutside)
       return NONE_INSIDE;
@@ -54,5 +58,15 @@
       return NONE_INSIDE;
     return SOME_INSIDE;
   }
+  
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+  
+  @Override
+  public boolean equals(final Object o) {
+    return super.equals(o);
+  }
 }
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseExtendedShape.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseExtendedShape.java
index 10572a8c..e3a1c03 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseExtendedShape.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseExtendedShape.java
@@ -22,11 +22,10 @@
  *
  * @lucene.internal
  */
-public abstract class GeoBaseExtendedShape implements GeoShape {
-  protected final static GeoPoint NORTH_POLE = new GeoPoint(0.0, 0.0, 1.0);
-  protected final static GeoPoint SOUTH_POLE = new GeoPoint(0.0, 0.0, -1.0);
+public abstract class GeoBaseExtendedShape extends GeoBaseShape implements GeoShape {
 
-  public GeoBaseExtendedShape() {
+  public GeoBaseExtendedShape(final PlanetModel planetModel) {
+    super(planetModel);
   }
 
   /**
@@ -83,12 +82,22 @@
   public Bounds getBounds(Bounds bounds) {
     if (bounds == null)
       bounds = new Bounds();
-    if (isWithin(NORTH_POLE)) {
+    if (isWithin(planetModel.NORTH_POLE)) {
       bounds.noTopLatitudeBound().noLongitudeBound();
     }
-    if (isWithin(SOUTH_POLE)) {
+    if (isWithin(planetModel.SOUTH_POLE)) {
       bounds.noBottomLatitudeBound().noLongitudeBound();
     }
     return bounds;
   }
+  
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+  
+  @Override
+  public boolean equals(final Object o) {
+    return super.equals(o);
+  }
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseShape.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseShape.java
new file mode 100644
index 0000000..f5a3dad
--- /dev/null
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoBaseShape.java
@@ -0,0 +1,47 @@
+package org.apache.lucene.spatial.spatial4j.geo3d;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Base class from which all geo3d shapes can derive; it holds the planet model
+ * and furnishes some common code.
+ *
+ * @lucene.internal
+ */
+public abstract class GeoBaseShape {
+
+  protected final PlanetModel planetModel;
+  
+  public GeoBaseShape(final PlanetModel planetModel) {
+    this.planetModel = planetModel;
+  }
+  
+  @Override
+  public int hashCode() {
+    return planetModel.hashCode();
+  }
+  
+  @Override
+  public boolean equals(final Object o) {
+    if (!(o instanceof GeoBaseShape))
+      return false;
+    return planetModel.equals(((GeoBaseShape)o).planetModel);
+  }
+}
+
+
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircle.java
index b04d0fa..23d8288 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircle.java
@@ -25,45 +25,42 @@
 public class GeoCircle extends GeoBaseExtendedShape implements GeoDistanceShape, GeoSizeable {
   public final GeoPoint center;
   public final double cutoffAngle;
-  public final double cutoffNormalDistance;
-  public final double cutoffLinearDistance;
   public final SidedPlane circlePlane;
   public final GeoPoint[] edgePoints;
   public static final GeoPoint[] circlePoints = new GeoPoint[0];
 
-  public GeoCircle(final double lat, final double lon, final double cutoffAngle) {
-    super();
+  public GeoCircle(final PlanetModel planetModel, final double lat, final double lon, final double cutoffAngle) {
+    super(planetModel);
     if (lat < -Math.PI * 0.5 || lat > Math.PI * 0.5)
       throw new IllegalArgumentException("Latitude out of bounds");
     if (lon < -Math.PI || lon > Math.PI)
       throw new IllegalArgumentException("Longitude out of bounds");
     if (cutoffAngle <= 0.0 || cutoffAngle > Math.PI)
       throw new IllegalArgumentException("Cutoff angle out of bounds");
-    final double sinAngle = Math.sin(cutoffAngle);
     final double cosAngle = Math.cos(cutoffAngle);
-    this.center = new GeoPoint(lat, lon);
-    this.cutoffNormalDistance = sinAngle;
-    // Need the chord distance.  This is just the chord distance: sqrt((1 - cos(angle))^2 + (sin(angle))^2).
-    final double xDiff = 1.0 - cosAngle;
-    this.cutoffLinearDistance = Math.sqrt(xDiff * xDiff + sinAngle * sinAngle);
+    this.center = new GeoPoint(planetModel, lat, lon);
+    final double magnitude = center.magnitude();
+    // In an ellipsoidal world, cutoff distances make no sense, unfortunately.  Only membership
+    // can be used to make in/out determination.
     this.cutoffAngle = cutoffAngle;
-    this.circlePlane = new SidedPlane(center, center, -cosAngle);
+    // The plane's normal vector needs to be normalized, since we compute D on that basis
+    this.circlePlane = new SidedPlane(center, center.normalize(), -cosAngle * magnitude);
 
     // Compute a point on the circle boundary.
     if (cutoffAngle == Math.PI)
       this.edgePoints = new GeoPoint[0];
     else {
-      // Move from center only in latitude.  Then, if we go past the north pole, adjust the longitude also.
-      double newLat = lat + cutoffAngle;
-      double newLon = lon;
-      if (newLat > Math.PI * 0.5) {
-        newLat = Math.PI - newLat;
-        newLon += Math.PI;
+      // We already have circle plane, which is the definitive determination of the edge of the "circle".
+      // Next, compute vertical plane going through origin and the center point (C = 0, D = 0).
+      Plane verticalPlane = Plane.constructNormalizedVerticalPlane(this.center.x, this.center.y);
+      if (verticalPlane == null) {
+        verticalPlane = new Plane(1.0,0.0);
       }
-      while (newLon > Math.PI) {
-        newLon -= Math.PI * 2.0;
+      // Finally, use getSampleIntersectionPoint() to find a point where the circle plane intersects the vertical plane.
+      final GeoPoint edgePoint = this.circlePlane.getSampleIntersectionPoint(planetModel, verticalPlane);
+      if (edgePoint == null) {
+        throw new RuntimeException("Could not find edge point for circle at lat="+lat+" lon="+lon+" cutoffAngle="+cutoffAngle+" planetModel="+planetModel);
       }
-      final GeoPoint edgePoint = new GeoPoint(newLat, newLon);
       //if (Math.abs(circlePlane.evaluate(edgePoint)) > 1e-10)
       //    throw new RuntimeException("Computed an edge point that does not satisfy circlePlane equation! "+circlePlane.evaluate(edgePoint));
       this.edgePoints = new GeoPoint[]{edgePoint};
@@ -92,10 +89,9 @@
    */
   @Override
   public double computeNormalDistance(final GeoPoint point) {
-    double normalDistance = this.center.normalDistance(point);
-    if (normalDistance > cutoffNormalDistance)
+    if (!isWithin(point))
       return Double.MAX_VALUE;
-    return normalDistance;
+    return this.center.normalDistance(point);
   }
 
   /**
@@ -105,10 +101,9 @@
    */
   @Override
   public double computeNormalDistance(final double x, final double y, final double z) {
-    double normalDistance = this.center.normalDistance(x, y, z);
-    if (normalDistance > cutoffNormalDistance)
+    if (!isWithin(x,y,z))
       return Double.MAX_VALUE;
-    return normalDistance;
+    return this.center.normalDistance(x, y, z);
   }
 
   /**
@@ -118,10 +113,9 @@
    */
   @Override
   public double computeSquaredNormalDistance(final GeoPoint point) {
-    double normalDistanceSquared = this.center.normalDistanceSquared(point);
-    if (normalDistanceSquared > cutoffNormalDistance * cutoffNormalDistance)
+    if (!isWithin(point))
       return Double.MAX_VALUE;
-    return normalDistanceSquared;
+    return this.center.normalDistanceSquared(point);
   }
 
   /**
@@ -131,10 +125,9 @@
    */
   @Override
   public double computeSquaredNormalDistance(final double x, final double y, final double z) {
-    double normalDistanceSquared = this.center.normalDistanceSquared(x, y, z);
-    if (normalDistanceSquared > cutoffNormalDistance * cutoffNormalDistance)
+    if (!isWithin(x,y,z))
       return Double.MAX_VALUE;
-    return normalDistanceSquared;
+    return this.center.normalDistanceSquared(x, y, z);
   }
 
   /**
@@ -143,10 +136,9 @@
    */
   @Override
   public double computeLinearDistance(final GeoPoint point) {
-    double linearDistance = this.center.linearDistance(point);
-    if (linearDistance > cutoffLinearDistance)
+    if (!isWithin(point))
       return Double.MAX_VALUE;
-    return linearDistance;
+    return this.center.linearDistance(point);
   }
 
   /**
@@ -155,10 +147,9 @@
    */
   @Override
   public double computeLinearDistance(final double x, final double y, final double z) {
-    double linearDistance = this.center.linearDistance(x, y, z);
-    if (linearDistance > cutoffLinearDistance)
+    if (!isWithin(x,y,z))
       return Double.MAX_VALUE;
-    return linearDistance;
+    return this.center.linearDistance(x, y, z);
   }
 
   /**
@@ -166,10 +157,9 @@
    */
   @Override
   public double computeSquaredLinearDistance(final GeoPoint point) {
-    double linearDistanceSquared = this.center.linearDistanceSquared(point);
-    if (linearDistanceSquared > cutoffLinearDistance * cutoffLinearDistance)
+    if (!isWithin(point))
       return Double.MAX_VALUE;
-    return linearDistanceSquared;
+    return this.center.linearDistanceSquared(point);
   }
 
   /**
@@ -177,10 +167,9 @@
    */
   @Override
   public double computeSquaredLinearDistance(final double x, final double y, final double z) {
-    double linearDistanceSquared = this.center.linearDistanceSquared(x, y, z);
-    if (linearDistanceSquared > cutoffLinearDistance * cutoffLinearDistance)
+    if (!isWithin(x,y,z))
       return Double.MAX_VALUE;
-    return linearDistanceSquared;
+    return this.center.linearDistanceSquared(x, y, z);
   }
 
   /**
@@ -189,10 +178,9 @@
    */
   @Override
   public double computeArcDistance(final GeoPoint point) {
-    double dist = this.center.arcDistance(point);
-    if (dist > cutoffAngle)
+    if (!isWithin(point))
       return Double.MAX_VALUE;
-    return dist;
+    return this.center.arcDistance(point);
   }
 
   @Override
@@ -214,7 +202,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return circlePlane.intersects(p, notablePoints, circlePoints, bounds);
+    return circlePlane.intersects(planetModel, p, notablePoints, circlePoints, bounds);
   }
 
   /**
@@ -230,7 +218,7 @@
   public Bounds getBounds(Bounds bounds) {
     bounds = super.getBounds(bounds);
     bounds.addPoint(center);
-    circlePlane.recordBounds(bounds);
+    circlePlane.recordBounds(planetModel, bounds);
     return bounds;
   }
 
@@ -239,21 +227,20 @@
     if (!(o instanceof GeoCircle))
       return false;
     GeoCircle other = (GeoCircle) o;
-    return other.center.equals(center) && other.cutoffAngle == cutoffAngle;
+    return super.equals(other) && other.center.equals(center) && other.cutoffAngle == cutoffAngle;
   }
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    result = center.hashCode();
-    temp = Double.doubleToLongBits(cutoffAngle);
+    int result = super.hashCode();
+    result = 31 * result + center.hashCode();
+    long temp = Double.doubleToLongBits(cutoffAngle);
     result = 31 * result + (int) (temp ^ (temp >>> 32));
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoCircle: {center=" + center + ", radius=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + ")}";
+    return "GeoCircle: {planetmodel=" + planetModel+", center=" + center + ", radius=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + ")}";
   }
 }
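
[Sketch, not part of the patch.]  The GeoCircle change above threads a PlanetModel through construction:
the center is projected onto the configured ellipsoid, the circle plane's D term is scaled by the center's
magnitude, and the chord/normal cutoff shortcuts are dropped in favor of plain membership checks.  The
snippet below is a minimal usage sketch; it assumes this branch exposes a spherical constant such as
PlanetModel.SPHERE (anything not visible in the diff above is an assumption).

    import org.apache.lucene.spatial.spatial4j.geo3d.GeoCircle;
    import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
    import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;

    public class GeoCircleSketch {
      public static void main(String[] args) {
        final PlanetModel pm = PlanetModel.SPHERE;              // assumed constant
        // A circle of ~10 degrees angular radius centered at lat=0.5, lon=-1.0 (radians).
        final GeoCircle circle = new GeoCircle(pm, 0.5, -1.0, Math.toRadians(10.0));
        final GeoPoint probe = new GeoPoint(pm, 0.55, -1.05);   // a point near the center
        System.out.println("within?       " + circle.isWithin(probe));
        // Outside the cutoff the distance methods now answer Double.MAX_VALUE, since fixed
        // chord/normal cutoff distances are meaningless on an ellipsoid.
        System.out.println("arc distance: " + circle.computeArcDistance(probe));
      }
    }
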
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygon.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygon.java
index cf9a1d7..26a763a 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygon.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygon.java
@@ -45,7 +45,8 @@
    * Create a convex polygon from a list of points.  The first point must be on the
    * external edge.
    */
-  public GeoConvexPolygon(final List<GeoPoint> pointList) {
+  public GeoConvexPolygon(final PlanetModel planetModel, final List<GeoPoint> pointList) {
+    super(planetModel);
     this.points = pointList;
     this.isInternalEdges = null;
     donePoints(false);
@@ -55,7 +56,8 @@
    * Create a convex polygon from a list of points, keeping track of which boundaries
    * are internal.  This is used when creating a polygon as a building block for another shape.
    */
-  public GeoConvexPolygon(final List<GeoPoint> pointList, final BitSet internalEdgeFlags, final boolean returnEdgeInternal) {
+  public GeoConvexPolygon(final PlanetModel planetModel, final List<GeoPoint> pointList, final BitSet internalEdgeFlags, final boolean returnEdgeInternal) {
+    super(planetModel);
     this.points = pointList;
     this.isInternalEdges = internalEdgeFlags;
     donePoints(returnEdgeInternal);
@@ -65,7 +67,8 @@
    * Create a convex polygon, with a starting latitude and longitude.
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
    */
-  public GeoConvexPolygon(final double startLatitude, final double startLongitude) {
+  public GeoConvexPolygon(final PlanetModel planetModel, final double startLatitude, final double startLongitude) {
+    super(planetModel);
     points = new ArrayList<GeoPoint>();
     isInternalEdges = new BitSet();
     // Argument checking
@@ -74,7 +77,7 @@
     if (startLongitude < -Math.PI || startLongitude > Math.PI)
       throw new IllegalArgumentException("Longitude out of range");
 
-    final GeoPoint p = new GeoPoint(startLatitude, startLongitude);
+    final GeoPoint p = new GeoPoint(planetModel, startLatitude, startLongitude);
     points.add(p);
   }
 
@@ -94,7 +97,7 @@
     if (longitude < -Math.PI || longitude > Math.PI)
       throw new IllegalArgumentException("Longitude out of range");
 
-    final GeoPoint p = new GeoPoint(latitude, longitude);
+    final GeoPoint p = new GeoPoint(planetModel, latitude, longitude);
     isInternalEdges.set(points.size(), isInternalEdge);
     points.add(p);
   }
@@ -191,7 +194,7 @@
             membershipBounds[count++] = edges[otherIndex];
           }
         }
-        if (edge.intersects(p, notablePoints, points, bounds, membershipBounds)) {
+        if (edge.intersects(planetModel, p, notablePoints, points, bounds, membershipBounds)) {
           //System.err.println(" intersects!");
           return true;
         }
@@ -230,7 +233,7 @@
           membershipBounds[count++] = edges[otherIndex];
         }
       }
-      edge.recordBounds(bounds, membershipBounds);
+      edge.recordBounds(planetModel, bounds, membershipBounds);
     }
 
     if (fullDistance >= Math.PI) {
@@ -245,6 +248,8 @@
     if (!(o instanceof GeoConvexPolygon))
       return false;
     GeoConvexPolygon other = (GeoConvexPolygon) o;
+    if (!super.equals(other))
+      return false;
     if (other.points.size() != points.size())
       return false;
 
@@ -257,17 +262,14 @@
 
   @Override
   public int hashCode() {
-    return points.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + points.hashCode();
+    return result;
   }
 
   @Override
   public String toString() {
-    StringBuilder edgeString = new StringBuilder("{");
-    for (int i = 0; i < edges.length; i++) {
-      edgeString.append(edges[i]).append(" internal? ").append(internalEdges[i]).append("; ");
-    }
-    edgeString.append("}");
-    return "GeoConvexPolygon: {points=" + points + " edges=" + edgeString + "}";
+    return "GeoConvexPolygon: {planetmodel=" + planetModel + ", points=" + points + "}";
   }
 }
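
A short sketch of the updated GeoConvexPolygon construction (not part of the patch; PlanetModel.SPHERE,
the geo3d imports from the earlier GeoCircle sketch, and the illustrative coordinates are assumptions,
while the constructor signatures follow the diff above):

    final PlanetModel pm = PlanetModel.SPHERE;                  // assumed constant
    final List<GeoPoint> corners = new ArrayList<GeoPoint>();
    corners.add(new GeoPoint(pm, 0.0, 0.0));
    corners.add(new GeoPoint(pm, 0.0, 0.1));
    corners.add(new GeoPoint(pm, 0.1, 0.05));
    // The list constructor calls donePoints(false) itself, so the polygon is ready to use.
    final GeoConvexPolygon triangle = new GeoConvexPolygon(pm, corners);
    System.out.println(triangle.isWithin(new GeoPoint(pm, 0.03, 0.05)));
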
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateHorizontalLine.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateHorizontalLine.java
index 14a7396..e1fd4ea 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateHorizontalLine.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateHorizontalLine.java
@@ -24,7 +24,7 @@
  *
  * @lucene.internal
  */
-public class GeoDegenerateHorizontalLine extends GeoBBoxBase {
+public class GeoDegenerateHorizontalLine extends GeoBaseBBox {
   public final double latitude;
   public final double leftLon;
   public final double rightLon;
@@ -44,7 +44,8 @@
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
    */
-  public GeoDegenerateHorizontalLine(final double latitude, final double leftLon, double rightLon) {
+  public GeoDegenerateHorizontalLine(final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (latitude > Math.PI * 0.5 || latitude < -Math.PI * 0.5)
       throw new IllegalArgumentException("Latitude out of range");
@@ -71,10 +72,10 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the two points
-    this.LHC = new GeoPoint(sinLatitude, sinLeftLon, cosLatitude, cosLeftLon);
-    this.RHC = new GeoPoint(sinLatitude, sinRightLon, cosLatitude, cosRightLon);
+    this.LHC = new GeoPoint(planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon);
+    this.RHC = new GeoPoint(planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon);
 
-    this.plane = new Plane(sinLatitude);
+    this.plane = new Plane(planetModel, sinLatitude);
 
     // Normalize
     while (leftLon > rightLon) {
@@ -84,7 +85,7 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
@@ -107,7 +108,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -148,7 +149,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(plane, notablePoints, planePoints, bounds, leftPlane, rightPlane);
+    return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, leftPlane, rightPlane);
   }
 
   /**
@@ -170,12 +171,18 @@
 
   @Override
   public int getRelationship(final GeoShape path) {
-    if (path.intersects(plane, planePoints, leftPlane, rightPlane))
+    //System.err.println("getting relationship between "+this+" and "+path);
+    if (path.intersects(plane, planePoints, leftPlane, rightPlane)) {
+      //System.err.println(" overlaps");
       return OVERLAPS;
+    }
 
-    if (path.isWithin(centerPoint))
+    if (path.isWithin(centerPoint)) {
+      //System.err.println(" contains");
       return CONTAINS;
+    }
 
+    //System.err.println(" disjoint");
     return DISJOINT;
   }
 
@@ -184,19 +191,20 @@
     if (!(o instanceof GeoDegenerateHorizontalLine))
       return false;
     GeoDegenerateHorizontalLine other = (GeoDegenerateHorizontalLine) o;
-    return other.LHC.equals(LHC) && other.RHC.equals(RHC);
+    return super.equals(other) && other.LHC.equals(LHC) && other.RHC.equals(RHC);
   }
 
   @Override
   public int hashCode() {
-    int result = LHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + LHC.hashCode();
     result = 31 * result + RHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoDegenerateHorizontalLine: {latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoDegenerateHorizontalLine: {planetmodel="+planetModel+", latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLatitudeZone.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLatitudeZone.java
index 982f8f5..31bd77b 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLatitudeZone.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLatitudeZone.java
@@ -23,7 +23,7 @@
  *
  * @lucene.internal
  */
-public class GeoDegenerateLatitudeZone extends GeoBBoxBase {
+public class GeoDegenerateLatitudeZone extends GeoBaseBBox {
   public final double latitude;
 
   public final double sinLatitude;
@@ -32,14 +32,15 @@
   public final GeoPoint[] edgePoints;
   public final static GeoPoint[] planePoints = new GeoPoint[0];
 
-  public GeoDegenerateLatitudeZone(final double latitude) {
+  public GeoDegenerateLatitudeZone(final PlanetModel planetModel, final double latitude) {
+    super(planetModel);
     this.latitude = latitude;
 
     this.sinLatitude = Math.sin(latitude);
     double cosLatitude = Math.cos(latitude);
-    this.plane = new Plane(sinLatitude);
+    this.plane = new Plane(planetModel, sinLatitude);
     // Compute an interior point.
-    interiorPoint = new GeoPoint(cosLatitude, 0.0, sinLatitude);
+    interiorPoint = new GeoPoint(planetModel, sinLatitude, 0.0, cosLatitude, 1.0);
     edgePoints = new GeoPoint[]{interiorPoint};
   }
 
@@ -47,7 +48,7 @@
   public GeoBBox expand(final double angle) {
     double newTopLat = latitude + angle;
     double newBottomLat = latitude - angle;
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, -Math.PI, Math.PI);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, -Math.PI, Math.PI);
   }
 
   @Override
@@ -83,7 +84,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(plane, notablePoints, planePoints, bounds);
+    return p.intersects(planetModel, plane, notablePoints, planePoints, bounds);
   }
 
   /**
@@ -125,19 +126,20 @@
     if (!(o instanceof GeoDegenerateLatitudeZone))
       return false;
     GeoDegenerateLatitudeZone other = (GeoDegenerateLatitudeZone) o;
-    return other.latitude == latitude;
+    return super.equals(other) && other.latitude == latitude;
   }
 
   @Override
   public int hashCode() {
+    int result = super.hashCode();
     long temp = Double.doubleToLongBits(latitude);
-    int result = (int) (temp ^ (temp >>> 32));
+    result = 31 * result + (int) (temp ^ (temp >>> 32));
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoDegenerateLatitudeZone: {lat=" + latitude + "(" + latitude * 180.0 / Math.PI + ")}";
+    return "GeoDegenerateLatitudeZone: {planetmodel="+planetModel+", lat=" + latitude + "(" + latitude * 180.0 / Math.PI + ")}";
   }
 }
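
The interior and boundary points above are now built with the trigonometric GeoPoint constructor
(planetModel, sinLat, sinLon, cosLat, cosLon) instead of raw (x, y, z) values, so the point lands on the
configured ellipsoid rather than the unit sphere.  A small sketch of the equivalence on a sphere follows
(not part of the patch; PlanetModel.SPHERE is an assumption):

    final PlanetModel sphere = PlanetModel.SPHERE;              // assumed constant
    final double lat = 0.7, lon = 0.0;
    // Trig form, as used throughout this patch:
    final GeoPoint viaTrig = new GeoPoint(sphere, Math.sin(lat), Math.sin(lon), Math.cos(lat), Math.cos(lon));
    // Lat/lon form:
    final GeoPoint viaLatLon = new GeoPoint(sphere, lat, lon);
    // On the unit sphere both describe the same surface point (x = cosLat*cosLon, y = cosLat*sinLon,
    // z = sinLat), up to rounding; on an ellipsoidal model the constructor also scales by the model's
    // axes, which is why the raw (x, y, z) form was dropped here.
    System.out.println(viaTrig + " vs " + viaLatLon);
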
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLongitudeSlice.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLongitudeSlice.java
index 69c703b..1e41aca 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLongitudeSlice.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateLongitudeSlice.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoDegenerateLongitudeSlice extends GeoBBoxBase {
+public class GeoDegenerateLongitudeSlice extends GeoBaseBBox {
   public final double longitude;
 
   public final double sinLongitude;
@@ -32,12 +32,13 @@
   public final GeoPoint interiorPoint;
   public final GeoPoint[] edgePoints;
 
-  public final static GeoPoint[] planePoints = new GeoPoint[]{NORTH_POLE, SOUTH_POLE};
+  public final GeoPoint[] planePoints;
 
   /**
    * Accepts only values in the following ranges: lon: {@code -PI -> PI}
    */
-  public GeoDegenerateLongitudeSlice(final double longitude) {
+  public GeoDegenerateLongitudeSlice(final PlanetModel planetModel, final double longitude) {
+    super(planetModel);
     // Argument checking
     if (longitude < -Math.PI || longitude > Math.PI)
       throw new IllegalArgumentException("Longitude out of range");
@@ -48,9 +49,10 @@
 
     this.plane = new Plane(cosLongitude, sinLongitude);
     // We need a bounding plane too, which is perpendicular to the longitude plane and sided so that the point (0.0, longitude) is inside.
-    this.interiorPoint = new GeoPoint(cosLongitude, sinLongitude, 0.0);
+    this.interiorPoint = new GeoPoint(planetModel, 0.0, sinLongitude, 1.0, cosLongitude);
     this.boundingPlane = new SidedPlane(interiorPoint, -sinLongitude, cosLongitude);
     this.edgePoints = new GeoPoint[]{interiorPoint};
+    this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE};
   }
 
   @Override
@@ -63,7 +65,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
   }
 
   @Override
@@ -100,7 +102,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(plane, notablePoints, planePoints, bounds, boundingPlane);
+    return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, boundingPlane);
   }
 
   /**
@@ -138,21 +140,20 @@
     if (!(o instanceof GeoDegenerateLongitudeSlice))
       return false;
     GeoDegenerateLongitudeSlice other = (GeoDegenerateLongitudeSlice) o;
-    return other.longitude == longitude;
+    return super.equals(other) && other.longitude == longitude;
   }
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    temp = Double.doubleToLongBits(longitude);
-    result = (int) (temp ^ (temp >>> 32));
+    int result = super.hashCode();
+    long temp = Double.doubleToLongBits(longitude);
+    result = result * 31 + (int) (temp ^ (temp >>> 32));
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoDegenerateLongitudeSlice: {longitude=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}";
+    return "GeoDegenerateLongitudeSlice: {planetmodel="+planetModel+", longitude=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}";
   }
 }
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegeneratePoint.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegeneratePoint.java
index 2f76ea8..258b8dd 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegeneratePoint.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegeneratePoint.java
@@ -26,10 +26,12 @@
 public class GeoDegeneratePoint extends GeoPoint implements GeoBBox {
   public final double latitude;
   public final double longitude;
+  public final PlanetModel planetModel;
   public final GeoPoint[] edgePoints;
 
-  public GeoDegeneratePoint(final double lat, final double lon) {
-    super(lat, lon);
+  public GeoDegeneratePoint(final PlanetModel planetModel, final double lat, final double lon) {
+    super(planetModel, lat, lon);
+    this.planetModel = planetModel;
     this.latitude = lat;
     this.longitude = lon;
     this.edgePoints = new GeoPoint[]{this};
@@ -47,7 +49,7 @@
     final double newBottomLat = latitude - angle;
     final double newLeftLon = longitude - angle;
     final double newRightLon = longitude + angle;
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   /**
@@ -108,15 +110,14 @@
     if (!(o instanceof GeoDegeneratePoint))
       return false;
     GeoDegeneratePoint other = (GeoDegeneratePoint) o;
-    return other.latitude == latitude && other.longitude == longitude;
+    return super.equals(other) && other.latitude == latitude && other.longitude == longitude;
   }
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    temp = Double.doubleToLongBits(latitude);
-    result = (int) (temp ^ (temp >>> 32));
+    int result = super.hashCode();
+    long temp = Double.doubleToLongBits(latitude);
+    result = 31 * result + (int) (temp ^ (temp >>> 32));
     temp = Double.doubleToLongBits(longitude);
     result = 31 * result + (int) (temp ^ (temp >>> 32));
     return result;
@@ -124,7 +125,7 @@
 
   @Override
   public String toString() {
-    return "GeoDegeneratePoint: {lat=" + latitude + "(" + latitude * 180.0 / Math.PI + "), lon=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}";
+    return "GeoDegeneratePoint: {planetmodel="+planetModel+", lat=" + latitude + "(" + latitude * 180.0 / Math.PI + "), lon=" + longitude + "(" + longitude * 180.0 / Math.PI + ")}";
   }
 
   /**
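
A degenerate point doubles as a zero-area GeoBBox, and its expand() now routes through the
planet-model-aware factory.  A tiny sketch (not part of the patch; PlanetModel.SPHERE and the follow-up
membership test are assumptions):

    final PlanetModel pm = PlanetModel.SPHERE;                  // assumed constant
    final GeoDegeneratePoint pt = new GeoDegeneratePoint(pm, 0.25, 1.0);
    // Grow the point into a small rectangle, half a degree of slop on every side.
    final GeoBBox nearby = pt.expand(Math.toRadians(0.5));
    System.out.println(nearby.isWithin(new GeoPoint(pm, 0.251, 1.001)));  // expected: true
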
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateVerticalLine.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateVerticalLine.java
index dd0306b..7e9c8b5 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateVerticalLine.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoDegenerateVerticalLine.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoDegenerateVerticalLine extends GeoBBoxBase {
+public class GeoDegenerateVerticalLine extends GeoBaseBBox {
   public final double topLat;
   public final double bottomLat;
   public final double longitude;
@@ -43,7 +43,8 @@
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, longitude: {@code -PI -> PI}
    */
-  public GeoDegenerateVerticalLine(final double topLat, final double bottomLat, final double longitude) {
+  public GeoDegenerateVerticalLine(final PlanetModel planetModel, final double topLat, final double bottomLat, final double longitude) {
+    super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Top latitude out of range");
@@ -66,8 +67,8 @@
     final double cosLongitude = Math.cos(longitude);
 
     // Now build the two points
-    this.UHC = new GeoPoint(sinTopLat, sinLongitude, cosTopLat, cosLongitude);
-    this.LHC = new GeoPoint(sinBottomLat, sinLongitude, cosBottomLat, cosLongitude);
+    this.UHC = new GeoPoint(planetModel, sinTopLat, sinLongitude, cosTopLat, cosLongitude);
+    this.LHC = new GeoPoint(planetModel, sinBottomLat, sinLongitude, cosBottomLat, cosLongitude);
 
     this.plane = new Plane(cosLongitude, sinLongitude);
 
@@ -75,10 +76,10 @@
     final double sinMiddleLat = Math.sin(middleLat);
     final double cosMiddleLat = Math.cos(middleLat);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinLongitude, cosMiddleLat, cosLongitude);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinLongitude, cosMiddleLat, cosLongitude);
 
-    this.topPlane = new SidedPlane(centerPoint, sinTopLat);
-    this.bottomPlane = new SidedPlane(centerPoint, sinBottomLat);
+    this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
+    this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
 
     this.boundingPlane = new SidedPlane(centerPoint, -sinLongitude, cosLongitude);
 
@@ -98,7 +99,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -144,7 +145,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(plane, notablePoints, planePoints, bounds, boundingPlane, topPlane, bottomPlane);
+    return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, boundingPlane, topPlane, bottomPlane);
   }
 
   /**
@@ -187,12 +188,13 @@
     if (!(o instanceof GeoDegenerateVerticalLine))
       return false;
     GeoDegenerateVerticalLine other = (GeoDegenerateVerticalLine) o;
-    return other.UHC.equals(UHC) && other.LHC.equals(LHC);
+    return super.equals(other) && other.UHC.equals(UHC) && other.LHC.equals(LHC);
   }
 
   @Override
   public int hashCode() {
-    int result = UHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + UHC.hashCode();
     result = 31 * result + LHC.hashCode();
     return result;
   }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLatitudeZone.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLatitudeZone.java
index 132893c..5bec580 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLatitudeZone.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLatitudeZone.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoLatitudeZone extends GeoBBoxBase {
+public class GeoLatitudeZone extends GeoBaseBBox {
   public final double topLat;
   public final double bottomLat;
   public final double cosTopLat;
@@ -40,7 +40,8 @@
   // Edge points
   public final GeoPoint[] edgePoints;
 
-  public GeoLatitudeZone(final double topLat, final double bottomLat) {
+  public GeoLatitudeZone(final PlanetModel planetModel, final double topLat, final double bottomLat) {
+    super(planetModel);
     this.topLat = topLat;
     this.bottomLat = bottomLat;
 
@@ -49,19 +50,15 @@
     this.cosTopLat = Math.cos(topLat);
     this.cosBottomLat = Math.cos(bottomLat);
 
-    // Construct sample points, so we get our sidedness right
-    final Vector topPoint = new Vector(0.0, 0.0, sinTopLat);
-    final Vector bottomPoint = new Vector(0.0, 0.0, sinBottomLat);
-
     // Compute an interior point.  Pick one whose lat is between top and bottom.
     final double middleLat = (topLat + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
-    this.interiorPoint = new GeoPoint(Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 0.0, sinMiddleLat);
-    this.topBoundaryPoint = new GeoPoint(Math.sqrt(1.0 - sinTopLat * sinTopLat), 0.0, sinTopLat);
-    this.bottomBoundaryPoint = new GeoPoint(Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 0.0, sinBottomLat);
+    this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0);
+    this.topBoundaryPoint = new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0);
+    this.bottomBoundaryPoint = new GeoPoint(planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0);
 
-    this.topPlane = new SidedPlane(interiorPoint, sinTopLat);
-    this.bottomPlane = new SidedPlane(interiorPoint, sinBottomLat);
+    this.topPlane = new SidedPlane(interiorPoint, planetModel, sinTopLat);
+    this.bottomPlane = new SidedPlane(interiorPoint, planetModel, sinBottomLat);
 
     this.edgePoints = new GeoPoint[]{topBoundaryPoint, bottomBoundaryPoint};
   }
@@ -70,7 +67,7 @@
   public GeoBBox expand(final double angle) {
     final double newTopLat = topLat + angle;
     final double newBottomLat = bottomLat - angle;
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, -Math.PI, Math.PI);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, -Math.PI, Math.PI);
   }
 
   @Override
@@ -115,8 +112,8 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(topPlane, notablePoints, planePoints, bounds, bottomPlane) ||
-        p.intersects(bottomPlane, notablePoints, planePoints, bounds, topPlane);
+    return p.intersects(planetModel, topPlane, notablePoints, planePoints, bounds, bottomPlane) ||
+        p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds, topPlane);
   }
 
   /**
@@ -181,18 +178,19 @@
     if (!(o instanceof GeoLatitudeZone))
       return false;
     GeoLatitudeZone other = (GeoLatitudeZone) o;
-    return other.topPlane.equals(topPlane) && other.bottomPlane.equals(bottomPlane);
+    return super.equals(other) && other.topBoundaryPoint.equals(topBoundaryPoint) && other.bottomBoundaryPoint.equals(bottomBoundaryPoint);
   }
 
   @Override
   public int hashCode() {
-    int result = topPlane.hashCode();
-    result = 31 * result + bottomPlane.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + topBoundaryPoint.hashCode();
+    result = 31 * result + bottomBoundaryPoint.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoLatitudeZone: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}";
+    return "GeoLatitudeZone: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}";
   }
 }
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLongitudeSlice.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLongitudeSlice.java
index adf1fe3..d500b95 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLongitudeSlice.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoLongitudeSlice.java
@@ -24,23 +24,24 @@
  *
  * @lucene.internal
  */
-public class GeoLongitudeSlice extends GeoBBoxBase {
+public class GeoLongitudeSlice extends GeoBaseBBox {
   public final double leftLon;
   public final double rightLon;
 
   public final SidedPlane leftPlane;
   public final SidedPlane rightPlane;
 
-  public final static GeoPoint[] planePoints = new GeoPoint[]{NORTH_POLE, SOUTH_POLE};
+  public final GeoPoint[] planePoints;
 
   public final GeoPoint centerPoint;
 
-  public final static GeoPoint[] edgePoints = new GeoPoint[]{NORTH_POLE};
+  public final GeoPoint[] edgePoints;
 
   /**
    * Accepts only values in the following ranges: lon: {@code -PI -> PI}
    */
-  public GeoLongitudeSlice(final double leftLon, double rightLon) {
+  public GeoLongitudeSlice(final PlanetModel planetModel, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (leftLon < -Math.PI || leftLon > Math.PI)
       throw new IllegalArgumentException("Left longitude out of range");
@@ -66,11 +67,13 @@
       rightLon += Math.PI * 2.0;
     }
     final double middleLon = (leftLon + rightLon) * 0.5;
-    this.centerPoint = new GeoPoint(0.0, middleLon);
+    this.centerPoint = new GeoPoint(planetModel, 0.0, middleLon);
 
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
+    this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE};
+    this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE};
   }
 
   @Override
@@ -85,7 +88,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
   }
 
   @Override
@@ -126,8 +129,8 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(leftPlane, notablePoints, planePoints, bounds, rightPlane) ||
-        p.intersects(rightPlane, notablePoints, planePoints, bounds, leftPlane);
+    return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds, rightPlane) ||
+        p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds, leftPlane);
   }
 
   /**
@@ -154,7 +157,7 @@
     if (insideRectangle == SOME_INSIDE)
       return OVERLAPS;
 
-    final boolean insideShape = path.isWithin(NORTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.NORTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape)
       return OVERLAPS;
@@ -180,15 +183,14 @@
     if (!(o instanceof GeoLongitudeSlice))
       return false;
     GeoLongitudeSlice other = (GeoLongitudeSlice) o;
-    return other.leftLon == leftLon && other.rightLon == rightLon;
+    return super.equals(other) && other.leftLon == leftLon && other.rightLon == rightLon;
   }
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    temp = Double.doubleToLongBits(leftLon);
-    result = (int) (temp ^ (temp >>> 32));
+    int result = super.hashCode();
+    long temp = Double.doubleToLongBits(leftLon);
+    result = 31 * result + (int) (temp ^ (temp >>> 32));
     temp = Double.doubleToLongBits(rightLon);
     result = 31 * result + (int) (temp ^ (temp >>> 32));
     return result;
@@ -196,7 +198,7 @@
 
   @Override
   public String toString() {
-    return "GeoLongitudeSlice: {leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoLongitudeSlice: {planetmodel="+planetModel+", leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
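
GeoBBoxFactory.makeGeoBBox() now takes the PlanetModel as its first argument, and expand() passes the
model along, so widened boxes stay on the same ellipsoid.  A minimal sketch (not part of the patch;
PlanetModel.SPHERE is an assumption):

    final PlanetModel pm = PlanetModel.SPHERE;                  // assumed constant
    // Full-height longitude slice from lon=-0.5 to lon=0.5 (radians).
    final GeoBBox slice = GeoBBoxFactory.makeGeoBBox(pm, Math.PI * 0.5, -Math.PI * 0.5, -0.5, 0.5);
    // Add one degree of slop on every side; the factory picks the right concrete shape again.
    final GeoBBox widened = slice.expand(Math.toRadians(1.0));
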
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthLatitudeZone.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthLatitudeZone.java
index 4a03be2..1a2c128 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthLatitudeZone.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthLatitudeZone.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoNorthLatitudeZone extends GeoBBoxBase {
+public class GeoNorthLatitudeZone extends GeoBaseBBox {
   public final double bottomLat;
   public final double cosBottomLat;
   public final SidedPlane bottomPlane;
@@ -34,22 +34,20 @@
   // Edge points
   public final GeoPoint[] edgePoints;
 
-  public GeoNorthLatitudeZone(final double bottomLat) {
+  public GeoNorthLatitudeZone(final PlanetModel planetModel, final double bottomLat) {
+    super(planetModel);
     this.bottomLat = bottomLat;
 
     final double sinBottomLat = Math.sin(bottomLat);
     this.cosBottomLat = Math.cos(bottomLat);
 
-    // Construct sample points, so we get our sidedness right
-    final Vector bottomPoint = new Vector(0.0, 0.0, sinBottomLat);
-
     // Compute an interior point.  Pick one whose lat is between top and bottom.
     final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
-    this.interiorPoint = new GeoPoint(Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 0.0, sinMiddleLat);
-    this.bottomBoundaryPoint = new GeoPoint(Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 0.0, sinBottomLat);
+    this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0);
+    this.bottomBoundaryPoint = new GeoPoint(planetModel, sinBottomLat, 0.0, Math.sqrt(1.0 - sinBottomLat * sinBottomLat), 1.0);
 
-    this.bottomPlane = new SidedPlane(interiorPoint, sinBottomLat);
+    this.bottomPlane = new SidedPlane(interiorPoint, planetModel, sinBottomLat);
 
     this.edgePoints = new GeoPoint[]{bottomBoundaryPoint};
   }
@@ -58,7 +56,7 @@
   public GeoBBox expand(final double angle) {
     final double newTopLat = Math.PI * 0.5;
     final double newBottomLat = bottomLat - angle;
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, -Math.PI, Math.PI);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, -Math.PI, Math.PI);
   }
 
   @Override
@@ -101,7 +99,7 @@
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     return
-        p.intersects(bottomPlane, notablePoints, planePoints, bounds);
+        p.intersects(planetModel, bottomPlane, notablePoints, planePoints, bounds);
   }
 
   /**
@@ -159,18 +157,19 @@
     if (!(o instanceof GeoNorthLatitudeZone))
       return false;
     GeoNorthLatitudeZone other = (GeoNorthLatitudeZone) o;
-    return other.bottomPlane.equals(bottomPlane);
+    return super.equals(other) && other.bottomBoundaryPoint.equals(bottomBoundaryPoint);
   }
 
   @Override
   public int hashCode() {
-    int result = bottomPlane.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + bottomBoundaryPoint.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoNorthLatitudeZone: {bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}";
+    return "GeoNorthLatitudeZone: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + ")}";
   }
 }
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthRectangle.java
index be0c1cb..32f0a2e 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoNorthRectangle.java
@@ -25,7 +25,7 @@
  *
  * @lucene.internal
  */
-public class GeoNorthRectangle extends GeoBBoxBase {
+public class GeoNorthRectangle extends GeoBaseBBox {
   public final double bottomLat;
   public final double leftLon;
   public final double rightLon;
@@ -45,12 +45,13 @@
 
   public final GeoPoint centerPoint;
 
-  public final GeoPoint[] edgePoints = new GeoPoint[]{NORTH_POLE};
+  public final GeoPoint[] edgePoints;
 
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
    */
-  public GeoNorthRectangle(final double bottomLat, final double leftLon, double rightLon) {
+  public GeoNorthRectangle(final PlanetModel planetModel, final double bottomLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Bottom latitude out of range");
@@ -77,8 +78,8 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the points
-    this.LRHC = new GeoPoint(sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
-    this.LLHC = new GeoPoint(sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
+    this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
+    this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
 
     final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -91,16 +92,17 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.bottomPlane = new SidedPlane(centerPoint, sinBottomLat);
+    this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
     this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC};
-    this.leftPlanePoints = new GeoPoint[]{NORTH_POLE, LLHC};
-    this.rightPlanePoints = new GeoPoint[]{NORTH_POLE, LRHC};
+    this.leftPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LLHC};
+    this.rightPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LRHC};
 
+    this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE};
   }
 
   @Override
@@ -117,7 +119,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -164,9 +166,9 @@
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     return
-        p.intersects(bottomPlane, notablePoints, bottomPlanePoints, bounds, leftPlane, rightPlane) ||
-            p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, bottomPlane) ||
-            p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, bottomPlane);
+        p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, leftPlane, rightPlane) ||
+            p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, bottomPlane) ||
+            p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, bottomPlane);
   }
 
   /**
@@ -196,7 +198,7 @@
       return OVERLAPS;
     }
 
-    final boolean insideShape = path.isWithin(NORTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.NORTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape) {
       //System.err.println(" inside of each other");
@@ -229,19 +231,20 @@
     if (!(o instanceof GeoNorthRectangle))
       return false;
     GeoNorthRectangle other = (GeoNorthRectangle) o;
-    return other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC);
+    return super.equals(other) && other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC);
   }
 
   @Override
   public int hashCode() {
-    int result = LLHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + LLHC.hashCode();
     result = 31 * result + LRHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoNorthRectangle: {bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoNorthRectangle: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
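
The pole reference points used by the slices and rectangles above are no longer shared statics; they come
from the PlanetModel instance (planetModel.NORTH_POLE, planetModel.SOUTH_POLE), so they sit on the
configured ellipsoid.  A short sketch (not part of the patch; PlanetModel.SPHERE and the cap coordinates
are assumptions):

    final PlanetModel pm = PlanetModel.SPHERE;                  // assumed constant
    // A north polar cap: everything above latitude 1.4 radians, all longitudes.
    final GeoBBox cap = GeoBBoxFactory.makeGeoBBox(pm, Math.PI * 0.5, 1.4, -Math.PI, Math.PI);
    System.out.println(cap.isWithin(pm.NORTH_POLE));            // expected: true
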
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPath.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPath.java
index 2ff08b0..2dfacac 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPath.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPath.java
@@ -29,29 +29,25 @@
  * @lucene.experimental
  */
 public class GeoPath extends GeoBaseExtendedShape implements GeoDistanceShape {
+  
   public final double cutoffAngle;
-  public final double cutoffOffset;
-  public final double originDistance;
-  public final double chordDistance;
+  public final double sinAngle;
+  public final double cosAngle;
 
-  public final List<SegmentEndpoint> points = new ArrayList<SegmentEndpoint>();
+  public final List<GeoPoint> points = new ArrayList<GeoPoint>();
+  
+  public final List<SegmentEndpoint> endPoints = new ArrayList<SegmentEndpoint>();
   public final List<PathSegment> segments = new ArrayList<PathSegment>();
 
   public GeoPoint[] edgePoints = null;
 
-  public GeoPath(final double cutoffAngle) {
-    super();
-    if (cutoffAngle <= 0.0 || cutoffAngle > Math.PI * 0.5)
+  public GeoPath(final PlanetModel planetModel, final double maxCutoffAngle) {
+    super(planetModel);
+    if (maxCutoffAngle <= 0.0 || maxCutoffAngle > Math.PI * 0.5)
       throw new IllegalArgumentException("Cutoff angle out of bounds");
-    this.cutoffAngle = cutoffAngle;
-    final double cosAngle = Math.cos(cutoffAngle);
-    final double sinAngle = Math.sin(cutoffAngle);
-    // Cutoff offset is the linear distance given the angle
-    this.cutoffOffset = sinAngle;
-    this.originDistance = cosAngle;
-    // Compute chord distance
-    double xDiff = 1.0 - cosAngle;
-    this.chordDistance = Math.sqrt(xDiff * xDiff + sinAngle * sinAngle);
+    this.cutoffAngle = maxCutoffAngle;
+    this.cosAngle = Math.cos(maxCutoffAngle);
+    this.sinAngle = Math.sin(maxCutoffAngle);
   }
 
   public void addPoint(double lat, double lon) {
@@ -59,57 +55,86 @@
       throw new IllegalArgumentException("Latitude out of range");
     if (lon < -Math.PI || lon > Math.PI)
       throw new IllegalArgumentException("Longitude out of range");
-    final GeoPoint end = new GeoPoint(lat, lon);
-    if (points.size() > 0) {
-      final GeoPoint start = points.get(points.size() - 1).point;
-      final PathSegment ps = new PathSegment(start, end, cutoffOffset, cutoffAngle, chordDistance);
-      // Check for degeneracy; if the segment is degenerate, don't include the point
-      if (ps.isDegenerate())
-        return;
-      segments.add(ps);
-    } else {
-      // First point.  We compute the basic set of edgepoints here because we've got the lat and lon available.
-      // Move from center only in latitude.  Then, if we go past the north pole, adjust the longitude also.
-      double newLat = lat + cutoffAngle;
-      double newLon = lon;
-      if (newLat > Math.PI * 0.5) {
-        newLat = Math.PI - newLat;
-        newLon += Math.PI;
-      }
-      while (newLon > Math.PI) {
-        newLon -= Math.PI * 2.0;
-      }
-      final GeoPoint edgePoint = new GeoPoint(newLat, newLon);
-      this.edgePoints = new GeoPoint[]{edgePoint};
-    }
-    final SegmentEndpoint se = new SegmentEndpoint(end, originDistance, cutoffOffset, cutoffAngle, chordDistance);
-    points.add(se);
+    points.add(new GeoPoint(planetModel, lat, lon));
   }
-
+  
   public void done() {
     if (points.size() == 0)
       throw new IllegalArgumentException("Path must have at least one point");
-    if (segments.size() > 0) {
-      edgePoints = new GeoPoint[]{points.get(0).circlePlane.getSampleIntersectionPoint(segments.get(0).invertedStartCutoffPlane)};
-    }
-    for (int i = 0; i < points.size(); i++) {
-      final SegmentEndpoint pathPoint = points.get(i);
-      Membership previousEndBound = null;
-      GeoPoint[] previousEndNotablePoints = null;
-      Membership nextStartBound = null;
-      GeoPoint[] nextStartNotablePoints = null;
-      if (i > 0) {
-        final PathSegment previousSegment = segments.get(i - 1);
-        previousEndBound = previousSegment.invertedEndCutoffPlane;
-        previousEndNotablePoints = previousSegment.endCutoffPlanePoints;
+    // Compute an offset to use for all segments.  This will be based on the minimum magnitude of 
+    // the entire ellipsoid.
+    final double cutoffOffset = this.sinAngle * planetModel.getMinimumMagnitude();
+    
+    // First, build all segments.  We'll then go back and build corresponding segment endpoints.
+    GeoPoint lastPoint = null;
+    for (final GeoPoint end : points) {
+      if (lastPoint != null) {
+        final Plane normalizedConnectingPlane = new Plane(lastPoint, end).normalize();
+        if (normalizedConnectingPlane == null) {
+          continue;
+        }
+        segments.add(new PathSegment(planetModel, lastPoint, end, normalizedConnectingPlane, cutoffOffset));
       }
-      if (i < segments.size()) {
-        final PathSegment nextSegment = segments.get(i);
-        nextStartBound = nextSegment.invertedStartCutoffPlane;
-        nextStartNotablePoints = nextSegment.startCutoffPlanePoints;
-      }
-      pathPoint.setCutoffPlanes(previousEndNotablePoints, previousEndBound, nextStartNotablePoints, nextStartBound);
+      lastPoint = end;
     }
+    
+    if (segments.size() == 0) {
+      // Simple circle
+      final SegmentEndpoint onlyEndpoint = new SegmentEndpoint(points.get(0), cutoffOffset);
+      endPoints.add(onlyEndpoint);
+      // Find an edgepoint
+      // We already have the circle plane, which definitively determines the edge of the "circle".
+      // Next, compute vertical plane going through origin and the center point (C = 0, D = 0).
+      Plane verticalPlane = Plane.constructNormalizedVerticalPlane(onlyEndpoint.point.x, onlyEndpoint.point.y);
+      if (verticalPlane == null) {
+        verticalPlane = new Plane(1.0,0.0);
+      }
+      // Finally, use getSampleIntersectionPoint() (built on Plane.findIntersections()) to pick a point where the circle plane meets the vertical plane.
+      final GeoPoint edgePoint = onlyEndpoint.circlePlane.getSampleIntersectionPoint(planetModel, verticalPlane);
+      if (edgePoint == null) {
+        throw new RuntimeException("Could not find edge point for path endpoint="+onlyEndpoint.point+" cutoffOffset="+cutoffOffset+" planetModel="+planetModel);
+      }
+      this.edgePoints = new GeoPoint[]{edgePoint};
+      return;
+    }
+    
+    // Create segment endpoints.  Use an appropriate constructor for the start and end of the path.
+    for (int i = 0; i < segments.size(); i++) {
+      final PathSegment currentSegment = segments.get(i);
+      
+      if (i == 0) {
+        // Starting endpoint
+        final SegmentEndpoint startEndpoint = new SegmentEndpoint(currentSegment.start, 
+          currentSegment.startCutoffPlane, currentSegment.ULHC, currentSegment.LLHC);
+        endPoints.add(startEndpoint);
+        this.edgePoints = new GeoPoint[]{currentSegment.ULHC};
+        continue;
+      }
+      
+      // General intersection case
+      final PathSegment prevSegment = segments.get(i-1);
+      // We construct four separate planes, and evaluate which one includes all interior points with least overlap
+      final SidedPlane candidate1 = SidedPlane.constructNormalizedThreePointSidedPlane(currentSegment.start, prevSegment.URHC, currentSegment.ULHC, currentSegment.LLHC);
+      final SidedPlane candidate2 = SidedPlane.constructNormalizedThreePointSidedPlane(currentSegment.start, currentSegment.ULHC, currentSegment.LLHC, prevSegment.LRHC);
+      final SidedPlane candidate3 = SidedPlane.constructNormalizedThreePointSidedPlane(currentSegment.start, currentSegment.LLHC, prevSegment.LRHC, prevSegment.URHC);
+      final SidedPlane candidate4 = SidedPlane.constructNormalizedThreePointSidedPlane(currentSegment.start, prevSegment.LRHC, prevSegment.URHC, currentSegment.ULHC);
+
+      if (candidate1 == null && candidate2 == null && candidate3 == null && candidate4 == null) {
+        // The planes are identical.  We don't need a circle at all.  Special constructor...
+        endPoints.add(new SegmentEndpoint(currentSegment.start));
+      } else {
+        endPoints.add(new SegmentEndpoint(currentSegment.start,
+          prevSegment.endCutoffPlane, currentSegment.startCutoffPlane,
+          prevSegment.URHC, prevSegment.LRHC,
+          currentSegment.ULHC, currentSegment.LLHC,
+          candidate1, candidate2, candidate3, candidate4));
+      }
+    }
+    // Do final endpoint
+    final PathSegment lastSegment = segments.get(segments.size()-1);
+    endPoints.add(new SegmentEndpoint(lastSegment.end,
+      lastSegment.endCutoffPlane, lastSegment.URHC, lastSegment.LRHC));
+
   }
 
   /**
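
The reworked GeoPath above now collects raw GeoPoints via addPoint(), and done() builds the segments first
and then the segment endpoints, using a cutoff offset scaled by the planet model's minimum magnitude.  A
short usage sketch (not part of the patch; PlanetModel.SPHERE is an assumption and the coordinates are
illustrative):

    final PlanetModel pm = PlanetModel.SPHERE;                  // assumed constant
    // A path about 2 degrees wide running roughly west to east near the equator.
    final GeoPath path = new GeoPath(pm, Math.toRadians(2.0));
    path.addPoint(0.0, -0.5);
    path.addPoint(0.0, 0.0);
    path.addPoint(0.05, 0.5);
    path.done();                                                // builds segments and endpoints
    System.out.println(path.isWithin(new GeoPoint(pm, 0.01, 0.2)));
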
@@ -132,7 +157,7 @@
 
     int segmentIndex = 0;
     currentDistance = 0.0;
-    for (SegmentEndpoint endpoint : points) {
+    for (SegmentEndpoint endpoint : endPoints) {
       double distance = endpoint.pathNormalDistance(point);
       if (distance != Double.MAX_VALUE)
         return currentDistance + distance;
@@ -194,7 +219,7 @@
 
     int segmentIndex = 0;
     currentDistance = 0.0;
-    for (SegmentEndpoint endpoint : points) {
+    for (SegmentEndpoint endpoint : endPoints) {
       double distance = endpoint.pathLinearDistance(point);
       if (distance != Double.MAX_VALUE)
         return currentDistance + distance;
@@ -251,7 +276,7 @@
 
     int segmentIndex = 0;
     currentDistance = 0.0;
-    for (SegmentEndpoint endpoint : points) {
+    for (SegmentEndpoint endpoint : endPoints) {
       double distance = endpoint.pathDistance(point);
       if (distance != Double.MAX_VALUE)
         return currentDistance + distance;
@@ -264,20 +289,26 @@
 
   @Override
   public boolean isWithin(final Vector point) {
-    for (SegmentEndpoint pathPoint : points) {
-      if (pathPoint.isWithin(point))
+    //System.err.println("Assessing whether point "+point+" is within geopath "+this);
+    for (SegmentEndpoint pathPoint : endPoints) {
+      if (pathPoint.isWithin(point)) {
+        //System.err.println(" point is within SegmentEndpoint "+pathPoint);
         return true;
+      }
     }
     for (PathSegment pathSegment : segments) {
-      if (pathSegment.isWithin(point))
+      if (pathSegment.isWithin(point)) {
+        //System.err.println(" point is within PathSegment "+pathSegment);
         return true;
+      }
     }
+    //System.err.println(" point is not within geopath");
     return false;
   }
 
   @Override
   public boolean isWithin(final double x, final double y, final double z) {
-    for (SegmentEndpoint pathPoint : points) {
+    for (SegmentEndpoint pathPoint : endPoints) {
       if (pathPoint.isWithin(x, y, z))
         return true;
     }
@@ -304,15 +335,15 @@
     // any of the intersection points are within the bounds, then we've detected an intersection.
     // Well, sort of.  We can detect intersections also due to overlap of segments with each other.
     // But that's an edge case and we won't be optimizing for it.
-
-    for (final SegmentEndpoint pathPoint : points) {
-      if (pathPoint.intersects(plane, notablePoints, bounds)) {
+    //System.err.println(" Looking for intersection of plane "+plane+" with path "+this);
+    for (final SegmentEndpoint pathPoint : endPoints) {
+      if (pathPoint.intersects(planetModel, plane, notablePoints, bounds)) {
         return true;
       }
     }
 
     for (final PathSegment pathSegment : segments) {
-      if (pathSegment.intersects(plane, notablePoints, bounds)) {
+      if (pathSegment.intersects(planetModel, plane, notablePoints, bounds)) {
         return true;
       }
     }
@@ -336,10 +367,10 @@
     // never more than 180 degrees longitude at a pop or we risk having the
     // bounds object get itself inverted.  So do the edges first.
     for (PathSegment pathSegment : segments) {
-      pathSegment.getBounds(bounds);
+      pathSegment.getBounds(planetModel, bounds);
     }
-    for (SegmentEndpoint pathPoint : points) {
-      pathPoint.getBounds(bounds);
+    for (SegmentEndpoint pathPoint : endPoints) {
+      pathPoint.getBounds(planetModel, bounds);
     }
     return bounds;
   }
@@ -349,13 +380,15 @@
     if (!(o instanceof GeoPath))
       return false;
     GeoPath p = (GeoPath) o;
-    if (points.size() != p.points.size())
+    if (!super.equals(p))
+      return false;
+    if (endPoints.size() != p.endPoints.size())
       return false;
     if (cutoffAngle != p.cutoffAngle)
       return false;
-    for (int i = 0; i < points.size(); i++) {
-      SegmentEndpoint point = points.get(i);
-      SegmentEndpoint point2 = p.points.get(i);
+    for (int i = 0; i < endPoints.size(); i++) {
+      SegmentEndpoint point = endPoints.get(i);
+      SegmentEndpoint point2 = p.endPoints.get(i);
       if (!point.equals(point2))
         return false;
     }
@@ -364,101 +397,166 @@
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    temp = Double.doubleToLongBits(cutoffAngle);
-    result = (int) (temp ^ (temp >>> 32));
-    result = 31 * result + points.hashCode();
+    int result = super.hashCode();
+    long temp = Double.doubleToLongBits(cutoffAngle);
+    result = 31 * result + (int) (temp ^ (temp >>> 32));
+    result = 31 * result + endPoints.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoPath: {width=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + "), points={" + points + "}}";
+    return "GeoPath: {planetmodel=" + planetModel+", width=" + cutoffAngle + "(" + cutoffAngle * 180.0 / Math.PI + "), points={" + points + "}}";
   }
 
   /**
    * This is precalculated data for segment endpoint.
+   * Note well: This is not necessarily a circle.  There are four cases: a degenerate base case, in which no circle plane is built at all, plus:
+   * (1) The path consists of a single endpoint.  In this case, we build a simple circle with the proper cutoff offset.
+   * (2) This is the end of a path.  The circle plane must be constructed to go through two supplied points and be perpendicular to a connecting plane.
+   * (3) This is an intersection in a path.  We are supplied FOUR planes.  If there are intersections within bounds for both upper and lower, then
+   *    we generate no circle at all.  If there is one intersection only, then we generate a plane that includes that intersection, as well as the remaining
+   *    cutoff plane/edge plane points.
    */
   public static class SegmentEndpoint {
     public final GeoPoint point;
     public final SidedPlane circlePlane;
-    public final double cutoffNormalDistance;
-    public final double cutoffAngle;
-    public final double chordDistance;
-    public Membership[] cutoffPlanes = null;
-    public GeoPoint[] notablePoints = null;
+    public final Membership[] cutoffPlanes;
+    public final GeoPoint[] notablePoints;
 
     public final static GeoPoint[] circlePoints = new GeoPoint[0];
 
-    public SegmentEndpoint(final GeoPoint point, final double originDistance, final double cutoffOffset, final double cutoffAngle, final double chordDistance) {
+    /** Base case.  Builds no circle plane at all; the endpoint is fully degenerate.
+     */
+    public SegmentEndpoint(final GeoPoint point) {
       this.point = point;
-      this.cutoffNormalDistance = cutoffOffset;
-      this.cutoffAngle = cutoffAngle;
-      this.chordDistance = chordDistance;
-      this.circlePlane = new SidedPlane(point, point, -originDistance);
+      this.circlePlane = null;
+      this.cutoffPlanes = null;
+      this.notablePoints = null;
     }
+    
+    /** Constructor for case (1).
+     * Generate a simple circle cutoff plane.
+     */
+    public SegmentEndpoint(final GeoPoint point, final double cutoffOffset) {
+      this.point = point;
+      final double magnitude = point.magnitude();
+      // Normalize vector to make D value correct
+      this.circlePlane = new SidedPlane(point, point.normalize(), -Math.sqrt(magnitude * magnitude - cutoffOffset * cutoffOffset));
+      this.cutoffPlanes = new Membership[0];
+      this.notablePoints = new GeoPoint[0];
+    }
+    
+    /** Constructor for case (2).
+     * Generate an endpoint, given a single cutoff plane plus upper and lower edge points.
+     */
+    public SegmentEndpoint(final GeoPoint point,
+      final SidedPlane cutoffPlane, final GeoPoint topEdgePoint, final GeoPoint bottomEdgePoint) {
+      this.point = point;
+      this.cutoffPlanes = new Membership[]{new SidedPlane(cutoffPlane)};
+      this.notablePoints = new GeoPoint[]{topEdgePoint, bottomEdgePoint};
+      // To construct the plane, we now just need D, which is simply the negative of the evaluation of the circle normal vector at one of the points.
+      this.circlePlane = SidedPlane.constructNormalizedPerpendicularSidedPlane(point, cutoffPlane, topEdgePoint, bottomEdgePoint);
+    }
+    
+    /** Constructor for case (3).
+     * Generate an endpoint for an intersection, given four points.
+     */
+    public SegmentEndpoint(final GeoPoint point,
+      final SidedPlane prevCutoffPlane, final SidedPlane nextCutoffPlane,
+      final GeoPoint notCand2Point, final GeoPoint notCand1Point,
+      final GeoPoint notCand3Point, final GeoPoint notCand4Point,
+      final SidedPlane candidate1, final SidedPlane candidate2, final SidedPlane candidate3, final SidedPlane candidate4) {
+      // Note: What we really need is a single plane that goes through all four points.
+      // Since that's not possible in the ellipsoid case (because three points determine a plane, not four), we
+      // need an approximation that at least creates a boundary that has no interruptions.
+      // There are three obvious choices for the third point: either (a) one of the two remaining points, or (b) the top or bottom edge
+      // intersection point.  (a) has no guarantee of continuity, while (b) is capable of producing something very far from a circle if
+      // the angle between segments is acute.
+      // The solution is to look for the side (top or bottom) that has an intersection within the shape.  We use the two points from
+      // the opposite side to determine the plane, AND we pick the third to be either of the two points on the intersecting side
+      // PROVIDED that the other point is within the final circle we come up with.
+      this.point = point;
+      
+      // We construct four separate planes, and evaluate which one includes all interior points with least overlap
+      // (Constructed beforehand because we need them for degeneracy check)
 
-    public void setCutoffPlanes(final GeoPoint[] previousEndNotablePoints, final Membership previousEndPlane,
-                                final GeoPoint[] nextStartNotablePoints, final Membership nextStartPlane) {
-      if (previousEndNotablePoints == null && nextStartNotablePoints == null) {
-        cutoffPlanes = new Membership[0];
-        notablePoints = new GeoPoint[0];
-      } else if (previousEndNotablePoints != null && nextStartNotablePoints == null) {
-        cutoffPlanes = new Membership[]{previousEndPlane};
-        notablePoints = previousEndNotablePoints;
-      } else if (previousEndNotablePoints == null && nextStartNotablePoints != null) {
-        cutoffPlanes = new Membership[]{nextStartPlane};
-        notablePoints = nextStartNotablePoints;
+      final boolean cand1IsOtherWithin = candidate1 != null && candidate1.isWithin(notCand1Point);
+      final boolean cand2IsOtherWithin = candidate2 != null && candidate2.isWithin(notCand2Point);
+      final boolean cand3IsOtherWithin = candidate3 != null && candidate3.isWithin(notCand3Point);
+      final boolean cand4IsOtherWithin = candidate4 != null && candidate4.isWithin(notCand4Point);
+      
+      if (cand1IsOtherWithin && cand2IsOtherWithin && cand3IsOtherWithin && cand4IsOtherWithin) {
+        // The only way we should see all four within is if all four points are coplanar.  In that case, we default to the simplest treatment.
+        this.circlePlane = candidate1;  // doesn't matter which
+        this.notablePoints = new GeoPoint[]{notCand2Point, notCand3Point, notCand1Point, notCand4Point};
+        this.cutoffPlanes = new Membership[]{new SidedPlane(prevCutoffPlane), new SidedPlane(nextCutoffPlane)};
+      } else if (cand1IsOtherWithin) {
+        // Use candidate1, and DON'T include prevCutoffPlane in the cutoff planes list
+        this.circlePlane = candidate1;
+        this.notablePoints = new GeoPoint[]{notCand2Point, notCand3Point, notCand4Point};
+        this.cutoffPlanes = new Membership[]{new SidedPlane(nextCutoffPlane)};
+      } else if (cand2IsOtherWithin) {
+        // Use candidate2
+        this.circlePlane = candidate2;
+        this.notablePoints = new GeoPoint[]{notCand3Point, notCand4Point, notCand1Point};
+        this.cutoffPlanes = new Membership[]{new SidedPlane(nextCutoffPlane)};
+      } else if (cand3IsOtherWithin) {
+        this.circlePlane = candidate3;
+        this.notablePoints = new GeoPoint[]{notCand4Point, notCand1Point, notCand2Point};
+        this.cutoffPlanes = new Membership[]{new SidedPlane(prevCutoffPlane)};
+      } else if (cand4IsOtherWithin) {
+        this.circlePlane = candidate4;
+        this.notablePoints = new GeoPoint[]{notCand1Point, notCand2Point, notCand3Point};
+        this.cutoffPlanes = new Membership[]{new SidedPlane(prevCutoffPlane)};
       } else {
-        cutoffPlanes = new Membership[]{previousEndPlane, nextStartPlane};
-        notablePoints = new GeoPoint[previousEndNotablePoints.length + nextStartNotablePoints.length];
-        int i = 0;
-        for (GeoPoint p : previousEndNotablePoints) {
-          notablePoints[i++] = p;
-        }
-        for (GeoPoint p : nextStartNotablePoints) {
-          notablePoints[i++] = p;
-        }
+        // No candidate plane contains the opposite point; this should not be possible.
+        throw new RuntimeException("Couldn't come up with a plane through three points that included the fourth");
       }
     }
 
     public boolean isWithin(final Vector point) {
+      if (circlePlane == null)
+        return false;
       return circlePlane.isWithin(point);
     }
 
     public boolean isWithin(final double x, final double y, final double z) {
+      if (circlePlane == null)
+        return false;
       return circlePlane.isWithin(x, y, z);
     }
 
     public double pathDistance(final GeoPoint point) {
-      double dist = this.point.arcDistance(point);
-      if (dist > cutoffAngle)
+      if (!isWithin(point))
         return Double.MAX_VALUE;
-      return dist;
+      return this.point.arcDistance(point);
     }
 
     public double pathNormalDistance(final GeoPoint point) {
-      double dist = this.point.normalDistance(point);
-      if (dist > cutoffNormalDistance)
+      if (!isWithin(point))
         return Double.MAX_VALUE;
-      return dist;
+      return this.point.normalDistance(point);
     }
 
     public double pathLinearDistance(final GeoPoint point) {
-      double dist = this.point.linearDistance(point);
-      if (dist > chordDistance)
+      if (!isWithin(point))
         return Double.MAX_VALUE;
-      return dist;
+      return this.point.linearDistance(point);
     }
 
-    public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
-      return circlePlane.intersects(p, notablePoints, this.notablePoints, bounds, this.cutoffPlanes);
+    public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
+      //System.err.println("  looking for intersection between plane "+p+" and circle "+circlePlane+" on proper side of "+cutoffPlanes+" within "+bounds);
+      if (circlePlane == null)
+        return false;
+      return circlePlane.intersects(planetModel, p, notablePoints, this.notablePoints, bounds, this.cutoffPlanes);
     }
 
-    public void getBounds(Bounds bounds) {
+    public void getBounds(final PlanetModel planetModel, Bounds bounds) {
       bounds.addPoint(point);
-      circlePlane.recordBounds(bounds);
+      if (circlePlane == null)
+        return;
+      circlePlane.recordBounds(planetModel, bounds);
     }
 
     @Override
@@ -494,69 +592,70 @@
     public final SidedPlane lowerConnectingPlane;
     public final SidedPlane startCutoffPlane;
     public final SidedPlane endCutoffPlane;
+    public final GeoPoint URHC;
+    public final GeoPoint LRHC;
+    public final GeoPoint ULHC;
+    public final GeoPoint LLHC;
     public final GeoPoint[] upperConnectingPlanePoints;
     public final GeoPoint[] lowerConnectingPlanePoints;
     public final GeoPoint[] startCutoffPlanePoints;
     public final GeoPoint[] endCutoffPlanePoints;
     public final double planeBoundingOffset;
-    public final double arcWidth;
-    public final double chordDistance;
 
-    // For the adjoining SegmentEndpoint...
-    public final SidedPlane invertedStartCutoffPlane;
-    public final SidedPlane invertedEndCutoffPlane;
-
-    public PathSegment(final GeoPoint start, final GeoPoint end, final double planeBoundingOffset, final double arcWidth, final double chordDistance) {
+    public PathSegment(final PlanetModel planetModel, final GeoPoint start, final GeoPoint end,
+      final Plane normalizedConnectingPlane, final double planeBoundingOffset) {
       this.start = start;
       this.end = end;
+      this.normalizedConnectingPlane = normalizedConnectingPlane;
       this.planeBoundingOffset = planeBoundingOffset;
-      this.arcWidth = arcWidth;
-      this.chordDistance = chordDistance;
 
       fullDistance = start.arcDistance(end);
       fullNormalDistance = start.normalDistance(end);
       fullLinearDistance = start.linearDistance(end);
-      normalizedConnectingPlane = new Plane(start, end).normalize();
-      if (normalizedConnectingPlane == null) {
-        upperConnectingPlane = null;
-        lowerConnectingPlane = null;
-        startCutoffPlane = null;
-        endCutoffPlane = null;
-        upperConnectingPlanePoints = null;
-        lowerConnectingPlanePoints = null;
-        startCutoffPlanePoints = null;
-        endCutoffPlanePoints = null;
-        invertedStartCutoffPlane = null;
-        invertedEndCutoffPlane = null;
-      } else {
-        // Either start or end should be on the correct side
-        upperConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, -planeBoundingOffset);
-        lowerConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, planeBoundingOffset);
-        // Cutoff planes use opposite endpoints as correct side examples
-        startCutoffPlane = new SidedPlane(end, normalizedConnectingPlane, start);
-        endCutoffPlane = new SidedPlane(start, normalizedConnectingPlane, end);
-        final Membership[] upperSide = new Membership[]{upperConnectingPlane};
-        final Membership[] lowerSide = new Membership[]{lowerConnectingPlane};
-        final Membership[] startSide = new Membership[]{startCutoffPlane};
-        final Membership[] endSide = new Membership[]{endCutoffPlane};
-        final GeoPoint ULHC = upperConnectingPlane.findIntersections(startCutoffPlane, lowerSide, endSide)[0];
-        final GeoPoint URHC = upperConnectingPlane.findIntersections(endCutoffPlane, lowerSide, startSide)[0];
-        final GeoPoint LLHC = lowerConnectingPlane.findIntersections(startCutoffPlane, upperSide, endSide)[0];
-        final GeoPoint LRHC = lowerConnectingPlane.findIntersections(endCutoffPlane, upperSide, startSide)[0];
-        upperConnectingPlanePoints = new GeoPoint[]{ULHC, URHC};
-        lowerConnectingPlanePoints = new GeoPoint[]{LLHC, LRHC};
-        startCutoffPlanePoints = new GeoPoint[]{ULHC, LLHC};
-        endCutoffPlanePoints = new GeoPoint[]{URHC, LRHC};
-        invertedStartCutoffPlane = new SidedPlane(startCutoffPlane);
-        invertedEndCutoffPlane = new SidedPlane(endCutoffPlane);
+      // Either start or end should be on the correct side
+      upperConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, -planeBoundingOffset);
+      lowerConnectingPlane = new SidedPlane(start, normalizedConnectingPlane, planeBoundingOffset);
+      // Cutoff planes use opposite endpoints as correct side examples
+      startCutoffPlane = new SidedPlane(end, normalizedConnectingPlane, start);
+      endCutoffPlane = new SidedPlane(start, normalizedConnectingPlane, end);
+      final Membership[] upperSide = new Membership[]{upperConnectingPlane};
+      final Membership[] lowerSide = new Membership[]{lowerConnectingPlane};
+      final Membership[] startSide = new Membership[]{startCutoffPlane};
+      final Membership[] endSide = new Membership[]{endCutoffPlane};
+      GeoPoint[] points;
+      points = upperConnectingPlane.findIntersections(planetModel, startCutoffPlane, lowerSide, endSide);
+      if (points.length == 0) {
+        throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide");
       }
-    }
-
-    public boolean isDegenerate() {
-      return normalizedConnectingPlane == null;
+      this.ULHC = points[0];
+      points = upperConnectingPlane.findIntersections(planetModel, endCutoffPlane, lowerSide, startSide);
+      if (points.length == 0) {
+        throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide");
+      }
+      this.URHC = points[0];
+      points = lowerConnectingPlane.findIntersections(planetModel, startCutoffPlane, upperSide, endSide);
+      if (points.length == 0) {
+        throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide");
+      }
+      this.LLHC = points[0];
+      points = lowerConnectingPlane.findIntersections(planetModel, endCutoffPlane, upperSide, startSide);
+      if (points.length == 0) {
+        throw new IllegalArgumentException("Some segment boundary points are off the ellipsoid; path too wide");
+      }
+      this.LRHC = points[0];
+      upperConnectingPlanePoints = new GeoPoint[]{ULHC, URHC};
+      lowerConnectingPlanePoints = new GeoPoint[]{LLHC, LRHC};
+      startCutoffPlanePoints = new GeoPoint[]{ULHC, LLHC};
+      endCutoffPlanePoints = new GeoPoint[]{URHC, LRHC};
     }
 
     public boolean isWithin(final Vector point) {
+      //System.err.println(" assessing whether point "+point+" is within path segment "+this);
+      //System.err.println("  within "+startCutoffPlane+": "+startCutoffPlane.isWithin(point));
+      //System.err.println("  within "+endCutoffPlane+": "+endCutoffPlane.isWithin(point));
+      //System.err.println("  within "+upperConnectingPlane+": "+upperConnectingPlane.isWithin(point));
+      //System.err.println("  within "+lowerConnectingPlane+": "+lowerConnectingPlane.isWithin(point));
+
       return startCutoffPlane.isWithin(point) &&
           endCutoffPlane.isWithin(point) &&
           upperConnectingPlane.isWithin(point) &&
@@ -640,22 +739,22 @@
       return point.linearDistance(normLineX, normLineY, normLineZ) + start.linearDistance(normLineX, normLineY, normLineZ);
     }
 
-    public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
-      return upperConnectingPlane.intersects(p, notablePoints, upperConnectingPlanePoints, bounds, lowerConnectingPlane, startCutoffPlane, endCutoffPlane) ||
-          lowerConnectingPlane.intersects(p, notablePoints, lowerConnectingPlanePoints, bounds, upperConnectingPlane, startCutoffPlane, endCutoffPlane);
+    public boolean intersects(final PlanetModel planetModel, final Plane p, final GeoPoint[] notablePoints, final Membership[] bounds) {
+      return upperConnectingPlane.intersects(planetModel, p, notablePoints, upperConnectingPlanePoints, bounds, lowerConnectingPlane, startCutoffPlane, endCutoffPlane) ||
+          lowerConnectingPlane.intersects(planetModel, p, notablePoints, lowerConnectingPlanePoints, bounds, upperConnectingPlane, startCutoffPlane, endCutoffPlane);
     }
 
-    public void getBounds(Bounds bounds) {
+    public void getBounds(final PlanetModel planetModel, Bounds bounds) {
       // We need to do all bounding planes as well as corner points
       bounds.addPoint(start).addPoint(end);
-      upperConnectingPlane.recordBounds(startCutoffPlane, bounds, lowerConnectingPlane, endCutoffPlane);
-      startCutoffPlane.recordBounds(lowerConnectingPlane, bounds, endCutoffPlane, upperConnectingPlane);
-      lowerConnectingPlane.recordBounds(endCutoffPlane, bounds, upperConnectingPlane, startCutoffPlane);
-      endCutoffPlane.recordBounds(upperConnectingPlane, bounds, startCutoffPlane, lowerConnectingPlane);
-      upperConnectingPlane.recordBounds(bounds, lowerConnectingPlane, startCutoffPlane, endCutoffPlane);
-      lowerConnectingPlane.recordBounds(bounds, upperConnectingPlane, startCutoffPlane, endCutoffPlane);
-      startCutoffPlane.recordBounds(bounds, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane);
-      endCutoffPlane.recordBounds(bounds, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane);
+      upperConnectingPlane.recordBounds(planetModel, startCutoffPlane, bounds, lowerConnectingPlane, endCutoffPlane);
+      startCutoffPlane.recordBounds(planetModel, lowerConnectingPlane, bounds, endCutoffPlane, upperConnectingPlane);
+      lowerConnectingPlane.recordBounds(planetModel, endCutoffPlane, bounds, upperConnectingPlane, startCutoffPlane);
+      endCutoffPlane.recordBounds(planetModel, upperConnectingPlane, bounds, startCutoffPlane, lowerConnectingPlane);
+      upperConnectingPlane.recordBounds(planetModel, bounds, lowerConnectingPlane, startCutoffPlane, endCutoffPlane);
+      lowerConnectingPlane.recordBounds(planetModel, bounds, upperConnectingPlane, startCutoffPlane, endCutoffPlane);
+      startCutoffPlane.recordBounds(planetModel, bounds, endCutoffPlane, upperConnectingPlane, lowerConnectingPlane);
+      endCutoffPlane.recordBounds(planetModel, bounds, startCutoffPlane, upperConnectingPlane, lowerConnectingPlane);
       if (fullDistance >= Math.PI) {
         // Too large a segment basically means that we can confuse the Bounds object.  Specifically, if our span exceeds 180 degrees
         // in longitude (which even a segment whose actual length is less than that might if it goes close to a pole).
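
[Illustrative sketch, not from the patch: the distance loops above all share one pattern.  Each endpoint or segment reports Double.MAX_VALUE when the query point falls outside its cutoff planes, and the caller accumulates full element lengths until some element accepts the point.  The names below are hypothetical, not the geo3d API.]

    import java.util.List;

    // Anything a path is made of: an endpoint circle or a segment.
    interface PathElement {
      // Distance from the query point to this element, or Double.MAX_VALUE
      // when the point lies outside this element's cutoff planes.
      double distanceTo(double[] point);
      // Full length this element contributes when the point lies farther along the path.
      double fullLength();
    }

    final class PathWalk {
      static double pathDistance(List<PathElement> elements, double[] point) {
        double accumulated = 0.0;
        for (PathElement element : elements) {
          final double d = element.distanceTo(point);
          if (d != Double.MAX_VALUE) {
            return accumulated + d;            // this element claims the point
          }
          accumulated += element.fullLength(); // keep walking down the path
        }
        return Double.MAX_VALUE;               // point is outside the path's cutoff entirely
      }
    }
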
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPoint.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPoint.java
index 93c0c84..520db18 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPoint.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPoint.java
@@ -23,20 +23,85 @@
  * @lucene.experimental
  */
 public class GeoPoint extends Vector {
-  public GeoPoint(final double sinLat, final double sinLon, final double cosLat, final double cosLon) {
-    super(cosLat * cosLon, cosLat * sinLon, sinLat);
+  
+  /** This is the lazily-evaluated magnitude.  Some constructors supply it and others don't, and
+   * we avoid the extra cost of always computing it by evaluating it only on demand. */
+  protected double magnitude = Double.NEGATIVE_INFINITY;
+  
+  /** Construct a GeoPoint from the trig functions of a lat and lon pair.
+   * @param planetModel is the planetModel to put the point on.
+   * @param sinLat is the sin of the latitude.
+   * @param sinLon is the sin of the longitude.
+   * @param cosLat is the cos of the latitude.
+   * @param cosLon is the cos of the longitude.
+   */
+  public GeoPoint(final PlanetModel planetModel, final double sinLat, final double sinLon, final double cosLat, final double cosLon) {
+    this(computeDesiredEllipsoidMagnitude(planetModel, cosLat * cosLon, cosLat * sinLon, sinLat),
+      cosLat * cosLon, cosLat * sinLon, sinLat);
   }
 
-  public GeoPoint(final double lat, final double lon) {
-    this(Math.sin(lat), Math.sin(lon), Math.cos(lat), Math.cos(lon));
+  /** Construct a GeoPoint from a latitude/longitude pair.
+   * @param planetModel is the planetModel to put the point on.
+   * @param lat is the latitude.
+   * @param lon is the longitude.
+   */
+  public GeoPoint(final PlanetModel planetModel, final double lat, final double lon) {
+    this(planetModel, Math.sin(lat), Math.sin(lon), Math.cos(lat), Math.cos(lon));
   }
 
+  /** Construct a GeoPoint from a unit (x,y,z) vector and a magnitude.
+   * @param magnitude is the desired magnitude, provided to put the point on the ellipsoid.
+   * @param x is the unit x value.
+   * @param y is the unit y value.
+   * @param z is the unit z value.
+   */
+  public GeoPoint(final double magnitude, final double x, final double y, final double z) {
+    super(x * magnitude, y * magnitude, z * magnitude);
+    this.magnitude = magnitude;
+  }
+  
+  /** Construct a GeoPoint from an (x,y,z) value.
+   * The (x,y,z) tuple must be on the desired ellipsoid.
+   * @param x is the ellipsoid point x value.
+   * @param y is the ellipsoid point y value.
+   * @param z is the ellipsoid point z value.
+   */
   public GeoPoint(final double x, final double y, final double z) {
     super(x, y, z);
   }
 
+  /** Compute an arc distance between two points.
+   * @param v is the second point.
+   * @return the angle, in radians, between the two points.
+   */
   public double arcDistance(final GeoPoint v) {
-    return Tools.safeAcos(dotProduct(v));
+    return Tools.safeAcos(dotProduct(v)/(magnitude() * v.magnitude()));
   }
 
+  /** Compute the latitude for the point.
+   * @return the latitude.
+   */
+  public double getLatitude() {
+    return Math.asin(z / magnitude() );
+  }
+  
+  /** Compute the longitude for the point.
+   * @return the longitude value.  Returns 0.0 if there is no computable longitude (x and y are both near zero, i.e. at the poles).
+   */
+  public double getLongitude() {
+    if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION)
+      return 0.0;
+    return Math.atan2(y,x);
+  }
+  
+  /** Compute the linear magnitude of the point.
+   * @return the magnitude.
+   */
+  @Override
+  public double magnitude() {
+    if (this.magnitude == Double.NEGATIVE_INFINITY) {
+      this.magnitude = super.magnitude();
+    }
+    return magnitude;
+  }
 }
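
[Illustrative sketch, not from the patch: a self-contained version, on a unit sphere, of the latitude/longitude recovery used by getLatitude() and getLongitude() above.  The 1e-12 threshold stands in for the library's MINIMUM_RESOLUTION, and the magnitude is simply recomputed rather than lazily cached.]

    final class LatLonFromVector {
      static double latitude(double x, double y, double z) {
        final double magnitude = Math.sqrt(x * x + y * y + z * z); // computed on demand
        return Math.asin(z / magnitude);                           // lat = asin(z / |v|)
      }

      static double longitude(double x, double y) {
        if (Math.abs(x) < 1e-12 && Math.abs(y) < 1e-12) {
          return 0.0;                                              // no computable longitude at the poles
        }
        return Math.atan2(y, x);                                   // lon = atan2(y, x)
      }

      public static void main(String[] args) {
        // A point at lat=30, lon=45 degrees on the unit sphere round-trips through the formulas above.
        final double lat = Math.toRadians(30.0), lon = Math.toRadians(45.0);
        final double x = Math.cos(lat) * Math.cos(lon), y = Math.cos(lat) * Math.sin(lon), z = Math.sin(lat);
        System.out.println(Math.toDegrees(latitude(x, y, z)) + ", " + Math.toDegrees(longitude(x, y)));
      }
    }
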
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonFactory.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonFactory.java
index b630dc0..0f1b48c 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonFactory.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonFactory.java
@@ -38,16 +38,16 @@
    *                         its neighbors determines inside/outside for the entire polygon.
    * @return a GeoMembershipShape corresponding to what was specified.
    */
-  public static GeoMembershipShape makeGeoPolygon(List<GeoPoint> pointList, int convexPointIndex) {
+  public static GeoMembershipShape makeGeoPolygon(final PlanetModel planetModel, final List<GeoPoint> pointList, final int convexPointIndex) {
     // The basic operation uses a set of points, two points determining one particular edge, and a sided plane
     // describing membership.
-    return buildPolygonShape(pointList, convexPointIndex, getLegalIndex(convexPointIndex + 1, pointList.size()),
+    return buildPolygonShape(planetModel, pointList, convexPointIndex, getLegalIndex(convexPointIndex + 1, pointList.size()),
         new SidedPlane(pointList.get(getLegalIndex(convexPointIndex - 1, pointList.size())),
             pointList.get(convexPointIndex), pointList.get(getLegalIndex(convexPointIndex + 1, pointList.size()))),
         false);
   }
 
-  public static GeoMembershipShape buildPolygonShape(List<GeoPoint> pointsList, int startPointIndex, int endPointIndex, SidedPlane startingEdge, boolean isInternalEdge) {
+  public static GeoMembershipShape buildPolygonShape(final PlanetModel planetModel, final List<GeoPoint> pointsList, final int startPointIndex, final int endPointIndex, final SidedPlane startingEdge, final boolean isInternalEdge) {
     // Algorithm as follows:
     // Start with sided edge.  Go through all points in some order.  For each new point, determine if the point is within all edges considered so far.
     // If not, put it into a list of points for recursion.  If it is within, add new edge and keep going.
@@ -112,7 +112,7 @@
             }
             // We want the other side for the recursion
             SidedPlane otherSideNewBoundary = new SidedPlane(newBoundary);
-            rval.addShape(buildPolygonShape(recursionList, recursionList.size() - 2, recursionList.size() - 1, otherSideNewBoundary, true));
+            rval.addShape(buildPolygonShape(planetModel, recursionList, recursionList.size() - 2, recursionList.size() - 1, otherSideNewBoundary, true));
             recursionList.clear();
           }
           currentList.add(newPoint);
@@ -140,11 +140,11 @@
       SidedPlane newBoundary = new SidedPlane(currentList.get(currentList.size() - 2), currentList.get(0), currentList.get(currentList.size() - 1));
       // We want the other side for the recursion
       SidedPlane otherSideNewBoundary = new SidedPlane(newBoundary);
-      rval.addShape(buildPolygonShape(recursionList, recursionList.size() - 2, recursionList.size() - 1, otherSideNewBoundary, true));
+      rval.addShape(buildPolygonShape(planetModel, recursionList, recursionList.size() - 2, recursionList.size() - 1, otherSideNewBoundary, true));
       recursionList.clear();
     }
     // Now, add in the current shape.
-    rval.addShape(new GeoConvexPolygon(currentList, internalEdgeList, returnEdgeInternalBoundary));
+    rval.addShape(new GeoConvexPolygon(planetModel, currentList, internalEdgeList, returnEdgeInternalBoundary));
     //System.out.println("Done creating polygon");
     return rval;
   }
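
[Illustrative sketch, not from the patch: buildPolygonShape decides, point by point, whether a candidate lies on the interior side of each edge plane; conceptually that is a sided-plane membership test.  The simplified class below is hypothetical; the real SidedPlane carries more state (normalization, tolerances) than this sketch.]

    // A plane A*x + B*y + C*z + D = 0 splits space in two; a point is "within" the
    // sided plane when it lies on the same side as a chosen reference point.
    final class SidedPlaneSketch {
      final double a, b, c, d;
      final double referenceSign;

      SidedPlaneSketch(double a, double b, double c, double d, double[] referencePoint) {
        this.a = a; this.b = b; this.c = c; this.d = d;
        this.referenceSign = Math.signum(evaluate(referencePoint));
      }

      double evaluate(double[] p) {
        return a * p[0] + b * p[1] + c * p[2] + d;
      }

      boolean isWithin(double[] p) {
        final double v = evaluate(p);
        return v == 0.0 || Math.signum(v) == referenceSign; // on the plane, or on the reference side
      }
    }
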
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoRectangle.java
index 4592036..166a668 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoRectangle.java
@@ -24,7 +24,7 @@
  *
  * @lucene.internal
  */
-public class GeoRectangle extends GeoBBoxBase {
+public class GeoRectangle extends GeoBaseBBox {
   public final double topLat;
   public final double bottomLat;
   public final double leftLon;
@@ -54,7 +54,8 @@
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
    */
-  public GeoRectangle(final double topLat, final double bottomLat, final double leftLon, double rightLon) {
+  public GeoRectangle(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Top latitude out of range");
@@ -88,10 +89,10 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
-    this.URHC = new GeoPoint(sinTopLat, sinRightLon, cosTopLat, cosRightLon);
-    this.LRHC = new GeoPoint(sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
-    this.LLHC = new GeoPoint(sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
+    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
+    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon);
+    this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
+    this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
 
     final double middleLat = (topLat + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -104,10 +105,10 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.topPlane = new SidedPlane(centerPoint, sinTopLat);
-    this.bottomPlane = new SidedPlane(centerPoint, sinBottomLat);
+    this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
+    this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
@@ -133,7 +134,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -180,10 +181,10 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, leftPlane, rightPlane) ||
-        p.intersects(bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, leftPlane, rightPlane) ||
-        p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane, bottomPlane) ||
-        p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane, bottomPlane);
+    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, leftPlane, rightPlane) ||
+        p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, leftPlane, rightPlane) ||
+        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane, bottomPlane) ||
+        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane, bottomPlane);
   }
 
   /**
@@ -246,19 +247,20 @@
     if (!(o instanceof GeoRectangle))
       return false;
     GeoRectangle other = (GeoRectangle) o;
-    return other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC);
+    return super.equals(other) && other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC);
   }
 
   @Override
   public int hashCode() {
-    int result = ULHC.hashCode();
-    result = 31 * result + LRHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result  + ULHC.hashCode();
+    result = 31 * result  + LRHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoRectangle: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
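
[Illustrative sketch, not from the patch: how a latitude/longitude corner becomes the 3D point handed to the GeoPoint(planetModel, sinLat, sinLon, cosLat, cosLon) constructor above.  On a unit sphere the corner is simply (cosLat*cosLon, cosLat*sinLon, sinLat); the planet model then rescales that vector onto the ellipsoid.]

    final class RectangleCorners {
      static double[] unitCorner(double lat, double lon) {
        final double sinLat = Math.sin(lat), cosLat = Math.cos(lat);
        final double sinLon = Math.sin(lon), cosLon = Math.cos(lon);
        return new double[] { cosLat * cosLon, cosLat * sinLon, sinLat };
      }

      public static void main(String[] args) {
        // The four corners of a rectangle spanning lat [-10, 20] and lon [30, 60] degrees.
        final double topLat = Math.toRadians(20.0), bottomLat = Math.toRadians(-10.0);
        final double leftLon = Math.toRadians(30.0), rightLon = Math.toRadians(60.0);
        final double[][] corners = {
            unitCorner(topLat, leftLon),     // ULHC
            unitCorner(topLat, rightLon),    // URHC
            unitCorner(bottomLat, rightLon), // LRHC
            unitCorner(bottomLat, leftLon),  // LLHC
        };
        for (double[] corner : corners) {
          System.out.println(corner[0] + ", " + corner[1] + ", " + corner[2]);
        }
      }
    }
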
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthLatitudeZone.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthLatitudeZone.java
index 8bff3ac..aa8ae35 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthLatitudeZone.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthLatitudeZone.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoSouthLatitudeZone extends GeoBBoxBase {
+public class GeoSouthLatitudeZone extends GeoBaseBBox {
   public final double topLat;
   public final double cosTopLat;
   public final SidedPlane topPlane;
@@ -34,22 +34,20 @@
   // Edge points
   public final GeoPoint[] edgePoints;
 
-  public GeoSouthLatitudeZone(final double topLat) {
+  public GeoSouthLatitudeZone(final PlanetModel planetModel, final double topLat) {
+    super(planetModel);
     this.topLat = topLat;
 
     final double sinTopLat = Math.sin(topLat);
     this.cosTopLat = Math.cos(topLat);
 
-    // Construct sample points, so we get our sidedness right
-    final Vector topPoint = new Vector(0.0, 0.0, sinTopLat);
-
     // Compute an interior point.  Pick one whose lat is between top and bottom.
     final double middleLat = (topLat - Math.PI * 0.5) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
-    this.interiorPoint = new GeoPoint(Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 0.0, sinMiddleLat);
-    this.topBoundaryPoint = new GeoPoint(Math.sqrt(1.0 - sinTopLat * sinTopLat), 0.0, sinTopLat);
+    this.interiorPoint = new GeoPoint(planetModel, sinMiddleLat, 0.0, Math.sqrt(1.0 - sinMiddleLat * sinMiddleLat), 1.0);
+    this.topBoundaryPoint = new GeoPoint(planetModel, sinTopLat, 0.0, Math.sqrt(1.0 - sinTopLat * sinTopLat), 1.0);
 
-    this.topPlane = new SidedPlane(interiorPoint, sinTopLat);
+    this.topPlane = new SidedPlane(interiorPoint, planetModel, sinTopLat);
 
     this.edgePoints = new GeoPoint[]{topBoundaryPoint};
   }
@@ -58,7 +56,7 @@
   public GeoBBox expand(final double angle) {
     final double newTopLat = topLat + angle;
     final double newBottomLat = -Math.PI * 0.5;
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, -Math.PI, Math.PI);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, -Math.PI, Math.PI);
   }
 
   @Override
@@ -98,7 +96,7 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(topPlane, notablePoints, planePoints, bounds);
+    return p.intersects(planetModel, topPlane, notablePoints, planePoints, bounds);
   }
 
   /**
@@ -155,18 +153,19 @@
     if (!(o instanceof GeoSouthLatitudeZone))
       return false;
     GeoSouthLatitudeZone other = (GeoSouthLatitudeZone) o;
-    return other.topPlane.equals(topPlane);
+    return super.equals(other) && other.topBoundaryPoint.equals(topBoundaryPoint);
   }
 
   @Override
   public int hashCode() {
-    int result = topPlane.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + topBoundaryPoint.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoSouthLatitudeZone: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + ")}";
+    return "GeoSouthLatitudeZone: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + ")}";
   }
 }
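
[Illustrative sketch, not from the patch: on a unit sphere, a south latitude zone bounded above by topLat contains exactly the points whose z coordinate is at most sin(topLat), since z = sin(lat).  The SidedPlane built from the interior point above encodes the same cutoff, rescaled to the planet model's ellipsoid.  A tiny sketch of the spherical case:]

    final class SouthZoneSketch {
      static boolean isWithinUnitSphere(double topLat, double z) {
        return z <= Math.sin(topLat); // lat = asin(z) <= topLat  <=>  z <= sin(topLat)
      }

      public static void main(String[] args) {
        final double topLat = Math.toRadians(-30.0);
        // A point at lat = -45 degrees is inside the zone; a point on the equator is not.
        System.out.println(isWithinUnitSphere(topLat, Math.sin(Math.toRadians(-45.0)))); // true
        System.out.println(isWithinUnitSphere(topLat, 0.0));                             // false
      }
    }
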
 
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthRectangle.java
index 1b79367..1270fef 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoSouthRectangle.java
@@ -25,7 +25,7 @@
  *
  * @lucene.internal
  */
-public class GeoSouthRectangle extends GeoBBoxBase {
+public class GeoSouthRectangle extends GeoBaseBBox {
   public final double topLat;
   public final double leftLon;
   public final double rightLon;
@@ -45,12 +45,13 @@
 
   public final GeoPoint centerPoint;
 
-  public final GeoPoint[] edgePoints = new GeoPoint[]{SOUTH_POLE};
+  public final GeoPoint[] edgePoints;
 
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}
    */
-  public GeoSouthRectangle(final double topLat, final double leftLon, double rightLon) {
+  public GeoSouthRectangle(final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Top latitude out of range");
@@ -77,8 +78,8 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
-    this.URHC = new GeoPoint(sinTopLat, sinRightLon, cosTopLat, cosRightLon);
+    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
+    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon);
 
     final double middleLat = (topLat - Math.PI * 0.5) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -91,15 +92,18 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.topPlane = new SidedPlane(centerPoint, sinTopLat);
+    this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
     this.topPlanePoints = new GeoPoint[]{ULHC, URHC};
-    this.leftPlanePoints = new GeoPoint[]{ULHC, SOUTH_POLE};
-    this.rightPlanePoints = new GeoPoint[]{URHC, SOUTH_POLE};
+    this.leftPlanePoints = new GeoPoint[]{ULHC, planetModel.SOUTH_POLE};
+    this.rightPlanePoints = new GeoPoint[]{URHC, planetModel.SOUTH_POLE};
+    
+    this.edgePoints = new GeoPoint[]{planetModel.SOUTH_POLE};
+
   }
 
   @Override
@@ -116,7 +120,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -160,9 +164,9 @@
 
   @Override
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
-    return p.intersects(topPlane, notablePoints, topPlanePoints, bounds, leftPlane, rightPlane) ||
-        p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane) ||
-        p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane);
+    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, leftPlane, rightPlane) ||
+        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, rightPlane, topPlane) ||
+        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, leftPlane, topPlane);
   }
 
   /**
@@ -192,7 +196,7 @@
       return OVERLAPS;
     }
 
-    final boolean insideShape = path.isWithin(SOUTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.SOUTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape) {
       //System.err.println(" inside of each other");
@@ -224,19 +228,20 @@
     if (!(o instanceof GeoSouthRectangle))
       return false;
     GeoSouthRectangle other = (GeoSouthRectangle) o;
-    return other.ULHC.equals(ULHC) && other.URHC.equals(URHC);
+    return super.equals(other) && other.ULHC.equals(ULHC) && other.URHC.equals(URHC);
   }
 
   @Override
   public int hashCode() {
-    int result = ULHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + ULHC.hashCode();
     result = 31 * result + URHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoSouthRectangle: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoSouthRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideDegenerateHorizontalLine.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideDegenerateHorizontalLine.java
index a22d6f4..0346dd0 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideDegenerateHorizontalLine.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideDegenerateHorizontalLine.java
@@ -22,7 +22,7 @@
  *
  * @lucene.internal
  */
-public class GeoWideDegenerateHorizontalLine extends GeoBBoxBase {
+public class GeoWideDegenerateHorizontalLine extends GeoBaseBBox {
   public final double latitude;
   public final double leftLon;
   public final double rightLon;
@@ -46,7 +46,8 @@
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}.
    * Horizontal angle must be greater than or equal to PI.
    */
-  public GeoWideDegenerateHorizontalLine(final double latitude, final double leftLon, double rightLon) {
+  public GeoWideDegenerateHorizontalLine(final PlanetModel planetModel, final double latitude, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (latitude > Math.PI * 0.5 || latitude < -Math.PI * 0.5)
       throw new IllegalArgumentException("Latitude out of range");
@@ -73,10 +74,10 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the two points
-    this.LHC = new GeoPoint(sinLatitude, sinLeftLon, cosLatitude, cosLeftLon);
-    this.RHC = new GeoPoint(sinLatitude, sinRightLon, cosLatitude, cosRightLon);
+    this.LHC = new GeoPoint(planetModel, sinLatitude, sinLeftLon, cosLatitude, cosLeftLon);
+    this.RHC = new GeoPoint(planetModel, sinLatitude, sinRightLon, cosLatitude, cosRightLon);
 
-    this.plane = new Plane(sinLatitude);
+    this.plane = new Plane(planetModel, sinLatitude);
 
     // Normalize
     while (leftLon > rightLon) {
@@ -86,7 +87,7 @@
     double sinMiddleLon = Math.sin(middleLon);
     double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinLatitude, sinMiddleLon, cosLatitude, cosMiddleLon);
 
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
@@ -112,7 +113,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -160,7 +161,7 @@
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one
     // requires crossing into the right part of the other.  So intersection can ignore the left/right bounds.
-    return p.intersects(plane, notablePoints, planePoints, bounds, eitherBound);
+    return p.intersects(planetModel, plane, notablePoints, planePoints, bounds, eitherBound);
   }
 
   /**
@@ -199,19 +200,20 @@
     if (!(o instanceof GeoWideDegenerateHorizontalLine))
       return false;
     GeoWideDegenerateHorizontalLine other = (GeoWideDegenerateHorizontalLine) o;
-    return other.LHC.equals(LHC) && other.RHC.equals(RHC);
+    return super.equals(other) && other.LHC.equals(LHC) && other.RHC.equals(RHC);
   }
 
   @Override
   public int hashCode() {
-    int result = LHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + LHC.hashCode();
     result = 31 * result + RHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoWideDegenerateHorizontalLine: {latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoWideDegenerateHorizontalLine: {planetmodel="+planetModel+", latitude=" + latitude + "(" + latitude * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightLon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 
   protected class EitherBound implements Membership {
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideLongitudeSlice.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideLongitudeSlice.java
index 4ebae4d..ff24c49 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideLongitudeSlice.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideLongitudeSlice.java
@@ -23,24 +23,25 @@
  *
  * @lucene.internal
  */
-public class GeoWideLongitudeSlice extends GeoBBoxBase {
+public class GeoWideLongitudeSlice extends GeoBaseBBox {
   public final double leftLon;
   public final double rightLon;
 
   public final SidedPlane leftPlane;
   public final SidedPlane rightPlane;
 
-  public final static GeoPoint[] planePoints = new GeoPoint[]{NORTH_POLE, SOUTH_POLE};
+  public final GeoPoint[] planePoints;
 
   public final GeoPoint centerPoint;
 
-  public final static GeoPoint[] edgePoints = new GeoPoint[]{NORTH_POLE};
+  public final GeoPoint[] edgePoints; 
 
   /**
    * Accepts only values in the following ranges: lon: {@code -PI -> PI}.
   * Horizontal angle must be greater than or equal to PI.
    */
-  public GeoWideLongitudeSlice(final double leftLon, double rightLon) {
+  public GeoWideLongitudeSlice(final PlanetModel planetModel, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (leftLon < -Math.PI || leftLon > Math.PI)
       throw new IllegalArgumentException("Left longitude out of range");
@@ -66,10 +67,13 @@
       rightLon += Math.PI * 2.0;
     }
     final double middleLon = (leftLon + rightLon) * 0.5;
-    this.centerPoint = new GeoPoint(0.0, middleLon);
+    this.centerPoint = new GeoPoint(planetModel, 0.0, middleLon);
 
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
+    
+    this.planePoints = new GeoPoint[]{planetModel.NORTH_POLE, planetModel.SOUTH_POLE};
+    this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE};
   }
 
   @Override
@@ -84,7 +88,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, Math.PI * 0.5, -Math.PI * 0.5, newLeftLon, newRightLon);
   }
 
   @Override
@@ -127,8 +131,8 @@
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one
     // requires crossing into the right part of the other.  So intersection can ignore the left/right bounds.
-    return p.intersects(leftPlane, notablePoints, planePoints, bounds) ||
-        p.intersects(rightPlane, notablePoints, planePoints, bounds);
+    return p.intersects(planetModel, leftPlane, notablePoints, planePoints, bounds) ||
+        p.intersects(planetModel, rightPlane, notablePoints, planePoints, bounds);
   }
 
   /**
@@ -155,7 +159,7 @@
     if (insideRectangle == SOME_INSIDE)
       return OVERLAPS;
 
-    final boolean insideShape = path.isWithin(NORTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.NORTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape)
       return OVERLAPS;
@@ -178,15 +182,14 @@
     if (!(o instanceof GeoWideLongitudeSlice))
       return false;
     GeoWideLongitudeSlice other = (GeoWideLongitudeSlice) o;
-    return other.leftLon == leftLon && other.rightLon == rightLon;
+    return super.equals(other) && other.leftLon == leftLon && other.rightLon == rightLon;
   }
 
   @Override
   public int hashCode() {
-    int result;
-    long temp;
-    temp = Double.doubleToLongBits(leftLon);
-    result = (int) (temp ^ (temp >>> 32));
+    int result = super.hashCode();
+    long temp = Double.doubleToLongBits(leftLon);
+    result = 31 * result + (int) (temp ^ (temp >>> 32));
     temp = Double.doubleToLongBits(rightLon);
     result = 31 * result + (int) (temp ^ (temp >>> 32));
     return result;
@@ -194,7 +197,7 @@
 
   @Override
   public String toString() {
-    return "GeoWideLongitudeSlice: {leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoWideLongitudeSlice: {planetmodel="+planetModel+", leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 }
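
[Illustrative sketch, not from the patch: why the "wide" shapes treat the left and right bounds as independent hemispheres.  Each boundary meridian plane keeps the half of the globe that faces the slice's middle longitude; for a slice narrower than PI the shape is the intersection of those halves, while for a slice wider than PI it is their union, which is why these classes use an either-bound membership and OR-style tests.  A hypothetical, longitude-only sketch of that distinction:]

    final class WideSliceSketch {
      // Signed angular difference from 'from' to 'to', normalized into (-PI, PI].
      static double delta(double from, double to) {
        double d = to - from;
        while (d > Math.PI) d -= 2.0 * Math.PI;
        while (d <= -Math.PI) d += 2.0 * Math.PI;
        return d;
      }

      // Half of the globe bounded by the meridian at boundaryLon, oriented towards middleLon.
      static boolean withinHalf(double boundaryLon, double middleLon, double lon) {
        final double d = delta(boundaryLon, lon);
        return d == 0.0 || Math.signum(d) == Math.signum(delta(boundaryLon, middleLon));
      }

      // width is the eastward extent from leftLon to rightLon, in radians.
      static boolean withinSlice(double leftLon, double rightLon, double width, double middleLon, double lon) {
        final boolean left = withinHalf(leftLon, middleLon, lon);
        final boolean right = withinHalf(rightLon, middleLon, lon);
        return width > Math.PI ? (left || right) : (left && right);
      }

      public static void main(String[] args) {
        // A 300-degree slice running eastward from -150E to 150E: 100E is inside, 180E is not.
        final double left = Math.toRadians(-150.0), right = Math.toRadians(150.0);
        final double width = Math.toRadians(300.0), middle = 0.0;
        System.out.println(withinSlice(left, right, width, middle, Math.toRadians(100.0))); // true
        System.out.println(withinSlice(left, right, width, middle, Math.toRadians(180.0))); // false
      }
    }
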
   
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideNorthRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideNorthRectangle.java
index c46abcf..134dad1 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideNorthRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideNorthRectangle.java
@@ -23,7 +23,7 @@
  *
  * @lucene.internal
  */
-public class GeoWideNorthRectangle extends GeoBBoxBase {
+public class GeoWideNorthRectangle extends GeoBaseBBox {
   public final double bottomLat;
   public final double leftLon;
   public final double rightLon;
@@ -45,13 +45,14 @@
 
   public final EitherBound eitherBound;
 
-  public final GeoPoint[] edgePoints = new GeoPoint[]{NORTH_POLE};
+  public final GeoPoint[] edgePoints;
 
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}.
    * Horizontal angle must be greater than or equal to PI.
    */
-  public GeoWideNorthRectangle(final double bottomLat, final double leftLon, double rightLon) {
+  public GeoWideNorthRectangle(final PlanetModel planetModel, final double bottomLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (bottomLat > Math.PI * 0.5 || bottomLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Bottom latitude out of range");
@@ -78,8 +79,8 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.LRHC = new GeoPoint(sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
-    this.LLHC = new GeoPoint(sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
+    this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
+    this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
 
     final double middleLat = (Math.PI * 0.5 + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -92,17 +93,18 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.bottomPlane = new SidedPlane(centerPoint, sinBottomLat);
+    this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
     this.bottomPlanePoints = new GeoPoint[]{LLHC, LRHC};
-    this.leftPlanePoints = new GeoPoint[]{NORTH_POLE, LLHC};
-    this.rightPlanePoints = new GeoPoint[]{NORTH_POLE, LRHC};
+    this.leftPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LLHC};
+    this.rightPlanePoints = new GeoPoint[]{planetModel.NORTH_POLE, LRHC};
 
     this.eitherBound = new EitherBound();
+    this.edgePoints = new GeoPoint[]{planetModel.NORTH_POLE};
   }
 
   @Override
@@ -119,7 +121,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -168,9 +170,9 @@
     // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one
     // requires crossing into the right part of the other.  So intersection can ignore the left/right bounds.
     return
-        p.intersects(bottomPlane, notablePoints, bottomPlanePoints, bounds, eitherBound) ||
-            p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, bottomPlane) ||
-            p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, bottomPlane);
+        p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, eitherBound) ||
+            p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, bottomPlane) ||
+            p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, bottomPlane);
   }
 
   /**
@@ -200,7 +202,7 @@
       return OVERLAPS;
     }
 
-    final boolean insideShape = path.isWithin(NORTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.NORTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape) {
       //System.err.println(" both inside each other");
@@ -234,19 +236,20 @@
     if (!(o instanceof GeoWideNorthRectangle))
       return false;
     GeoWideNorthRectangle other = (GeoWideNorthRectangle) o;
-    return other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC);
+    return super.equals(other) && other.LLHC.equals(LLHC) && other.LRHC.equals(LRHC);
   }
 
   @Override
   public int hashCode() {
-    int result = LLHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + LLHC.hashCode();
     result = 31 * result + LRHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoWideNorthRectangle: {bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoWideNorthRectangle: {planetmodel="+planetModel+", bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 
   protected class EitherBound implements Membership {
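Each GeoBBox implementation in this patch gains a PlanetModel as its first constructor argument and derives its pole and edge points from that model instead of from static unit-sphere constants. A minimal sketch of how a caller obtains one of these shapes after the change, via GeoBBoxFactory; the latitude/longitude values are illustrative, and which concrete class the factory picks for them is an assumption, not something this hunk shows:

    import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBox;
    import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBoxFactory;
    import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;

    // The planet model is now explicit; PlanetModel.SPHERE reproduces the old unit-sphere behavior.
    GeoBBox box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE,
        Math.PI * 0.5,       // topLat: touches the north pole
        -Math.PI * 0.25,     // bottomLat
        -Math.PI * 0.75,     // leftLon
        Math.PI * 0.75);     // rightLon: horizontal extent > PI, so a "wide" variant is expected
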
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideRectangle.java
index 2f46404..a80b6d1 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideRectangle.java
@@ -23,7 +23,7 @@
  *
  * @lucene.internal
  */
-public class GeoWideRectangle extends GeoBBoxBase {
+public class GeoWideRectangle extends GeoBaseBBox {
   public final double topLat;
   public final double bottomLat;
   public final double leftLon;
@@ -56,7 +56,8 @@
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}.
    * Horizontal angle must be greater than or equal to PI.
    */
-  public GeoWideRectangle(final double topLat, final double bottomLat, final double leftLon, double rightLon) {
+  public GeoWideRectangle(final PlanetModel planetModel, final double topLat, final double bottomLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Top latitude out of range");
@@ -90,10 +91,10 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
-    this.URHC = new GeoPoint(sinTopLat, sinRightLon, cosTopLat, cosRightLon);
-    this.LRHC = new GeoPoint(sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
-    this.LLHC = new GeoPoint(sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
+    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
+    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon);
+    this.LRHC = new GeoPoint(planetModel, sinBottomLat, sinRightLon, cosBottomLat, cosRightLon);
+    this.LLHC = new GeoPoint(planetModel, sinBottomLat, sinLeftLon, cosBottomLat, cosLeftLon);
 
     final double middleLat = (topLat + bottomLat) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -106,10 +107,10 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.topPlane = new SidedPlane(centerPoint, sinTopLat);
-    this.bottomPlane = new SidedPlane(centerPoint, sinBottomLat);
+    this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
+    this.bottomPlane = new SidedPlane(centerPoint, planetModel, sinBottomLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
@@ -137,7 +138,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -186,10 +187,10 @@
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one
     // requires crossing into the right part of the other.  So intersection can ignore the left/right bounds.
-    return p.intersects(topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, eitherBound) ||
-        p.intersects(bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, eitherBound) ||
-        p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, topPlane, bottomPlane) ||
-        p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, topPlane, bottomPlane);
+    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, bottomPlane, eitherBound) ||
+        p.intersects(planetModel, bottomPlane, notablePoints, bottomPlanePoints, bounds, topPlane, eitherBound) ||
+        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane, bottomPlane) ||
+        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, topPlane, bottomPlane);
   }
 
   /**
@@ -253,19 +254,20 @@
     if (!(o instanceof GeoWideRectangle))
       return false;
     GeoWideRectangle other = (GeoWideRectangle) o;
-    return other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC);
+    return super.equals(other) && other.ULHC.equals(ULHC) && other.LRHC.equals(LRHC);
   }
 
   @Override
   public int hashCode() {
-    int result = ULHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + ULHC.hashCode();
     result = 31 * result + LRHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoWideRectangle: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoWideRectangle: {planetmodel=" + planetModel + ", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), bottomlat=" + bottomLat + "(" + bottomLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 
   protected class EitherBound implements Membership {
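Because equals() and hashCode() now delegate to the GeoBaseBBox superclass, the planet model participates in shape equality. A quick sketch of the consequence, assuming GeoBaseBBox compares planet models (implied by the super calls above, though the base class itself is not part of this hunk):

    GeoBBox onSphere = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.5, -0.5, -2.0, 2.0);
    GeoBBox onWgs84  = GeoBBoxFactory.makeGeoBBox(PlanetModel.WGS84,  0.5, -0.5, -2.0, 2.0);
    // Same coordinates, different planet models: no longer equal (and normally different hash codes).
    assert !onSphere.equals(onWgs84);
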
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideSouthRectangle.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideSouthRectangle.java
index 97568cd..816f36b 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideSouthRectangle.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWideSouthRectangle.java
@@ -23,7 +23,7 @@
  *
  * @lucene.internal
  */
-public class GeoWideSouthRectangle extends GeoBBoxBase {
+public class GeoWideSouthRectangle extends GeoBaseBBox {
   public final double topLat;
   public final double leftLon;
   public final double rightLon;
@@ -45,13 +45,14 @@
 
   public final EitherBound eitherBound;
 
-  public final GeoPoint[] edgePoints = new GeoPoint[]{SOUTH_POLE};
+  public final GeoPoint[] edgePoints;
 
   /**
    * Accepts only values in the following ranges: lat: {@code -PI/2 -> PI/2}, lon: {@code -PI -> PI}.
    * Horizontal angle must be greater than or equal to PI.
    */
-  public GeoWideSouthRectangle(final double topLat, final double leftLon, double rightLon) {
+  public GeoWideSouthRectangle(final PlanetModel planetModel, final double topLat, final double leftLon, double rightLon) {
+    super(planetModel);
     // Argument checking
     if (topLat > Math.PI * 0.5 || topLat < -Math.PI * 0.5)
       throw new IllegalArgumentException("Top latitude out of range");
@@ -78,8 +79,8 @@
     final double cosRightLon = Math.cos(rightLon);
 
     // Now build the four points
-    this.ULHC = new GeoPoint(sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
-    this.URHC = new GeoPoint(sinTopLat, sinRightLon, cosTopLat, cosRightLon);
+    this.ULHC = new GeoPoint(planetModel, sinTopLat, sinLeftLon, cosTopLat, cosLeftLon);
+    this.URHC = new GeoPoint(planetModel, sinTopLat, sinRightLon, cosTopLat, cosRightLon);
 
     final double middleLat = (topLat - Math.PI * 0.5) * 0.5;
     final double sinMiddleLat = Math.sin(middleLat);
@@ -92,17 +93,19 @@
     final double sinMiddleLon = Math.sin(middleLon);
     final double cosMiddleLon = Math.cos(middleLon);
 
-    this.centerPoint = new GeoPoint(sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
+    this.centerPoint = new GeoPoint(planetModel, sinMiddleLat, sinMiddleLon, cosMiddleLat, cosMiddleLon);
 
-    this.topPlane = new SidedPlane(centerPoint, sinTopLat);
+    this.topPlane = new SidedPlane(centerPoint, planetModel, sinTopLat);
     this.leftPlane = new SidedPlane(centerPoint, cosLeftLon, sinLeftLon);
     this.rightPlane = new SidedPlane(centerPoint, cosRightLon, sinRightLon);
 
     this.topPlanePoints = new GeoPoint[]{ULHC, URHC};
-    this.leftPlanePoints = new GeoPoint[]{ULHC, SOUTH_POLE};
-    this.rightPlanePoints = new GeoPoint[]{URHC, SOUTH_POLE};
+    this.leftPlanePoints = new GeoPoint[]{ULHC, planetModel.SOUTH_POLE};
+    this.rightPlanePoints = new GeoPoint[]{URHC, planetModel.SOUTH_POLE};
 
     this.eitherBound = new EitherBound();
+    
+    this.edgePoints = new GeoPoint[]{planetModel.SOUTH_POLE};
   }
 
   @Override
@@ -119,7 +122,7 @@
       newLeftLon = -Math.PI;
       newRightLon = Math.PI;
     }
-    return GeoBBoxFactory.makeGeoBBox(newTopLat, newBottomLat, newLeftLon, newRightLon);
+    return GeoBBoxFactory.makeGeoBBox(planetModel, newTopLat, newBottomLat, newLeftLon, newRightLon);
   }
 
   @Override
@@ -165,9 +168,9 @@
   public boolean intersects(final Plane p, final GeoPoint[] notablePoints, final Membership... bounds) {
     // Right and left bounds are essentially independent hemispheres; crossing into the wrong part of one
     // requires crossing into the right part of the other.  So intersection can ignore the left/right bounds.
-    return p.intersects(topPlane, notablePoints, topPlanePoints, bounds, eitherBound) ||
-        p.intersects(leftPlane, notablePoints, leftPlanePoints, bounds, topPlane) ||
-        p.intersects(rightPlane, notablePoints, rightPlanePoints, bounds, topPlane);
+    return p.intersects(planetModel, topPlane, notablePoints, topPlanePoints, bounds, eitherBound) ||
+        p.intersects(planetModel, leftPlane, notablePoints, leftPlanePoints, bounds, topPlane) ||
+        p.intersects(planetModel, rightPlane, notablePoints, rightPlanePoints, bounds, topPlane);
   }
 
   /**
@@ -197,7 +200,7 @@
       return OVERLAPS;
     }
 
-    final boolean insideShape = path.isWithin(SOUTH_POLE);
+    final boolean insideShape = path.isWithin(planetModel.SOUTH_POLE);
 
     if (insideRectangle == ALL_INSIDE && insideShape) {
       //System.err.println(" both inside each other");
@@ -230,19 +233,20 @@
     if (!(o instanceof GeoWideSouthRectangle))
       return false;
     GeoWideSouthRectangle other = (GeoWideSouthRectangle) o;
-    return other.ULHC.equals(ULHC) && other.URHC.equals(URHC);
+    return super.equals(other) && other.ULHC.equals(ULHC) && other.URHC.equals(URHC);
   }
 
   @Override
   public int hashCode() {
-    int result = ULHC.hashCode();
+    int result = super.hashCode();
+    result = 31 * result + ULHC.hashCode();
     result = 31 * result + URHC.hashCode();
     return result;
   }
 
   @Override
   public String toString() {
-    return "GeoWideSouthRectangle: {toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
+    return "GeoWideSouthRectangle: {planetmodel="+planetModel+", toplat=" + topLat + "(" + topLat * 180.0 / Math.PI + "), leftlon=" + leftLon + "(" + leftLon * 180.0 / Math.PI + "), rightlon=" + rightLon + "(" + rightLon * 180.0 / Math.PI + ")}";
   }
 
   protected class EitherBound implements Membership {
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWorld.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWorld.java
index dac957a..0a6bded 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWorld.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/GeoWorld.java
@@ -22,11 +22,13 @@
  *
  * @lucene.internal
  */
-public class GeoWorld extends GeoBBoxBase {
-  protected final static GeoPoint originPoint = new GeoPoint(1.0, 0.0, 0.0);
+public class GeoWorld extends GeoBaseBBox {
   protected final static GeoPoint[] edgePoints = new GeoPoint[0];
-
-  public GeoWorld() {
+  protected final GeoPoint originPoint;
+  
+  public GeoWorld(final PlanetModel planetModel) {
+    super(planetModel);
+    originPoint = new GeoPoint(planetModel.ab, 1.0, 0.0, 0.0);
   }
 
   @Override
@@ -100,16 +102,16 @@
   public boolean equals(Object o) {
     if (!(o instanceof GeoWorld))
       return false;
-    return true;
+    return super.equals(o);
   }
 
   @Override
   public int hashCode() {
-    return 0;
+    return super.hashCode();
   }
 
   @Override
   public String toString() {
-    return "GeoWorld";
+    return "GeoWorld: {planetmodel="+planetModel+"}";
   }
 }
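GeoWorld's origin point is no longer the fixed unit-sphere point (1,0,0); it is scaled by planetModel.ab so it sits on the model's equator. A one-line sketch of the new usage (the comment about the resulting coordinates is an inference from the constructor above):

    GeoWorld world = new GeoWorld(PlanetModel.WGS84);   // originPoint ends up at roughly (ab, 0, 0) on the equator
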
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Plane.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Plane.java
index b25e5c9..b5f79df 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Plane.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Plane.java
@@ -51,11 +51,12 @@
   /**
    * Construct a horizontal plane at a specified Z.
    *
-   * @param height is the specified Z coordinate.
+   * @param planetModel is the planet model.
+   * @param sinLat is the sin(latitude).
    */
-  public Plane(final double height) {
+  public Plane(final PlanetModel planetModel, final double sinLat) {
     super(0.0, 0.0, 1.0);
-    D = -height;
+    D = -sinLat * computeDesiredEllipsoidMagnitude(planetModel, sinLat);
   }
 
   /**
@@ -81,6 +82,15 @@
     this.D = D;
   }
 
+  /** Construct a normalized, vertical plane through an x-y point.  If the x-y point is at (0,0), return null.
+  */
+  public static Plane constructNormalizedVerticalPlane(final double x, final double y) {
+    if (Math.abs(x) < MINIMUM_RESOLUTION && Math.abs(y) < MINIMUM_RESOLUTION)
+      return null;
+    final double denom = 1.0 / Math.sqrt(x*x + y*y);
+    return new Plane(x * denom, y * denom);
+  }
+  
   /**
    * Evaluate the plane equation for a given point, as represented
    * by a vector.
@@ -290,14 +300,26 @@
   }
 
   /**
+   * Public version of findIntersections() that returns null when the two planes are numerically identical.
+   */
+  public GeoPoint[] findIntersections(final PlanetModel planetModel, final Plane q, final Membership... bounds) {
+    if (isNumericallyIdentical(q)) {
+      return null;
+    }
+    return findIntersections(planetModel, q, bounds, NO_BOUNDS);
+  }
+  
+  /**
    * Find the intersection points between two planes, given a set of bounds.
    *
+   * @param planetModel is the planet model to use in finding points.
    * @param q          is the plane to intersect with.
    * @param bounds     is the set of bounds.
    * @param moreBounds is another set of bounds.
    * @return the intersection point(s) on the unit sphere, if there are any.
    */
-  protected GeoPoint[] findIntersections(final Plane q, final Membership[] bounds, final Membership[] moreBounds) {
+  protected GeoPoint[] findIntersections(final PlanetModel planetModel, final Plane q, final Membership[] bounds, final Membership[] moreBounds) {
+    //System.err.println("Looking for intersection between plane "+this+" and plane "+q+" within bounds");
     final Vector lineVector = new Vector(this, q);
     if (Math.abs(lineVector.x) < MINIMUM_RESOLUTION && Math.abs(lineVector.y) < MINIMUM_RESOLUTION && Math.abs(lineVector.z) < MINIMUM_RESOLUTION) {
       // Degenerate case: parallel planes
@@ -363,16 +385,18 @@
       z0 = 0.0;
     }
 
-    // Once an intersecting line is determined, the next step is to intersect that line with the unit sphere, which
+    // Once an intersecting line is determined, the next step is to intersect that line with the ellipsoid, which
     // will yield zero, one, or two points.
-    // The equation of the sphere is: 1.0 = x^2 + y^2 + z^2.  Plugging in the parameterized line values yields:
-    // 1.0 = (At+A0)^2 + (Bt+B0)^2 + (Ct+C0)^2
-    // A^2 t^2 + 2AA0t + A0^2 + B^2 t^2 + 2BB0t + B0^2 + C^2 t^2 + 2CC0t + C0^2 - 1,0 = 0.0
-    // [A^2 + B^2 + C^2] t^2 + [2AA0 + 2BB0 + 2CC0] t + [A0^2 + B0^2 + C0^2 - 1,0] = 0.0
+    // The ellipsoid equation: 1.0 = x^2/a^2 + y^2/b^2 + z^2/c^2
+    // 1.0 = (At+A0)^2/a^2 + (Bt+B0)^2/b^2 + (Ct+C0)^2/c^2
+    // A^2 t^2 / a^2 + 2AA0t / a^2 + A0^2 / a^2 + B^2 t^2 / b^2 + 2BB0t / b^2 + B0^2 / b^2 + C^2 t^2 / c^2 + 2CC0t / c^2 + C0^2 / c^2  - 1.0 = 0.0
+    // [A^2 / a^2 + B^2 / b^2 + C^2 / c^2] t^2 + [2AA0 / a^2 + 2BB0 / b^2 + 2CC0 / c^2] t + [A0^2 / a^2 + B0^2 / b^2 + C0^2 / c^2 - 1.0] = 0.0
     // Use the quadratic formula to determine t values and candidate point(s)
-    final double A = lineVector.x * lineVector.x + lineVector.y * lineVector.y + lineVector.z * lineVector.z;
-    final double B = 2.0 * (lineVector.x * x0 + lineVector.y * y0 + lineVector.z * z0);
-    final double C = x0 * x0 + y0 * y0 + z0 * z0 - 1.0;
+    final double A = lineVector.x * lineVector.x * planetModel.inverseAbSquared +
+      lineVector.y * lineVector.y * planetModel.inverseAbSquared +
+      lineVector.z * lineVector.z * planetModel.inverseCSquared;
+    final double B = 2.0 * (lineVector.x * x0 * planetModel.inverseAbSquared + lineVector.y * y0 * planetModel.inverseAbSquared + lineVector.z * z0 * planetModel.inverseCSquared);
+    final double C = x0 * x0 * planetModel.inverseAbSquared + y0 * y0 * planetModel.inverseAbSquared + z0 * z0 * planetModel.inverseCSquared - 1.0;
 
     final double BsquaredMinus = B * B - 4.0 * A * C;
     if (Math.abs(BsquaredMinus) < MINIMUM_RESOLUTION_SQUARED) {
@@ -381,6 +405,8 @@
       // One solution only
       final double t = -B * inverse2A;
       GeoPoint point = new GeoPoint(lineVector.x * t + x0, lineVector.y * t + y0, lineVector.z * t + z0);
+      //System.err.println("  point: "+point);
+      //verifyPoint(planetModel, point, q);
       if (point.isWithin(bounds, moreBounds))
         return new GeoPoint[]{point};
       return NO_POINTS;
@@ -393,6 +419,8 @@
       final double t2 = (-B - sqrtTerm) * inverse2A;
       GeoPoint point1 = new GeoPoint(lineVector.x * t1 + x0, lineVector.y * t1 + y0, lineVector.z * t1 + z0);
       GeoPoint point2 = new GeoPoint(lineVector.x * t2 + x0, lineVector.y * t2 + y0, lineVector.z * t2 + z0);
+      //verifyPoint(planetModel, point1, q);
+      //verifyPoint(planetModel, point2, q);
       //System.err.println("  "+point1+" and "+point2);
       if (point1.isWithin(bounds, moreBounds)) {
         if (point2.isWithin(bounds, moreBounds))
@@ -408,18 +436,30 @@
     }
   }
 
+  /*
+  protected void verifyPoint(final PlanetModel planetModel, final GeoPoint point, final Plane q) {
+    if (!evaluateIsZero(point))
+      throw new RuntimeException("Intersection point not on original plane; point="+point+", plane="+this);
+    if (!q.evaluateIsZero(point))
+      throw new RuntimeException("Intersection point not on intersected plane; point="+point+", plane="+q);
+    if (Math.abs(point.x * point.x * planetModel.inverseASquared + point.y * point.y * planetModel.inverseBSquared + point.z * point.z * planetModel.inverseCSquared - 1.0) >= MINIMUM_RESOLUTION) 
+      throw new RuntimeException("Intersection point not on ellipsoid; point="+point);
+  }
+  */
+  
   /**
    * Accumulate bounds information for this plane, intersected with another plane
    * and with the unit sphere.
    * Updates both latitude and longitude information, using max/min points found
    * within the specified bounds.
    *
+   * @param planetModel is the planet model to use to determine bounding points
    * @param q          is the plane to intersect with.
    * @param boundsInfo is the info to update with additional bounding information.
    * @param bounds     are the surfaces delineating what's inside the shape.
    */
-  public void recordBounds(final Plane q, final Bounds boundsInfo, final Membership... bounds) {
-    final GeoPoint[] intersectionPoints = findIntersections(q, bounds, NO_BOUNDS);
+  public void recordBounds(final PlanetModel planetModel, final Plane q, final Bounds boundsInfo, final Membership... bounds) {
+    final GeoPoint[] intersectionPoints = findIntersections(planetModel, q, bounds, NO_BOUNDS);
     for (GeoPoint intersectionPoint : intersectionPoints) {
       boundsInfo.addPoint(intersectionPoint);
     }
@@ -430,10 +470,11 @@
    * Updates both latitude and longitude information, using max/min points found
    * within the specified bounds.
    *
+   * @param planetModel is the planet model to use in determining bounds.
    * @param boundsInfo is the info to update with additional bounding information.
    * @param bounds     are the surfaces delineating what's inside the shape.
    */
-  public void recordBounds(final Bounds boundsInfo, final Membership... bounds) {
+  public void recordBounds(final PlanetModel planetModel, final Bounds boundsInfo, final Membership... bounds) {
     // For clarity, load local variables with good names
     final double A = this.x;
     final double B = this.y;
@@ -442,236 +483,27 @@
     // Now compute latitude min/max points
     if (!boundsInfo.checkNoTopLatitudeBound() || !boundsInfo.checkNoBottomLatitudeBound()) {
       //System.err.println("Looking at latitude for plane "+this);
+      // With ellipsoids, we really have only one viable way to do this computation.
+      // Specifically, we compute an appropriate vertical plane, based on the current plane's x-y orientation, and
+      // then intersect it with this one and with the ellipsoid.  This gives us zero, one, or two points to use
+      // as bounds.
+      // There is one special case: horizontal circles.  Since geo3d only supports models where a == b, a single
+      // vertical plane suffices for these as well; the z of its intersection with the circle gives the latitude bound.
       if ((Math.abs(A) >= MINIMUM_RESOLUTION || Math.abs(B) >= MINIMUM_RESOLUTION)) {
-        //System.out.println("A = "+A+" B = "+B+" C = "+C+" D = "+D);
-        // sin (phi) = z
-        // cos (theta - phi) = D
-        // sin (theta) = C  (the dot product of (0,0,1) and (A,B,C) )
-        // Q: what is z?
-        //
-        // cos (theta-phi) = cos(theta)cos(phi) + sin(theta)sin(phi) = D
-
-        if (Math.abs(C) < MINIMUM_RESOLUTION) {
-          // Special case: circle is vertical.
-          //System.err.println(" Degenerate case; it's vertical circle");
-          // cos(phi) = D, and we want sin(phi) = z
-          // There are two solutions for phi given cos(phi) = D: a positive solution and a negative solution.
-          // So, when we compute z = sqrt(1-D^2), it's really z = +/- sqrt(1-D^2) .
-
-          double z;
-          double x;
-          double y;
-
-          final double denom = 1.0 / (A * A + B * B);
-
-          z = Math.sqrt(1.0 - D * D);
-          y = -B * D * denom;
-          x = -A * D * denom;
-          addPoint(boundsInfo, bounds, x, y, z);
-
-          z = -z;
-          addPoint(boundsInfo, bounds, x, y, z);
-        } else if (Math.abs(D) < MINIMUM_RESOLUTION) {
-          //System.err.println(" Plane through origin case");
-          // The general case is degenerate when the plane goes through the origin.
-          // Luckily there's a pretty good way to figure out the max and min for that case though.
-          // We find the two z values by computing the angle of the plane's inclination with the normal.
-          // E.g., if this.z == 1, then our z value is 0, and if this.z == 0, our z value is 1.
-          // Also if this.z == -1, then z value is 0 again.
-          // Another way of putting this is that our z = sqrt(this.x^2 + this.y^2).
-          //
-          // The only tricky part is computing x and y.
-          double z;
-          double x;
-          double y;
-
-          final double denom = 1.0 / (A * A + B * B);
-
-          z = Math.sqrt((A * A + B * B) / (A * A + B * B + C * C));
-          y = -B * (C * z) * denom;
-          x = -A * (C * z) * denom;
-          addPoint(boundsInfo, bounds, x, y, z);
-
-          z = -z;
-          y = -B * (C * z) * denom;
-          x = -A * (C * z) * denom;
-          addPoint(boundsInfo, bounds, x, y, z);
-
-        } else {
-          //System.err.println(" General latitude case");
-          // We might be able to identify a specific new latitude maximum or minimum.
-          //
-          // cos (theta-phi) = cos(theta)cos(phi) + sin(theta)sin(phi) = D
-          //
-          // This is tricky.  If cos(phi) = something, and we want to figure out
-          // what sin(phi) is, in order to capture all solutions we need to recognize
-          // that sin(phi) = +/- sqrt(1 - cos(phi)^2).  Basically, this means that
-          // whatever solution we find we have to mirror it across the x-y plane,
-          // and include both +z and -z solutions.
-          //
-          // cos (phi) = +/- sqrt(1-sin(phi)^2) = +/- sqrt(1-z^2)
-          // cos (theta) = +/- sqrt(1-sin(theta)^2) = +/- sqrt(1-C^2)
-          //
-          // D = cos(theta)cos(phi) + sin(theta)sin(phi)
-          // Substitute:
-          // D = sqrt(1-C^2) * sqrt(1-z^2) -/+ C * z
-          // Solve for z...
-          // D +/- Cz = sqrt(1-C^2)*sqrt(1-z^2) = sqrt(1 - z^2 - C^2 + z^2*C^2)
-          // Square both sides.
-          // (D +/- Cz)^2 = 1 - z^2 - C^2 + z^2*C^2
-          // D^2 +/- 2DCz + C^2*z^2 = 1 - z^2 - C^2 + z^2*C^2
-          // D^2 +/- 2DCz  = 1 - C^2 - z^2
-          // 0 = z^2 +/- 2DCz + (C^2 +D^2-1) = 0
-          //
-          // z = (+/- 2DC +/- sqrt(4*D^2*C^2 - 4*(C^2+D^2-1))) / (2)
-          // z  = +/- DC +/- sqrt(D^2*C^2 + 1 - C^2 - D^2 )
-          //    = +/- DC +/- sqrt(D^2*C^2 + 1 - C^2 - D^2)
-          //
-          // NOTE WELL: The above is clearly degenerate when D = 0.  So we'll have to
-          // code a different solution for that case!
-
-          // To get x and y, we need to plug z into the equations, as follows:
-          //
-          // Ax + By = -Cz - D
-          // x^2 + y^2 = 1 - z^2
-          //
-          // x = (-Cz -D -By) /A
-          // y = (-Cz -D -Ax) /B
-          //
-          // [(-Cz -D -By) /A]^2 + y^2 = 1 - z^2
-          // [-Cz -D -By]^2 + A^2*y^2 = A^2 - A^2*z^2
-          // C^2*z^2 + D^2 + B^2*y^2 + 2CDz + 2CBzy + 2DBy + A^2*y^2 - A^2 + A^2*z^2 = 0
-          // y^2 [A^2 + B^2]  + y [2DB + 2CBz] + [C^2*z^2 + D^2 + 2CDz - A^2 + A^2*z^2] = 0
-          //
-          //
-          // Use quadratic formula, where:
-          // a = [A^2 + B^2]
-          // b = [2BD + 2CBz]
-          // c = [C^2*z^2 + D^2 + 2CDz - A^2 + A^2*z^2]
-          //
-          // y = (-[2BD + 2CBz] +/- sqrt([2BD + 2CBz]^2 - 4 * [A^2 + B^2] * [C^2*z^2 + D^2 + 2CDz - A^2 + A^2*z^2]) ) / (2 * [A^2 + B^2])
-          // Take out a 2:
-          // y = (-[DB +CBz] +/- sqrt([DB + CBz]^2 - [A^2 + B^2] * [C^2*z^2 + D^2 + 2CDz - A^2 + A^2*z^2]) ) / [A^2 + B^2]
-          //
-          // The sqrt term simplifies:
-          //
-          // B^2*D^2 + C^2*B^2*z^2 + 2C*D*B^2*z - [A^2 + B^2] * [C^2*z^2 + D^2 + 2CDz - A^2 + A^2*z^2] = ?
-          // B^2*D^2 + C^2*B^2*z^2 + 2C*D*B^2*z - [A^2 * C^2 * z^2 + A^2 * D^2 + 2 * A^2 * CDz - A^4 + A^4*z^2
-          //                  + B^2 * C^2 * z^2 + B^2 * D^2 + 2 * B^2 * CDz - A^2 * B^2 + B^2 * A^2 * z^2] =?
-          // C^2*B^2*z^2 + 2C*D*B^2*z - [A^2 * C^2 * z^2 + A^2 * D^2 + 2 * A^2 * CDz - A^4 + A^4*z^2
-          //                  + B^2 * C^2 * z^2 + 2 * B^2 * CDz - A^2 * B^2 + B^2 * A^2 * z^2] =?
-          // 2C*D*B^2*z - [A^2 * C^2 * z^2 + A^2 * D^2 + 2 * A^2 * CDz - A^4 + A^4*z^2
-          //                  + 2 * B^2 * CDz - A^2 * B^2 + B^2 * A^2 * z^2] =?
-          // - [A^2 * C^2 * z^2 + A^2 * D^2 + 2 * A^2 * CDz - A^4 + A^4*z^2
-          //                  - A^2 * B^2 + B^2 * A^2 * z^2] =?
-          // - A^2 * [C^2 * z^2 + D^2 + 2 * CDz - A^2 + A^2*z^2
-          //                  - B^2 + B^2 * z^2] =?
-          // - A^2 * [z^2[A^2 + B^2 + C^2] - [A^2 + B^2 - D^2] + 2CDz] =?
-          // - A^2 * [z^2 - [A^2 + B^2 - D^2] + 2CDz] =?
-          //
-          // y = (-[DB +CBz] +/- A*sqrt([A^2 + B^2 - D^2] - z^2 - 2CDz) ) / [A^2 + B^2]
-          //
-          // correspondingly:
-          // x = (-[DA +CAz] +/- B*sqrt([A^2 + B^2 - D^2] - z^2 - 2CDz) ) / [A^2 + B^2]
-          //
-          // However, for the maximum or minimum we seek, the clause inside the sqrt should be zero.  If
-          // it is NOT zero, then we aren't looking at the right z value.
-
-          double z;
-          double x;
-          double y;
-
-          double sqrtValue = D * D * C * C + 1.0 - C * C - D * D;
-          double denom = 1.0 / (A * A + B * B);
-          if (Math.abs(sqrtValue) < MINIMUM_RESOLUTION_SQUARED) {
-            //System.err.println(" One latitude solution");
-            double insideValue;
-            double sqrtTerm;
-
-            z = D * C;
-            // Since we squared both sides of the equation, we may have introduced spurious solutions, so we have to check.
-            // But the same check applies to BOTH solutions -- the +z one as well as the -z one.
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-            // Check the solution on the other side of the x-y plane
-            z = -z;
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-          } else if (sqrtValue > 0.0) {
-            //System.err.println(" Two latitude solutions");
-            double sqrtResult = Math.sqrt(sqrtValue);
-
-            double insideValue;
-            double sqrtTerm;
-
-            z = D * C + sqrtResult;
-            //System.out.println("z= "+z+" D-C*z = " + (D-C*z) + " Math.sqrt(1.0 - z*z - C*C + z*z*C*C) = "+(Math.sqrt(1.0 - z*z - C*C + z*z*C*C)));
-            // Since we squared both sides of the equation, we may have introduced spurios solutions, so we have to check.
-            // But the same check applies to BOTH solutions -- the +z one as well as the -z one.
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            //System.err.println(" z="+z+" C="+C+" D="+D+" inside value "+insideValue);
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-            // Check the solution on the other side of the x-y plane
-            z = -z;
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            //System.err.println(" z="+z+" C="+C+" D="+D+" inside value "+insideValue);
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-            z = D * C - sqrtResult;
-            //System.out.println("z= "+z+" D-C*z = " + (D-C*z) + " Math.sqrt(1.0 - z*z - C*C + z*z*C*C) = "+(Math.sqrt(1.0 - z*z - C*C + z*z*C*C)));
-            // Since we squared both sides of the equation, we may have introduced spurios solutions, so we have to check.
-            // But the same check applies to BOTH solutions -- the +z one as well as the -z one.
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            //System.err.println(" z="+z+" C="+C+" D="+D+" inside value "+insideValue);
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-            // Check the solution on the other side of the x-y plane
-            z = -z;
-            insideValue = A * A + B * B - D * D - z * z - 2.0 * C * D * z;
-            //System.err.println(" z="+z+" C="+C+" D="+D+" inside value "+insideValue);
-            if (Math.abs(insideValue) < MINIMUM_RESOLUTION) {
-              y = -B * (D + C * z) * denom;
-              x = -A * (D + C * z) * denom;
-              if (evaluateIsZero(x, y, z)) {
-                addPoint(boundsInfo, bounds, x, y, z);
-              }
-            }
-          }
+        // NOT a horizontal circle!
+        //System.err.println(" Not a horizontal circle");
+        final Plane verticalPlane = constructNormalizedVerticalPlane(A, B);
+        final GeoPoint[] points = findIntersections(planetModel, verticalPlane, NO_BOUNDS, NO_BOUNDS);
+        for (final GeoPoint point : points) {
+          addPoint(boundsInfo, bounds, point.x, point.y, point.z);
         }
       } else {
-        // Horizontal circle.
-        // Since the recordBounds() method will be called ONLY for planes that constitute edges of a shape,
-        // we can be sure that some part of the horizontal circle will be part of the boundary, so we don't need
-        // to check Membership objects.
-        boundsInfo.addHorizontalCircle(-D * C);
+        // Horizontal circle.  Since a==b, one vertical plane suffices.
+        final Plane verticalPlane = new Plane(1.0, 0.0);
+        final GeoPoint[] points = findIntersections(planetModel, verticalPlane, NO_BOUNDS, NO_BOUNDS);
+        // There will always be two points; we only need one.
+        final GeoPoint point = points[0];
+        boundsInfo.addHorizontalCircle(point.z / Math.sqrt(point.x * point.x + point.y * point.y + point.z * point.z));
       }
       //System.err.println("Done latitude bounds");
     }
@@ -697,8 +529,8 @@
             // Geometrically, we have a line segment in x-y space.  We need to locate the endpoints
             // of that line.  But luckily, we know some things: specifically, since it is a
             // degenerate situation in projection, the C value had to have been 0.  That
-            // means that our line's endpoints will coincide with the unit circle.  All we
-            // need to do then is to find the intersection of the unit circle and the line
+            // means that our line's endpoints will coincide with the projected ellipse.  All we
+            // need to do then is to find the intersection of the projected ellipse and the line
             // equation:
             //
             // A x + B y + D = 0
@@ -706,20 +538,20 @@
             // Since A != 0:
             // x = (-By - D)/A
             //
-            // The unit circle:
-            // x^2 + y^2 - 1 = 0
+            // The projected ellipse:
+            // x^2/a^2 + y^2/b^2 - 1 = 0
             // Substitute:
-            // [(-By-D)/A]^2 + y^2 -1 = 0
+            // [(-By-D)/A]^2/a^2 + y^2/b^2 -1 = 0
             // Multiply through by A^2:
-            // [-By - D]^2 + A^2*y^2 - A^2 = 0
+            // [-By - D]^2/a^2 + A^2*y^2/b^2 - A^2 = 0
             // Multiply out:
-            // B^2*y^2 + 2BDy + D^2 + A^2*y^2 - A^2 = 0
+            // B^2*y^2/a^2 + 2BDy/a^2 + D^2/a^2 + A^2*y^2/b^2 - A^2 = 0
             // Group:
-            // y^2 * [B^2 + A^2] + y [2BD] + [D^2-A^2] = 0
+            // y^2 * [B^2/a^2 + A^2/b^2] + y [2BD/a^2] + [D^2/a^2-A^2] = 0
 
-            a = B * B + A * A;
-            b = 2.0 * B * D;
-            c = D * D - A * A;
+            a = B * B * planetModel.inverseAbSquared + A * A * planetModel.inverseAbSquared;
+            b = 2.0 * B * D * planetModel.inverseAbSquared;
+            c = D * D * planetModel.inverseAbSquared - A * A;
 
             double sqrtClause = b * b - 4.0 * a * c;
 
@@ -750,9 +582,9 @@
             // Use equation suitable for B != 0
             // Since I != 0, we rewrite:
             // y = (-Ax - D)/B
-            a = B * B + A * A;
-            b = 2.0 * A * D;
-            c = D * D - B * B;
+            a = B * B * planetModel.inverseAbSquared + A * A * planetModel.inverseAbSquared;
+            b = 2.0 * A * D * planetModel.inverseAbSquared;
+            c = D * D * planetModel.inverseAbSquared - B * B;
 
             double sqrtClause = b * b - 4.0 * a * c;
 
@@ -786,25 +618,25 @@
         // They are for lat/lon calculation purposes only.  x-y is meant to be used for longitude determination,
         // and z for latitude, and that's all the values are good for.
 
-        // (1) Intersect the plane and the unit sphere, and project the results into the x-y plane:
+        // (1) Intersect the plane and the ellipsoid, and project the results into the x-y plane:
         // From plane:
         // z = (-Ax - By - D) / C
-        // From unit sphere:
-        // x^2 + y^2 + [(-Ax - By - D) / C]^2 = 1
+        // From ellipsoid:
+        // x^2/a^2 + y^2/b^2 + [(-Ax - By - D) / C]^2/c^2 = 1
         // Simplify/expand:
-        // C^2*x^2 + C^2*y^2 + (-Ax - By - D)^2 = C^2
+        // C^2*x^2/a^2 + C^2*y^2/b^2 + (-Ax - By - D)^2/c^2 = C^2
         //
-        // x^2 * C^2 + y^2 * C^2 + x^2 * (A^2 + ABxy + ADx) + (ABxy + y^2 * B^2 + BDy) + (ADx + BDy + D^2) = C^2
+        // x^2 * C^2/a^2 + y^2 * C^2/b^2 + x^2 * A^2/c^2 + ABxy/c^2 + ADx/c^2 + ABxy/c^2 + y^2 * B^2/c^2 + BDy/c^2 + ADx/c^2 + BDy/c^2 + D^2/c^2 = C^2
         // Group:
-        // [A^2 + C^2] x^2 + [B^2 + C^2] y^2 + [2AB]xy + [2AD]x + [2BD]y + [D^2-C^2] = 0
+        // [A^2/c^2 + C^2/a^2] x^2 + [B^2/c^2 + C^2/b^2] y^2 + [2AB/c^2]xy + [2AD/c^2]x + [2BD/c^2]y + [D^2/c^2-C^2] = 0
         // For convenience, introduce post-projection coefficient variables to make life easier.
         // E x^2 + F y^2 + G xy + H x + I y + J = 0
-        double E = A * A + C * C;
-        double F = B * B + C * C;
-        double G = 2.0 * A * B;
-        double H = 2.0 * A * D;
-        double I = 2.0 * B * D;
-        double J = D * D - C * C;
+        double E = A * A * planetModel.inverseCSquared + C * C * planetModel.inverseAbSquared;
+        double F = B * B * planetModel.inverseCSquared + C * C * planetModel.inverseAbSquared;
+        double G = 2.0 * A * B * planetModel.inverseCSquared;
+        double H = 2.0 * A * D * planetModel.inverseCSquared;
+        double I = 2.0 * B * D * planetModel.inverseCSquared;
+        double J = D * D * planetModel.inverseCSquared - C * C;
 
         //System.err.println("E = " + E + " F = " + F + " G = " + G + " H = "+ H + " I = " + I + " J = " + J);
 
@@ -962,6 +794,7 @@
    * Determine whether the plane intersects another plane within the
    * bounds provided.
    *
+   * @param planetModel is the planet model to use in determining intersection.
    * @param q                 is the other plane.
    * @param notablePoints     are points to look at to disambiguate cases when the two planes are identical.
    * @param moreNotablePoints are additional points to look at to disambiguate cases when the two planes are identical.
@@ -969,7 +802,7 @@
    * @param moreBounds        are more bounds.
    * @return true if there's an intersection.
    */
-  public boolean intersects(final Plane q, final GeoPoint[] notablePoints, final GeoPoint[] moreNotablePoints, final Membership[] bounds, final Membership... moreBounds) {
+  public boolean intersects(final PlanetModel planetModel, final Plane q, final GeoPoint[] notablePoints, final GeoPoint[] moreNotablePoints, final Membership[] bounds, final Membership... moreBounds) {
     //System.err.println("Does plane "+this+" intersect with plane "+q);
     // If the two planes are identical, then the math will find no points of intersection.
     // So a special case of this is to check for plane equality.  But that is not enough, because
@@ -994,7 +827,7 @@
       //System.err.println("  no notable points inside found; no intersection");
       return false;
     }
-    return findIntersections(q, bounds, moreBounds).length > 0;
+    return findIntersections(planetModel, q, bounds, moreBounds).length > 0;
   }
 
   /**
@@ -1042,8 +875,8 @@
   /**
    * Find a sample point on the intersection between two planes and the unit sphere.
    */
-  public GeoPoint getSampleIntersectionPoint(final Plane q) {
-    final GeoPoint[] intersections = findIntersections(q, NO_BOUNDS, NO_BOUNDS);
+  public GeoPoint getSampleIntersectionPoint(final PlanetModel planetModel, final Plane q) {
+    final GeoPoint[] intersections = findIntersections(planetModel, q, NO_BOUNDS, NO_BOUNDS);
     if (intersections.length == 0)
       return null;
     return intersections[0];
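The heart of the Plane changes is that the intersection line between two planes, parameterized as P(t) = lineVector * t + P0, is now substituted into the ellipsoid equation x^2/ab^2 + y^2/ab^2 + z^2/c^2 = 1 rather than the unit sphere, yielding the quadratic in t whose A, B, C coefficients appear above. A self-contained sketch of that substitution, using illustrative names rather than the actual Plane internals (iab2 and ic2 play the roles of inverseAbSquared and inverseCSquared):

    // Solve for the parameter values t at which the line (x0,y0,z0) + t*(lx,ly,lz) meets the ellipsoid.
    static double[] intersectLineWithEllipsoid(double lx, double ly, double lz,
                                               double x0, double y0, double z0,
                                               double iab2, double ic2) {
      final double A = lx * lx * iab2 + ly * ly * iab2 + lz * lz * ic2;
      final double B = 2.0 * (lx * x0 * iab2 + ly * y0 * iab2 + lz * z0 * ic2);
      final double C = x0 * x0 * iab2 + y0 * y0 * iab2 + z0 * z0 * ic2 - 1.0;
      final double disc = B * B - 4.0 * A * C;
      if (disc < 0.0) {
        return new double[0];                          // the line misses the ellipsoid entirely
      }
      final double sqrt = Math.sqrt(disc);
      return new double[]{(-B + sqrt) / (2.0 * A),     // the two (possibly coincident) solutions
                          (-B - sqrt) / (2.0 * A)};
    }
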
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/PlanetModel.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/PlanetModel.java
new file mode 100644
index 0000000..4e0bc0d
--- /dev/null
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/PlanetModel.java
@@ -0,0 +1,102 @@
+package org.apache.lucene.spatial.spatial4j.geo3d;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Holds mathematical constants associated with the model of a planet.
+ * @lucene.experimental
+ */
+public class PlanetModel {
+  
+  /** Planet model corresponding to sphere. */
+  public static final PlanetModel SPHERE = new PlanetModel(1.0,1.0);
+
+  /** Mean radius */
+  public static final double WGS84_MEAN = 6371009.0;
+  /** Polar radius */
+  public static final double WGS84_POLAR = 6356752.3;
+  /** Equatorial radius */
+  public static final double WGS84_EQUATORIAL = 6378137.0;
+  /** Planet model corresponding to WGS84 */
+  public static final PlanetModel WGS84 = new PlanetModel(WGS84_EQUATORIAL/WGS84_MEAN,
+    WGS84_POLAR/WGS84_MEAN);
+
+  // Surface of the planet:
+  // x^2/a^2 + y^2/b^2 + z^2/c^2 = 1.0
+  // Scaling factors are a,b,c.  geo3d can only support models where a==b, so use ab instead.
+  public final double ab;
+  public final double c;
+  public final double inverseAb;
+  public final double inverseC;
+  public final double inverseAbSquared;
+  public final double inverseCSquared;
+  // We do NOT include radius, because all computations in geo3d are in radians, not meters.
+  
+  // Compute north and south pole for planet model, since these are commonly used.
+  public final GeoPoint NORTH_POLE;
+  public final GeoPoint SOUTH_POLE;
+  
+  public PlanetModel(final double ab, final double c) {
+    this.ab = ab;
+    this.c = c;
+    this.inverseAb = 1.0 / ab;
+    this.inverseC = 1.0 / c;
+    this.inverseAbSquared = inverseAb * inverseAb;
+    this.inverseCSquared = inverseC * inverseC;
+    this.NORTH_POLE = new GeoPoint(c, 0.0, 0.0, 1.0);
+    this.SOUTH_POLE = new GeoPoint(c, 0.0, 0.0, -1.0);
+  }
+  
+  /** Find the minimum magnitude of all points on the ellipsoid.
+   */
+  public double getMinimumMagnitude() {
+    return Math.min(this.ab, this.c);
+  }
+
+  /** Find the maximum magnitude of all points on the ellipsoid.
+   */
+  public double getMaximumMagnitude() {
+    return Math.max(this.ab, this.c);
+  }
+  
+  @Override
+  public boolean equals(final Object o) {
+    if (!(o instanceof PlanetModel))
+      return false;
+    final PlanetModel other = (PlanetModel)o;
+    return ab == other.ab && c == other.c;
+  }
+  
+  @Override
+  public int hashCode() {
+    return Double.hashCode(ab) + Double.hashCode(c);
+  }
+  
+  @Override
+  public String toString() {
+    if (this.equals(SPHERE)) {
+      return "PlanetModel.SPHERE";
+    } else if (this.equals(WGS84)) {
+      return "PlanetModel.WGS84";
+    } else {
+      return "PlanetModel(ab="+ab+" c="+c+")";
+    }
+  }
+}
+
+
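PlanetModel is a plain value class, so the two predefined models can be used directly and additional a==b models can be constructed by hand. Scaling factors are expressed relative to the mean radius because geo3d works in radians, not meters; the numeric comments below are approximate:

    PlanetModel sphere = PlanetModel.SPHERE;             // ab = c = 1.0
    PlanetModel wgs84  = PlanetModel.WGS84;              // slightly oblate: c < ab
    PlanetModel custom = new PlanetModel(1.05, 0.95);    // hypothetical strongly flattened model (a == b)

    double maxR = wgs84.getMaximumMagnitude();           // ~1.00112, the equatorial scaling factor
    double minR = wgs84.getMinimumMagnitude();           // ~0.99776, the polar scaling factor
    GeoPoint northPole = wgs84.NORTH_POLE;                // precomputed point at (0, 0, c)
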
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/SidedPlane.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/SidedPlane.java
index 6c0f49d..7af3615 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/SidedPlane.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/SidedPlane.java
@@ -53,10 +53,11 @@
    * Construct a sided plane from a point and a Z coordinate.
    *
    * @param p      point to evaluate.
-   * @param height is the Z coordinate of the plane.
+   * @param planetModel is the planet model.
+   * @param sinLat is the sin of the latitude of the plane.
    */
-  public SidedPlane(Vector p, double height) {
-    super(height);
+  public SidedPlane(Vector p, final PlanetModel planetModel, double sinLat) {
+    super(planetModel, sinLat);
     sigNum = Math.signum(evaluate(p));
   }
 
@@ -84,6 +85,28 @@
     sigNum = Math.signum(evaluate(p));
   }
 
+  /** Construct a sided plane that passes through the two given points and is perpendicular to the plane
+   * described by the given normal vector; the inside point determines which side counts as "within". */
+  public static SidedPlane constructNormalizedPerpendicularSidedPlane(final Vector insidePoint,
+    final Vector normalVector, final Vector point1, final Vector point2) {
+    final Vector pointsVector = new Vector(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z);
+    final Vector newNormalVector = new Vector(normalVector, pointsVector).normalize();
+    // To construct the plane, we now just need D, which is simply the negative of the evaluation of the circle normal vector at one of the points.
+    return new SidedPlane(insidePoint, newNormalVector, -newNormalVector.dotProduct(point1));
+  }
+  
+  /** Construct a sided plane through three points; the inside point determines which side counts as "within".
+   * Returns null if the three points are numerically collinear. */
+  public static SidedPlane constructNormalizedThreePointSidedPlane(final Vector insidePoint,
+    final Vector point1, final Vector point2, final Vector point3) {
+    final Vector planeNormal = new Vector(
+      new Vector(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z),
+      new Vector(point2.x - point3.x, point2.y - point3.y, point2.z - point3.z)).normalize();
+    if (planeNormal == null)
+      return null;
+    return new SidedPlane(insidePoint, planeNormal, -planeNormal.dotProduct(point2));
+  }
+
   /**
    * Check if a point is within this shape.
    *
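Both new SidedPlane factories derive a plane normal from cross products and then use a caller-supplied inside point only to fix which half-space counts as "within". A sketch of the three-point variant with illustrative coordinates; whether the chosen interior point is geometrically sensible is the caller's responsibility, the factory just picks the sign:

    GeoPoint p1 = new GeoPoint(PlanetModel.SPHERE, 0.10, 0.20);
    GeoPoint p2 = new GeoPoint(PlanetModel.SPHERE, 0.30, -0.10);
    GeoPoint p3 = new GeoPoint(PlanetModel.SPHERE, -0.20, 0.40);
    GeoPoint interior = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.15);
    SidedPlane plane = SidedPlane.constructNormalizedThreePointSidedPlane(interior, p1, p2, p3);
    if (plane != null) {                              // null means the three points were numerically collinear
      boolean oriented = plane.isWithin(interior);    // true: the plane is oriented toward the inside point
    }
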
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Vector.java b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Vector.java
index 4ea5812..a29249f 100755
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Vector.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/spatial4j/geo3d/Vector.java
@@ -114,14 +114,20 @@
    */
   public boolean isWithin(final Membership[] bounds, final Membership[] moreBounds) {
     // Return true if the point described is within all provided bounds
+    //System.err.println("  checking if "+this+" is within bounds");
     for (Membership bound : bounds) {
-      if (bound != null && !bound.isWithin(this))
+      if (bound != null && !bound.isWithin(this)) {
+        //System.err.println("    NOT within "+bound);
         return false;
+      }
     }
     for (Membership bound : moreBounds) {
-      if (bound != null && !bound.isWithin(this))
+      if (bound != null && !bound.isWithin(this)) {
+        //System.err.println("    NOT within "+bound);
         return false;
+      }
     }
+    //System.err.println("    is within");
     return true;
   }
 
@@ -301,6 +307,28 @@
     return Math.sqrt(x * x + y * y + z * z);
   }
 
+  /** Compute the magnitude by which a unit vector must be scaled so that the resulting point lies on the
+   * surface of the given planet model.
+   * @param planetModel is the planet model.
+   * @param x is the unit vector x value.
+   * @param y is the unit vector y value.
+   * @param z is the unit vector z value.
+   * @return a magnitude value for that (x,y,z) that projects the vector onto the specified ellipsoid.
+   */
+  protected static double computeDesiredEllipsoidMagnitude(final PlanetModel planetModel, final double x, final double y, final double z) {
+    return 1.0 / Math.sqrt(x*x*planetModel.inverseAbSquared + y*y*planetModel.inverseAbSquared + z*z*planetModel.inverseCSquared);
+  }
+
+  /** Compute the magnitude by which a unit vector must be scaled so that the resulting point lies on the
+   * surface of the given planet model.  The unit vector is specified only by its z value.
+   * @param planetModel is the planet model.
+   * @param z is the unit vector z value.
+   * @return a magnitude value for that z value that projects the vector onto the specified ellipsoid.
+   */
+  protected static double computeDesiredEllipsoidMagnitude(final PlanetModel planetModel, final double z) {
+    return 1.0 / Math.sqrt((1.0-z*z)*planetModel.inverseAbSquared + z*z*planetModel.inverseCSquared);
+  }
+
   @Override
   public boolean equals(Object o) {
     if (!(o instanceof Vector))
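computeDesiredEllipsoidMagnitude() answers a simple question: by what factor must a unit direction (x, y, z) be stretched so that the scaled point satisfies x^2/ab^2 + y^2/ab^2 + z^2/c^2 = 1? The helpers are protected, so the check below inlines the same formula for illustration:

    PlanetModel wgs84 = PlanetModel.WGS84;
    double x = 0.0, y = Math.sqrt(0.5), z = Math.sqrt(0.5);          // any unit-length direction
    double t = 1.0 / Math.sqrt(x * x * wgs84.inverseAbSquared
                             + y * y * wgs84.inverseAbSquared
                             + z * z * wgs84.inverseCSquared);        // same formula as the helper
    double lhs = (t * x) * (t * x) * wgs84.inverseAbSquared
               + (t * y) * (t * y) * wgs84.inverseAbSquared
               + (t * z) * (t * z) * wgs84.inverseCSquared;           // evaluates to ~1.0: the scaled point is on the surface
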
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
index 93a4732..2cb6b08 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
@@ -39,6 +39,7 @@
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoPolygonFactory;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoShape;
+import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;
 import org.junit.Test;
 
 import static com.spatial4j.core.distance.DistanceUtils.DEGREES_TO_RADIANS;
@@ -81,12 +82,12 @@
   public void testFailure1() throws IOException {
     setupStrategy();
     final List<GeoPoint> points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(18 * DEGREES_TO_RADIANS, -27 * DEGREES_TO_RADIANS));
-    points.add(new GeoPoint(-57 * DEGREES_TO_RADIANS, 146 * DEGREES_TO_RADIANS));
-    points.add(new GeoPoint(14 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS));
-    points.add(new GeoPoint(-15 * DEGREES_TO_RADIANS, 153 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 18 * DEGREES_TO_RADIANS, -27 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -57 * DEGREES_TO_RADIANS, 146 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 14 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -15 * DEGREES_TO_RADIANS, 153 * DEGREES_TO_RADIANS));
     
-    final Shape triangle = new Geo3dShape(GeoPolygonFactory.makeGeoPolygon(points,0),ctx);
+    final Shape triangle = new Geo3dShape(GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points,0),ctx);
     final Rectangle rect = ctx.makeRectangle(-49, -45, 73, 86);
     testOperation(rect,SpatialOperation.Intersects,triangle, false);
   }
@@ -101,11 +102,11 @@
 
   private Shape makeTriangle(double x1, double y1, double x2, double y2, double x3, double y3) {
     final List<GeoPoint> geoPoints = new ArrayList<>();
-    geoPoints.add(new GeoPoint(y1 * DEGREES_TO_RADIANS, x1 * DEGREES_TO_RADIANS));
-    geoPoints.add(new GeoPoint(y2 * DEGREES_TO_RADIANS, x2 * DEGREES_TO_RADIANS));
-    geoPoints.add(new GeoPoint(y3 * DEGREES_TO_RADIANS, x3 * DEGREES_TO_RADIANS));
+    geoPoints.add(new GeoPoint(PlanetModel.SPHERE, y1 * DEGREES_TO_RADIANS, x1 * DEGREES_TO_RADIANS));
+    geoPoints.add(new GeoPoint(PlanetModel.SPHERE, y2 * DEGREES_TO_RADIANS, x2 * DEGREES_TO_RADIANS));
+    geoPoints.add(new GeoPoint(PlanetModel.SPHERE, y3 * DEGREES_TO_RADIANS, x3 * DEGREES_TO_RADIANS));
     final int convexPointIndex = 0;
-    final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(geoPoints, convexPointIndex);
+    final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, geoPoints, convexPointIndex);
     return new Geo3dShape(shape, ctx);
   }
 
@@ -125,12 +126,12 @@
           final List<GeoPoint> geoPoints = new ArrayList<>();
           while (geoPoints.size() < vertexCount) {
             final Point point = randomPoint();
-            final GeoPoint gPt = new GeoPoint(point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS);
+            final GeoPoint gPt = new GeoPoint(PlanetModel.SPHERE, point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS);
             geoPoints.add(gPt);
           }
           final int convexPointIndex = random().nextInt(vertexCount);       //If we get this wrong, hopefully we get IllegalArgumentException
           try {
-            final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(geoPoints, convexPointIndex);
+            final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, geoPoints, convexPointIndex);
             return new Geo3dShape(shape, ctx);
           } catch (IllegalArgumentException e) {
             // This is what happens when we create a shape that is invalid.  Although it is conceivable that there are cases where
@@ -145,7 +146,7 @@
           final int circleRadius = random().nextInt(179) + 1;
           final Point point = randomPoint();
           try {
-            final GeoShape shape = new GeoCircle(point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS,
+            final GeoShape shape = new GeoCircle(PlanetModel.SPHERE, point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS,
               circleRadius * DEGREES_TO_RADIANS);
             return new Geo3dShape(shape, ctx);
           } catch (IllegalArgumentException e) {
@@ -167,7 +168,7 @@
             lrhcPoint = temp;
           }
           try {
-            final GeoShape shape = GeoBBoxFactory.makeGeoBBox(ulhcPoint.getY() * DEGREES_TO_RADIANS,
+            final GeoShape shape = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, ulhcPoint.getY() * DEGREES_TO_RADIANS,
               lrhcPoint.getY() * DEGREES_TO_RADIANS,
               ulhcPoint.getX() * DEGREES_TO_RADIANS,
               lrhcPoint.getX() * DEGREES_TO_RADIANS);
@@ -186,7 +187,7 @@
         final double width = (random().nextInt(89)+1) * DEGREES_TO_RADIANS;
         while (true) {
           try {
-            final GeoPath path = new GeoPath(width);
+            final GeoPath path = new GeoPath(PlanetModel.SPHERE, width);
             for (int i = 0; i < pointCount; i++) {
               final Point nextPoint = randomPoint();
               path.addPoint(nextPoint.getY() * DEGREES_TO_RADIANS, nextPoint.getX() * DEGREES_TO_RADIANS);
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTestCase.java
similarity index 73%
rename from lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTest.java
rename to lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTestCase.java
index b67383b..14b1172 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeRectRelationTestCase.java
@@ -25,9 +25,7 @@
 import com.spatial4j.core.context.SpatialContext;
 import com.spatial4j.core.distance.DistanceUtils;
 import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Rectangle;
 import org.apache.lucene.spatial.spatial4j.geo3d.Bounds;
-import org.apache.lucene.spatial.spatial4j.geo3d.GeoArea;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBox;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBoxFactory;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoCircle;
@@ -35,26 +33,30 @@
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoPolygonFactory;
 import org.apache.lucene.spatial.spatial4j.geo3d.GeoShape;
+import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;
 import org.junit.Rule;
 import org.junit.Test;
 
 import static com.spatial4j.core.distance.DistanceUtils.DEGREES_TO_RADIANS;
 
-public class Geo3dShapeRectRelationTest extends RandomizedShapeTestCase {
+public abstract class Geo3dShapeRectRelationTestCase extends RandomizedShapeTestCase {
+  protected final static double RADIANS_PER_DEGREE = Math.PI/180.0;
+
   @Rule
   public final LogRule testLog = LogRule.instance;
 
-  static Random random() {
+  protected static Random random() {
     return RandomizedContext.current().getRandom();
   }
 
-  {
-    ctx = SpatialContext.GEO;
+  protected final PlanetModel planetModel;
+
+  public Geo3dShapeRectRelationTestCase(PlanetModel planetModel) {
+    super(SpatialContext.GEO);
+    this.planetModel = planetModel;
   }
 
-  protected final static double RADIANS_PER_DEGREE = Math.PI/180.0;
-
-  protected static GeoBBox getBoundingBox(final GeoShape path) {
+  protected GeoBBox getBoundingBox(final GeoShape path) {
       Bounds bounds = path.getBounds(null);
 
       double leftLon;
@@ -78,7 +80,7 @@
       } else {
         maxLat = bounds.getMaxLatitude().doubleValue();
       }
-      return GeoBBoxFactory.makeGeoBBox(maxLat, minLat, leftLon, rightLon);
+      return GeoBBoxFactory.makeGeoBBox(planetModel, maxLat, minLat, leftLon, rightLon);
   }
 
   @Test
@@ -91,9 +93,9 @@
           final int circleRadius = random().nextInt(179) + 1;//no 0-radius
           final Point point = nearP;
           try {
-            final GeoShape shape = new GeoCircle(point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS,
+            final GeoShape shape = new GeoCircle(planetModel, point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS,
                 circleRadius * DEGREES_TO_RADIANS);
-            return new Geo3dShape(shape, ctx);
+            return new Geo3dShape(planetModel, shape, ctx);
           } catch (IllegalArgumentException e) {
             // This is what happens when we create a shape that is invalid.  Although it is conceivable that there are cases where
             // the exception is thrown incorrectly, we aren't going to be able to do that in this random test.
@@ -131,11 +133,11 @@
           ulhcPoint = lrhcPoint;
           lrhcPoint = temp;
         }
-        final GeoShape shape = GeoBBoxFactory.makeGeoBBox(ulhcPoint.getY() * DEGREES_TO_RADIANS,
+        final GeoShape shape = GeoBBoxFactory.makeGeoBBox(planetModel, ulhcPoint.getY() * DEGREES_TO_RADIANS,
             lrhcPoint.getY() * DEGREES_TO_RADIANS,
             ulhcPoint.getX() * DEGREES_TO_RADIANS,
             lrhcPoint.getX() * DEGREES_TO_RADIANS);
-        return new Geo3dShape(shape, ctx);
+        return new Geo3dShape(planetModel, shape, ctx);
       }
 
       @Override
@@ -160,13 +162,13 @@
             final Point point = randomPoint();
             if (ctx.getDistCalc().distance(point,centerPoint) > maxDistance)
               continue;
-            final GeoPoint gPt = new GeoPoint(point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS);
+            final GeoPoint gPt = new GeoPoint(planetModel, point.getY() * DEGREES_TO_RADIANS, point.getX() * DEGREES_TO_RADIANS);
             geoPoints.add(gPt);
           }
           final int convexPointIndex = random().nextInt(vertexCount);       //If we get this wrong, hopefully we get IllegalArgumentException
           try {
-            final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(geoPoints, convexPointIndex);
-            return new Geo3dShape(shape, ctx);
+            final GeoShape shape = GeoPolygonFactory.makeGeoPolygon(planetModel, geoPoints, convexPointIndex);
+            return new Geo3dShape(planetModel, shape, ctx);
           } catch (IllegalArgumentException e) {
             // This is what happens when we create a shape that is invalid.  Although it is conceivable that there are cases where
             // the exception is thrown incorrectly, we aren't going to be able to do that in this random test.
@@ -201,7 +203,7 @@
         final double width = (random().nextInt(89)+1) * DEGREES_TO_RADIANS;
         while (true) {
           try {
-            final GeoPath path = new GeoPath(width);
+            final GeoPath path = new GeoPath(planetModel, width);
             int i = 0;
             while (i < pointCount) {
               final Point nextPoint = randomPoint();
@@ -211,7 +213,7 @@
               i++;
             }
             path.done();
-            return new Geo3dShape(path, ctx);
+            return new Geo3dShape(planetModel, path, ctx);
           } catch (IllegalArgumentException e) {
             // This is what happens when we create a shape that is invalid.  Although it is conceivable that there are cases where
             // the exception is thrown incorrectly, we aren't going to be able to do that in this random test.
@@ -235,41 +237,7 @@
   }
 
   private Point geoPointToSpatial4jPoint(GeoPoint geoPoint) {
-    return ctx.makePoint(geoPoint.x * DistanceUtils.RADIANS_TO_DEGREES,
-        geoPoint.y * DistanceUtils.RADIANS_TO_DEGREES);
-  }
-
-  @Test
-  public void testFailure1() {
-    final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(88 * RADIANS_PER_DEGREE, 30 * RADIANS_PER_DEGREE, -30 * RADIANS_PER_DEGREE, 62 * RADIANS_PER_DEGREE);
-    final List<GeoPoint> points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(66.2465299717 * RADIANS_PER_DEGREE, -29.1786158537 * RADIANS_PER_DEGREE));
-    points.add(new GeoPoint(43.684447915 * RADIANS_PER_DEGREE, 46.2210986329 * RADIANS_PER_DEGREE));
-    points.add(new GeoPoint(30.4579218227 * RADIANS_PER_DEGREE, 14.5238410082 * RADIANS_PER_DEGREE));
-    final GeoShape path = GeoPolygonFactory.makeGeoPolygon(points,0);
-
-    final GeoPoint point = new GeoPoint(34.2730264413182 * RADIANS_PER_DEGREE, 82.75500168892472 * RADIANS_PER_DEGREE);
-
-    // Apparently the rectangle thinks the polygon is completely within it... "shape inside rectangle"
-    assertTrue(GeoArea.WITHIN == rect.getRelationship(path));
-
-    // Point is within path? Apparently not...
-    assertFalse(path.isWithin(point));
-
-    // If it is within the path, it must be within the rectangle, and similarly visa versa
-    assertFalse(rect.isWithin(point));
-
-  }
-
-  @Test
-  public void testFailure2_LUCENE6475() {
-    GeoShape geo3dCircle = new GeoCircle(1.6282053147165243E-4 * RADIANS_PER_DEGREE,
-        -70.1600629789353 * RADIANS_PER_DEGREE, 86 * RADIANS_PER_DEGREE);
-    Geo3dShape geo3dShape = new Geo3dShape(geo3dCircle, ctx);
-    Rectangle rect = ctx.makeRectangle(-118, -114, -2.0, 32.0);
-    assertTrue(geo3dShape.relate(rect).intersects());
-    // thus the bounding box must intersect too
-    assertTrue(geo3dShape.getBoundingBox().relate(rect).intersects());
-
+    return ctx.makePoint(geoPoint.getLongitude() * DistanceUtils.RADIANS_TO_DEGREES,
+        geoPoint.getLatitude() * DistanceUtils.RADIANS_TO_DEGREES);
   }
 }
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeSphereModelRectRelationTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeSphereModelRectRelationTest.java
new file mode 100644
index 0000000..5e2ca7d
--- /dev/null
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeSphereModelRectRelationTest.java
@@ -0,0 +1,73 @@
+package org.apache.lucene.spatial.spatial4j;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.spatial4j.core.shape.Rectangle;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoArea;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBox;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBoxFactory;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoCircle;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoPolygonFactory;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoShape;
+import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;
+import org.junit.Test;
+
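+/**
+ * Rectangle-vs-shape relationship tests run against the spherical planet model,
+ * including regression cases captured from earlier random-test failures.
+ */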
+public class Geo3dShapeSphereModelRectRelationTest extends Geo3dShapeRectRelationTestCase {
+
+  public Geo3dShapeSphereModelRectRelationTest() {
+    super(PlanetModel.SPHERE);
+  }
+
+  @Test
+  public void testFailure1() {
+    final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 88 * RADIANS_PER_DEGREE, 30 * RADIANS_PER_DEGREE, -30 * RADIANS_PER_DEGREE, 62 * RADIANS_PER_DEGREE);
+    final List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(planetModel, 66.2465299717 * RADIANS_PER_DEGREE, -29.1786158537 * RADIANS_PER_DEGREE));
+    points.add(new GeoPoint(planetModel, 43.684447915 * RADIANS_PER_DEGREE, 46.2210986329 * RADIANS_PER_DEGREE));
+    points.add(new GeoPoint(planetModel, 30.4579218227 * RADIANS_PER_DEGREE, 14.5238410082 * RADIANS_PER_DEGREE));
+    final GeoShape path = GeoPolygonFactory.makeGeoPolygon(planetModel, points,0);
+
+    final GeoPoint point = new GeoPoint(planetModel, 34.2730264413182 * RADIANS_PER_DEGREE, 82.75500168892472 * RADIANS_PER_DEGREE);
+
+    // Apparently the rectangle thinks the polygon is completely within it... "shape inside rectangle"
+    assertTrue(GeoArea.WITHIN == rect.getRelationship(path));
+
+    // Point is within path? Apparently not...
+    assertFalse(path.isWithin(point));
+
+    // If it is within the path, it must be within the rectangle, and vice versa
+    assertFalse(rect.isWithin(point));
+
+  }
+
+  @Test
+  public void testFailure2_LUCENE6475() {
+    GeoShape geo3dCircle = new GeoCircle(planetModel, 1.6282053147165243E-4 * RADIANS_PER_DEGREE,
+        -70.1600629789353 * RADIANS_PER_DEGREE, 86 * RADIANS_PER_DEGREE);
+    Geo3dShape geo3dShape = new Geo3dShape(planetModel, geo3dCircle, ctx);
+    Rectangle rect = ctx.makeRectangle(-118, -114, -2.0, 32.0);
+    assertTrue(geo3dShape.relate(rect).intersects());
+    // thus the bounding box must intersect too
+    assertTrue(geo3dShape.getBoundingBox().relate(rect).intersects());
+
+  }
+}
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
new file mode 100644
index 0000000..b26f162
--- /dev/null
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
@@ -0,0 +1,95 @@
+package org.apache.lucene.spatial.spatial4j;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoArea;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBox;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoBBoxFactory;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoCircle;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoPath;
+import org.apache.lucene.spatial.spatial4j.geo3d.GeoPoint;
+import org.apache.lucene.spatial.spatial4j.geo3d.PlanetModel;
+import org.junit.Test;
+
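+/**
+ * Rectangle-vs-shape relationship tests run against the WGS84 planet model,
+ * with regression cases captured from earlier random-test failures.
+ */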
+public class Geo3dShapeWGS84ModelRectRelationTest extends Geo3dShapeRectRelationTestCase {
+
+  public Geo3dShapeWGS84ModelRectRelationTest() {
+    super(PlanetModel.WGS84);
+  }
+
+  @Test
+  public void testFailure1() {
+    final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 90 * RADIANS_PER_DEGREE, 74 * RADIANS_PER_DEGREE,
+        40 * RADIANS_PER_DEGREE, 60 * RADIANS_PER_DEGREE);
+    final GeoPath path = new GeoPath(planetModel, 4 * RADIANS_PER_DEGREE);
+    path.addPoint(84.4987594274 * RADIANS_PER_DEGREE, -22.8345484402 * RADIANS_PER_DEGREE);
+    path.done();
+    assertTrue(GeoArea.DISJOINT == rect.getRelationship(path));
+    // This is what the test failure claimed...
+    //assertTrue(GeoArea.CONTAINS == rect.getRelationship(path));
+    //final GeoBBox bbox = getBoundingBox(path);
+    //assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox));
+  }
+
+  @Test
+  public void testFailure2() {
+    final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, -74 * RADIANS_PER_DEGREE, -90 * RADIANS_PER_DEGREE,
+        0 * RADIANS_PER_DEGREE, 26 * RADIANS_PER_DEGREE);
+    final GeoCircle circle = new GeoCircle(planetModel, -87.3647352103 * RADIANS_PER_DEGREE, 52.3769709972 * RADIANS_PER_DEGREE, 1 * RADIANS_PER_DEGREE);
+    assertTrue(GeoArea.DISJOINT == rect.getRelationship(circle));
+    // This is what the test failure claimed...
+    //assertTrue(GeoArea.CONTAINS == rect.getRelationship(circle));
+    //final GeoBBox bbox = getBoundingBox(circle);
+    //assertFalse(GeoArea.DISJOINT == rect.getRelationship(bbox));
+  }
+
+  @Test
+  public void testFailure3() {
+    /*
+   [junit4]   1> S-R Rel: {}, Shape {}, Rectangle {}    lap# {} [CONTAINS, Geo3dShape{planetmodel=PlanetModel: {ab=1.0011188180710464, c=0.9977622539852008}, shape=GeoPath: {planetmodel=PlanetModel: {ab=1.0011188180710464, c=0.9977622539852008}, width=1.53588974175501(87.99999999999999), 
+    points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}}, 
+    Rect(minX=4.0,maxX=36.0,minY=16.0,maxY=16.0), 6981](no slf4j subst; sorry)
+   [junit4] FAILURE 0.59s | Geo3dWGS84ShapeRectRelationTest.testGeoPathRect <<<
+   [junit4]    > Throwable #1: java.lang.AssertionError: Geo3dShape{planetmodel=PlanetModel: {ab=1.0011188180710464, c=0.9977622539852008}, shape=GeoPath: {planetmodel=PlanetModel: {ab=1.0011188180710464, c=0.9977622539852008}, width=1.53588974175501(87.99999999999999), 
+    points={[[X=0.12097657665150223, Y=-0.6754177666095532, Z=0.7265376136709238], [X=-0.3837892785614207, Y=0.4258049113530899, Z=0.8180007850434892]]}}} intersect Pt(x=23.81626064835212,y=16.0)
+   [junit4]    >  at __randomizedtesting.SeedInfo.seed([2595268DA3F13FEA:6CC30D8C83453E5D]:0)
+   [junit4]    >  at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase._assertIntersect(RandomizedShapeTestCase.java:168)
+   [junit4]    >  at org.apache.lucene.spatial.spatial4j.RandomizedShapeTestCase.assertRelation(RandomizedShapeTestCase.java:153)
+   [junit4]    >  at org.apache.lucene.spatial.spatial4j.RectIntersectionTestHelper.testRelateWithRectangle(RectIntersectionTestHelper.java:128)
+   [junit4]    >  at org.apache.lucene.spatial.spatial4j.Geo3dWGS84ShapeRectRelationTest.testGeoPathRect(Geo3dWGS84ShapeRectRelationTest.java:265)
+  */
+    final GeoBBox rect = GeoBBoxFactory.makeGeoBBox(planetModel, 16 * RADIANS_PER_DEGREE, 16 * RADIANS_PER_DEGREE, 4 * RADIANS_PER_DEGREE, 36 * RADIANS_PER_DEGREE);
+    final GeoPoint pt = new GeoPoint(planetModel, 16 * RADIANS_PER_DEGREE, 23.81626064835212 * RADIANS_PER_DEGREE);
+    final GeoPath path = new GeoPath(planetModel, 88 * RADIANS_PER_DEGREE);
+    path.addPoint(46.6369060853 * RADIANS_PER_DEGREE, -79.8452213228 * RADIANS_PER_DEGREE);
+    path.addPoint(54.9779334519 * RADIANS_PER_DEGREE, 132.029177424 * RADIANS_PER_DEGREE);
+    path.done();
+    System.out.println("rect=" + rect);
+    // Previously reported as "rectangle is within path", which is wrong (it's on the other side); the correct relationship is OVERLAPS
+    assertTrue(GeoArea.OVERLAPS == rect.getRelationship(path));
+    // Rectangle contains point
+    //assertTrue(rect.isWithin(pt));
+    // Path contains point (THIS FAILS)
+    //assertTrue(path.isWithin(pt));
+    // What happens: (1) The center point of the horizontal line is within the path, in fact within a radius of one of the endpoints.
+    // (2) The point mentioned is NOT inside either SegmentEndpoint.
+    // (3) The point mentioned is NOT inside the path segment, either.  (I think it should be...)
+  }
+
+}
+
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxTest.java
index a0c67df..f96fcb7 100755
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoBBoxTest.java
@@ -36,14 +36,14 @@
     GeoConvexPolygon cp;
     int relationship;
     List<GeoPoint> points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(24 * DEGREES_TO_RADIANS, -30 * DEGREES_TO_RADIANS));
-    points.add(new GeoPoint(-11 * DEGREES_TO_RADIANS, 101 * DEGREES_TO_RADIANS));
-    points.add(new GeoPoint(-49 * DEGREES_TO_RADIANS, -176 * DEGREES_TO_RADIANS));
-    GeoMembershipShape shape = GeoPolygonFactory.makeGeoPolygon(points, 0);
-    box = GeoBBoxFactory.makeGeoBBox(-64 * DEGREES_TO_RADIANS, -64 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, 180 * DEGREES_TO_RADIANS);
+    points.add(new GeoPoint(PlanetModel.SPHERE, 24 * DEGREES_TO_RADIANS, -30 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -11 * DEGREES_TO_RADIANS, 101 * DEGREES_TO_RADIANS));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -49 * DEGREES_TO_RADIANS, -176 * DEGREES_TO_RADIANS));
+    GeoMembershipShape shape = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points, 0);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, -64 * DEGREES_TO_RADIANS, -64 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, 180 * DEGREES_TO_RADIANS);
     relationship = box.getRelationship(shape);
     assertEquals(GeoArea.CONTAINS, relationship);
-    box = GeoBBoxFactory.makeGeoBBox(-61.85 * DEGREES_TO_RADIANS, -67.5 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, -168.75 * DEGREES_TO_RADIANS);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, -61.85 * DEGREES_TO_RADIANS, -67.5 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS, -168.75 * DEGREES_TO_RADIANS);
     System.out.println("Shape = " + shape + " Rect = " + box);
     relationship = box.getRelationship(shape);
     assertEquals(GeoArea.CONTAINS, relationship);
@@ -54,52 +54,52 @@
     GeoBBox box;
     GeoPoint gp;
     // Standard normal Rect box, not crossing dateline
-    box = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, -1.0, 1.0);
-    gp = new GeoPoint(-0.1, 0.0);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -1.0, 1.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, 0.0);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(0.1, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, 0.0);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, 0.0);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, 1.1);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.1);
     assertFalse(box.isWithin(gp));
     // Standard normal Rect box, crossing dateline
-    box = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, Math.PI - 1.0, -Math.PI + 1.0);
-    gp = new GeoPoint(-0.1, -Math.PI);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 1.0, -Math.PI + 1.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(0.1, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, -Math.PI);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI + 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI + 1.1);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI - 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI - 1.1);
     assertFalse(box.isWithin(gp));
     // Latitude zone rectangle
-    box = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, -Math.PI, Math.PI);
-    gp = new GeoPoint(-0.1, -Math.PI);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -Math.PI, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(0.1, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, -Math.PI);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI + 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI + 1.1);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI - 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI - 1.1);
     assertTrue(box.isWithin(gp));
     // World
-    box = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, Math.PI);
-    gp = new GeoPoint(-0.1, -Math.PI);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(0.1, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, -Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, -Math.PI);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI + 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI + 1.1);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -Math.PI - 1.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -Math.PI - 1.1);
     assertTrue(box.isWithin(gp));
 
   }
@@ -109,23 +109,23 @@
     GeoBBox box;
     GeoPoint gp;
     // Standard normal Rect box, not crossing dateline
-    box = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, -1.0, 1.0);
+    box = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -1.0, 1.0);
     box = box.expand(0.1);
-    gp = new GeoPoint(0.05, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.0);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(0.15, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.15, 0.0);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.25 - 0.05, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.25 - 0.05, 0.0);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.25 - 0.15, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.25 - 0.15, 0.0);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -1.05);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.05);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, -1.15);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.15);
     assertFalse(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, 1.05);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, 1.05);
     assertTrue(box.isWithin(gp));
-    gp = new GeoPoint(-0.1, 1.15);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, 1.15);
     assertFalse(box.isWithin(gp));
   }
 
@@ -134,7 +134,7 @@
     GeoBBox c;
     Bounds b;
 
-    c = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, -1.0, 1.0);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -1.0, 1.0);
 
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
@@ -145,7 +145,7 @@
     assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001);
     assertEquals(0.0, b.getMaxLatitude(), 0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, 1.0, -1.0);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, 1.0, -1.0);
 
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
@@ -156,7 +156,7 @@
     assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001);
     assertEquals(0.0, b.getMaxLatitude(), 0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -1.0, 1.0);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -1.0, 1.0);
 
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
@@ -165,7 +165,7 @@
     assertEquals(-1.0, b.getLeftLongitude(), 0.000001);
     assertEquals(1.0, b.getRightLongitude(), 0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, 1.0, -1.0);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 1.0, -1.0);
 
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
@@ -176,7 +176,7 @@
 
     // Check wide variants of rectangle and longitude slice
 
-    c = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, -Math.PI + 0.1, Math.PI - 0.1);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, -Math.PI + 0.1, Math.PI - 0.1);
 
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
@@ -187,7 +187,7 @@
     assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001);
     assertEquals(0.0, b.getMaxLatitude(), 0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(0.0, -Math.PI * 0.25, Math.PI - 0.1, -Math.PI + 0.1);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 0.0, -Math.PI * 0.25, Math.PI - 0.1, -Math.PI + 0.1);
 
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
@@ -198,7 +198,7 @@
     assertEquals(-Math.PI * 0.25, b.getMinLatitude(), 0.000001);
     assertEquals(0.0, b.getMaxLatitude(), 0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -Math.PI + 0.1, Math.PI - 0.1);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI + 0.1, Math.PI - 0.1);
 
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
@@ -207,7 +207,7 @@
     //assertEquals(-Math.PI+0.1,b.getLeftLongitude(),0.000001);
     //assertEquals(Math.PI-0.1,b.getRightLongitude(),0.000001);
 
-    c = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, Math.PI - 0.1, -Math.PI + 0.1);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, Math.PI - 0.1, -Math.PI + 0.1);
 
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
@@ -217,7 +217,7 @@
     assertEquals(-Math.PI + 0.1, b.getRightLongitude(), 0.000001);
 
     // Check latitude zone
-    c = GeoBBoxFactory.makeGeoBBox(1.0, -1.0, -Math.PI, Math.PI);
+    c = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, 1.0, -1.0, -Math.PI, Math.PI);
 
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
@@ -230,8 +230,8 @@
     GeoBBox c1;
     GeoBBox c2;
 
-    c1 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0);
-    c2 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI);
+    c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0);
+    c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI);
 
     b = new Bounds();
     b = c1.getBounds(b);
@@ -240,8 +240,8 @@
     assertTrue(b.checkNoTopLatitudeBound());
     assertTrue(b.checkNoBottomLatitudeBound());
 
-    c1 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0);
-    c2 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI * 0.5);
+    c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI, 0.0);
+    c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI * 0.5);
 
     b = new Bounds();
     b = c1.getBounds(b);
@@ -252,8 +252,8 @@
     //assertEquals(-Math.PI,b.getLeftLongitude(),0.000001);
     //assertEquals(Math.PI*0.5,b.getRightLongitude(),0.000001);
 
-    c1 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, -Math.PI * 0.5, 0.0);
-    c2 = GeoBBoxFactory.makeGeoBBox(Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI);
+    c1 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, -Math.PI * 0.5, 0.0);
+    c2 = GeoBBoxFactory.makeGeoBBox(PlanetModel.SPHERE, Math.PI * 0.5, -Math.PI * 0.5, 0.0, Math.PI);
 
     b = new Bounds();
     b = c1.getBounds(b);
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircleTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircleTest.java
index 2e16be2..11e1ad1 100755
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircleTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoCircleTest.java
@@ -30,16 +30,16 @@
   public void testCircleDistance() {
     GeoCircle c;
     GeoPoint gp;
-    c = new GeoCircle(0.0, -0.5, 0.1);
-    gp = new GeoPoint(0.0, 0.0);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertEquals(Double.MAX_VALUE, c.computeArcDistance(gp), 0.0);
     assertEquals(Double.MAX_VALUE, c.computeLinearDistance(gp), 0.0);
     assertEquals(Double.MAX_VALUE, c.computeNormalDistance(gp), 0.0);
-    gp = new GeoPoint(0.0, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
     assertEquals(0.0, c.computeArcDistance(gp), 0.000001);
     assertEquals(0.0, c.computeLinearDistance(gp), 0.000001);
     assertEquals(0.0, c.computeNormalDistance(gp), 0.000001);
-    gp = new GeoPoint(0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, -0.5);
     assertEquals(0.05, c.computeArcDistance(gp), 0.000001);
     assertEquals(0.049995, c.computeLinearDistance(gp), 0.000001);
     assertEquals(0.049979, c.computeNormalDistance(gp), 0.000001);
@@ -49,18 +49,18 @@
   public void testCirclePointWithin() {
     GeoCircle c;
     GeoPoint gp;
-    c = new GeoCircle(0.0, -0.5, 0.1);
-    gp = new GeoPoint(0.0, 0.0);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.55);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.55);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.45);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.45);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertFalse(c.isWithin(gp));
   }
 
@@ -71,7 +71,7 @@
 
 
     // Vertical circle cases
-    c = new GeoCircle(0.0, -0.5, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, -0.5, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -80,7 +80,7 @@
     assertEquals(-0.4, b.getRightLongitude(), 0.000001);
     assertEquals(-0.1, b.getMinLatitude(), 0.000001);
     assertEquals(0.1, b.getMaxLatitude(), 0.000001);
-    c = new GeoCircle(0.0, 0.5, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, 0.5, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -89,7 +89,7 @@
     assertEquals(0.6, b.getRightLongitude(), 0.000001);
     assertEquals(-0.1, b.getMinLatitude(), 0.000001);
     assertEquals(0.1, b.getMaxLatitude(), 0.000001);
-    c = new GeoCircle(0.0, 0.0, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, 0.0, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -98,7 +98,7 @@
     assertEquals(0.1, b.getRightLongitude(), 0.000001);
     assertEquals(-0.1, b.getMinLatitude(), 0.000001);
     assertEquals(0.1, b.getMaxLatitude(), 0.000001);
-    c = new GeoCircle(0.0, Math.PI, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.0, Math.PI, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -108,13 +108,13 @@
     assertEquals(-0.1, b.getMinLatitude(), 0.000001);
     assertEquals(0.1, b.getMaxLatitude(), 0.000001);
     // Horizontal circle cases
-    c = new GeoCircle(Math.PI * 0.5, 0.0, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, Math.PI * 0.5, 0.0, 0.1);
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
     assertTrue(b.checkNoTopLatitudeBound());
     assertFalse(b.checkNoBottomLatitudeBound());
     assertEquals(Math.PI * 0.5 - 0.1, b.getMinLatitude(), 0.000001);
-    c = new GeoCircle(-Math.PI * 0.5, 0.0, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, -Math.PI * 0.5, 0.0, 0.1);
     b = c.getBounds(null);
     assertTrue(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -122,7 +122,7 @@
     assertEquals(-Math.PI * 0.5 + 0.1, b.getMaxLatitude(), 0.000001);
 
     // Now do a somewhat tilted plane, facing different directions.
-    c = new GeoCircle(0.01, 0.0, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, 0.0, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -132,7 +132,7 @@
     assertEquals(-0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(0.01, Math.PI, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, Math.PI, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -142,7 +142,7 @@
     assertEquals(Math.PI - 0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(-Math.PI + 0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(0.01, Math.PI * 0.5, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, Math.PI * 0.5, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -152,7 +152,7 @@
     assertEquals(Math.PI * 0.5 - 0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(Math.PI * 0.5 + 0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(0.01, -Math.PI * 0.5, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, -Math.PI * 0.5, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -163,7 +163,7 @@
     assertEquals(-Math.PI * 0.5 + 0.1, b.getRightLongitude(), 0.00001);
 
     // Slightly tilted, PI/4 direction.
-    c = new GeoCircle(0.01, Math.PI * 0.25, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, Math.PI * 0.25, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -173,7 +173,7 @@
     assertEquals(Math.PI * 0.25 - 0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(Math.PI * 0.25 + 0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(0.01, -Math.PI * 0.25, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, -Math.PI * 0.25, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -183,7 +183,7 @@
     assertEquals(-Math.PI * 0.25 - 0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(-Math.PI * 0.25 + 0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(-0.01, Math.PI * 0.25, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, -0.01, Math.PI * 0.25, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -193,7 +193,7 @@
     assertEquals(Math.PI * 0.25 - 0.1, b.getLeftLongitude(), 0.00001);
     assertEquals(Math.PI * 0.25 + 0.1, b.getRightLongitude(), 0.00001);
 
-    c = new GeoCircle(-0.01, -Math.PI * 0.25, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, -0.01, -Math.PI * 0.25, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
@@ -204,7 +204,7 @@
     assertEquals(-Math.PI * 0.25 + 0.1, b.getRightLongitude(), 0.00001);
 
     // Now do a somewhat tilted plane.
-    c = new GeoCircle(0.01, -0.5, 0.1);
+    c = new GeoCircle(PlanetModel.SPHERE, 0.01, -0.5, 0.1);
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
     assertFalse(b.checkNoTopLatitudeBound());
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygonTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygonTest.java
index 1df933e..0bb7179 100755
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygonTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoConvexPolygonTest.java
@@ -30,37 +30,37 @@
   public void testPolygonPointWithin() {
     GeoConvexPolygon c;
     GeoPoint gp;
-    c = new GeoConvexPolygon(-0.1, -0.5);
+    c = new GeoConvexPolygon(PlanetModel.SPHERE, -0.1, -0.5);
     c.addPoint(0.0, -0.6, false);
     c.addPoint(0.1, -0.5, false);
     c.addPoint(0.0, -0.4, false);
     c.donePoints(false);
     // Sample some points within
-    gp = new GeoPoint(0.0, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.55);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.55);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.45);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.45);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(-0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.05, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, -0.5);
     assertTrue(c.isWithin(gp));
     // Sample some nearby points outside
-    gp = new GeoPoint(0.0, -0.65);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.65);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.35);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.35);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(-0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.15, -0.5);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.15, -0.5);
     assertFalse(c.isWithin(gp));
     // Random points outside
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertFalse(c.isWithin(gp));
   }
 
@@ -69,7 +69,7 @@
     GeoConvexPolygon c;
     Bounds b;
 
-    c = new GeoConvexPolygon(-0.1, -0.5);
+    c = new GeoConvexPolygon(PlanetModel.SPHERE, -0.1, -0.5);
     c.addPoint(0.0, -0.6, false);
     c.addPoint(0.1, -0.5, false);
     c.addPoint(0.0, -0.4, false);
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoModelTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoModelTest.java
new file mode 100644
index 0000000..4e294df
--- /dev/null
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoModelTest.java
@@ -0,0 +1,106 @@
+package org.apache.lucene.spatial.spatial4j.geo3d;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test basic shape functionality against a non-spherical planet model.
+ */
+public class GeoModelTest {
+
+  protected final static PlanetModel scaledModel = new PlanetModel(1.2,1.5);
+  
+  @Test
+  public void testBasicCircle() {
+    // The point of this test is just to make sure nothing blows up doing normal things with a quite non-spherical model
+    // Make sure that the north pole is in the circle, and south pole isn't
+    final GeoPoint northPole = new GeoPoint(scaledModel, Math.PI * 0.5, 0.0);
+    final GeoPoint southPole = new GeoPoint(scaledModel, -Math.PI * 0.5, 0.0);
+    final GeoPoint point1 = new GeoPoint(scaledModel, Math.PI * 0.25, 0.0);
+    final GeoPoint point2 = new GeoPoint(scaledModel, Math.PI * 0.125, 0.0);
+    
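+    // GeoCircle arguments: planet model, center latitude, center longitude, radius (angles in radians)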
+    GeoCircle circle = new GeoCircle(scaledModel, Math.PI * 0.5, 0.0, 0.01);
+    assertTrue(circle.isWithin(northPole));
+    assertFalse(circle.isWithin(southPole));
+    assertFalse(circle.isWithin(point1));
+    Bounds bounds = circle.getBounds(null);
+    assertTrue(bounds.checkNoLongitudeBound());
+    assertTrue(bounds.checkNoTopLatitudeBound());
+    assertFalse(bounds.checkNoBottomLatitudeBound());
+    assertEquals(Math.PI * 0.5 - 0.01, bounds.getMinLatitude(), 0.01);
+
+    circle = new GeoCircle(scaledModel, Math.PI * 0.25, 0.0, 0.01);
+    assertTrue(circle.isWithin(point1));
+    assertFalse(circle.isWithin(northPole));
+    assertFalse(circle.isWithin(southPole));
+    bounds = circle.getBounds(null);
+    assertFalse(bounds.checkNoTopLatitudeBound());
+    assertFalse(bounds.checkNoLongitudeBound());
+    assertFalse(bounds.checkNoBottomLatitudeBound());
+    assertEquals(1.20, bounds.getMaxLatitude(), 0.01);
+    assertEquals(Math.PI * 0.25 - 0.01, bounds.getMinLatitude(), 0.01);
+    assertEquals(-0.36, bounds.getLeftLongitude(), 0.01);
+    assertEquals(0.36, bounds.getRightLongitude(), 0.01);
+
+    circle = new GeoCircle(scaledModel, Math.PI * 0.125, 0.0, 0.01);
+    assertTrue(circle.isWithin(point2));
+    assertFalse(circle.isWithin(northPole));
+    assertFalse(circle.isWithin(southPole));
+    bounds = circle.getBounds(null);
+    assertFalse(bounds.checkNoLongitudeBound());
+    assertFalse(bounds.checkNoTopLatitudeBound());
+    assertFalse(bounds.checkNoBottomLatitudeBound());
+    // Asymmetric, as expected
+    assertEquals(Math.PI * 0.125 - 0.01, bounds.getMinLatitude(), 0.01);
+    assertEquals(0.74, bounds.getMaxLatitude(), 0.01);
+    assertEquals(-0.18, bounds.getLeftLongitude(), 0.01);
+    assertEquals(0.18, bounds.getRightLongitude(), 0.01);
+
+  }
+
+  @Test
+  public void testBasicRectangle() {
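+    // makeGeoBBox arguments: planet model, top latitude, bottom latitude, left longitude, right longitude (radians)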
+    final GeoBBox bbox = GeoBBoxFactory.makeGeoBBox(scaledModel, 1.0, 0.0, 0.0, 1.0);
+    final GeoPoint insidePoint = new GeoPoint(scaledModel, 0.5, 0.5);
+    assertTrue(bbox.isWithin(insidePoint));
+    final GeoPoint topOutsidePoint = new GeoPoint(scaledModel, 1.01, 0.5);
+    assertFalse(bbox.isWithin(topOutsidePoint));
+    final GeoPoint bottomOutsidePoint = new GeoPoint(scaledModel, -0.01, 0.5);
+    assertFalse(bbox.isWithin(bottomOutsidePoint));
+    final GeoPoint leftOutsidePoint = new GeoPoint(scaledModel, 0.5, -0.01);
+    assertFalse(bbox.isWithin(leftOutsidePoint));
+    final GeoPoint rightOutsidePoint = new GeoPoint(scaledModel, 0.5, 1.01);
+    assertFalse(bbox.isWithin(rightOutsidePoint));
+    final Bounds bounds = bbox.getBounds(null);
+    assertFalse(bounds.checkNoLongitudeBound());
+    assertFalse(bounds.checkNoTopLatitudeBound());
+    assertFalse(bounds.checkNoBottomLatitudeBound());
+    assertEquals(1.0, bounds.getMaxLatitude(), 0.00001);
+    assertEquals(0.0, bounds.getMinLatitude(), 0.00001);
+    assertEquals(1.0, bounds.getRightLongitude(), 0.00001);
+    assertEquals(0.0, bounds.getLeftLongitude(), 0.00001);
+  }
+  
+}
+
+
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPathTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPathTest.java
index f6413ab..998cbe8 100755
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPathTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPathTest.java
@@ -19,6 +19,7 @@
 
 import org.junit.Test;
 
+import static java.lang.Math.toRadians;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -30,47 +31,47 @@
     // Start with a really simple case
     GeoPath p;
     GeoPoint gp;
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     p.addPoint(0.0, 0.0);
     p.addPoint(0.0, 0.1);
     p.addPoint(0.0, 0.2);
     p.done();
-    gp = new GeoPoint(Math.PI * 0.5, 0.15);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.15);
     assertEquals(Double.MAX_VALUE, p.computeArcDistance(gp), 0.0);
-    gp = new GeoPoint(0.05, 0.15);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.15);
     assertEquals(0.15 + 0.05, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(0.0, 0.12);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.12);
     assertEquals(0.12 + 0.0, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(-0.15, 0.05);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.15, 0.05);
     assertEquals(Double.MAX_VALUE, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(0.0, 0.25);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.25);
     assertEquals(0.20 + 0.05, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(0.0, -0.05);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.05);
     assertEquals(0.0 + 0.05, p.computeArcDistance(gp), 0.000001);
 
     // Compute path distances now
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     p.addPoint(0.0, 0.0);
     p.addPoint(0.0, 0.1);
     p.addPoint(0.0, 0.2);
     p.done();
-    gp = new GeoPoint(0.05, 0.15);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, 0.15);
     assertEquals(0.15 + 0.05, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(0.0, 0.12);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.12);
     assertEquals(0.12, p.computeArcDistance(gp), 0.000001);
 
     // Now try a vertical path, and make sure distances are as expected
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     p.addPoint(-Math.PI * 0.25, -0.5);
     p.addPoint(Math.PI * 0.25, -0.5);
     p.done();
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertEquals(Double.MAX_VALUE, p.computeArcDistance(gp), 0.0);
-    gp = new GeoPoint(-0.1, -1.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.1, -1.0);
     assertEquals(Double.MAX_VALUE, p.computeArcDistance(gp), 0.0);
-    gp = new GeoPoint(Math.PI * 0.25 + 0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.25 + 0.05, -0.5);
     assertEquals(Math.PI * 0.5 + 0.05, p.computeArcDistance(gp), 0.000001);
-    gp = new GeoPoint(-Math.PI * 0.25 - 0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.25 - 0.05, -0.5);
     assertEquals(0.0 + 0.05, p.computeArcDistance(gp), 0.000001);
   }
 
@@ -79,47 +80,48 @@
     // Tests whether we can properly detect whether a point is within a path or not
     GeoPath p;
     GeoPoint gp;
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     // Build a diagonal path crossing the equator
     p.addPoint(-0.2, -0.2);
     p.addPoint(0.2, 0.2);
     p.done();
     // Test points on the path
-    gp = new GeoPoint(-0.2, -0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.2, -0.2);
     assertTrue(p.isWithin(gp));
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertTrue(p.isWithin(gp));
-    gp = new GeoPoint(0.1, 0.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, 0.1);
     assertTrue(p.isWithin(gp));
     // Test points off the path
-    gp = new GeoPoint(-0.2, 0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.2, 0.2);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, 0.0);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(0.2, -0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.2, -0.2);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertFalse(p.isWithin(gp));
     // Repeat the test, but across the terminator
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     // Build a diagonal path crossing the equator
     p.addPoint(-0.2, Math.PI - 0.2);
     p.addPoint(0.2, -Math.PI + 0.2);
+    p.done();
     // Test points on the path
-    gp = new GeoPoint(-0.2, Math.PI - 0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.2, Math.PI - 0.2);
     assertTrue(p.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertTrue(p.isWithin(gp));
-    gp = new GeoPoint(0.1, -Math.PI + 0.1);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.1, -Math.PI + 0.1);
     assertTrue(p.isWithin(gp));
     // Test points off the path
-    gp = new GeoPoint(-0.2, -Math.PI + 0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.2, -Math.PI + 0.2);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(-Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, -Math.PI * 0.5, 0.0);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(0.2, Math.PI - 0.2);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.2, Math.PI - 0.2);
     assertFalse(p.isWithin(gp));
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertFalse(p.isWithin(gp));
 
   }
@@ -131,31 +133,31 @@
 
     // Start by testing the basic kinds of relationship, increasing in order of difficulty.
 
-    p = new GeoPath(0.1);
+    p = new GeoPath(PlanetModel.SPHERE, 0.1);
     p.addPoint(-0.3, -0.3);
     p.addPoint(0.3, 0.3);
     p.done();
     // Easiest: The path is wholly contains the georect
-    rect = new GeoRectangle(0.05, -0.05, -0.05, 0.05);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.05, -0.05, -0.05, 0.05);
     assertEquals(GeoArea.CONTAINS, rect.getRelationship(p));
     // Next easiest: Some endpoints of the rectangle are inside, and some are outside.
-    rect = new GeoRectangle(0.05, -0.05, -0.05, 0.5);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.05, -0.05, -0.05, 0.5);
     assertEquals(GeoArea.OVERLAPS, rect.getRelationship(p));
     // Now, all points are outside, but the figures intersect
-    rect = new GeoRectangle(0.05, -0.05, -0.5, 0.5);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.05, -0.05, -0.5, 0.5);
     assertEquals(GeoArea.OVERLAPS, rect.getRelationship(p));
     // Finally, all points are outside, and the figures *do not* intersect
-    rect = new GeoRectangle(0.5, -0.5, -0.5, 0.5);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.5, -0.5, -0.5, 0.5);
     assertEquals(GeoArea.WITHIN, rect.getRelationship(p));
     // Check that segment edge overlap detection works
-    rect = new GeoRectangle(0.1, 0.0, -0.1, 0.0);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.1, 0.0, -0.1, 0.0);
     assertEquals(GeoArea.OVERLAPS, rect.getRelationship(p));
-    rect = new GeoRectangle(0.2, 0.1, -0.2, -0.1);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.2, 0.1, -0.2, -0.1);
     assertEquals(GeoArea.DISJOINT, rect.getRelationship(p));
     // Check if overlap at endpoints behaves as expected next
-    rect = new GeoRectangle(0.5, -0.5, -0.5, -0.35);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.5, -0.5, -0.5, -0.35);
     assertEquals(GeoArea.OVERLAPS, rect.getRelationship(p));
-    rect = new GeoRectangle(0.5, -0.5, -0.5, -0.45);
+    rect = new GeoRectangle(PlanetModel.SPHERE, 0.5, -0.5, -0.5, -0.45);
     assertEquals(GeoArea.DISJOINT, rect.getRelationship(p));
 
   }
@@ -165,7 +167,7 @@
     GeoPath c;
     Bounds b;
 
-    c = new GeoPath(0.1);
+    c = new GeoPath(PlanetModel.SPHERE, 0.1);
     c.addPoint(-0.3, -0.3);
     c.addPoint(0.3, 0.3);
     c.done();
@@ -181,4 +183,14 @@
 
   }
 
+  @Test
+  public void testCoLinear() {
+    // p1: (12,-90), p2: (11,-55), p3: (129,-90) -- (lon,lat) in degrees
+    GeoPath p = new GeoPath(PlanetModel.SPHERE, 0.1);
+    p.addPoint(toRadians(-90), toRadians(12));//south pole
+    p.addPoint(toRadians(-55), toRadians(11));
+    p.addPoint(toRadians(-90), toRadians(129));//south pole again
+    p.done();//at least test this doesn't bomb like it used to -- LUCENE-6520
+  }
+
 }
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPointTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPointTest.java
new file mode 100644
index 0000000..1b59825
--- /dev/null
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPointTest.java
@@ -0,0 +1,56 @@
+package org.apache.lucene.spatial.spatial4j.geo3d;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.spatial4j.core.distance.DistanceUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomFloat;
+
+/**
+ * Test basic GeoPoint functionality.
+ */
+public class GeoPointTest extends LuceneTestCase {
+
+  @Test
+  public void testConversion() {
+    testPointRoundTrip(PlanetModel.SPHERE, 90, 0, 1e-12);
+    testPointRoundTrip(PlanetModel.SPHERE, -90, 0, 1e-12);
+    testPointRoundTrip(PlanetModel.WGS84, 90, 0, 1e-12);
+    testPointRoundTrip(PlanetModel.WGS84, -90, 0, 1e-12);
+
+    final int times = atLeast(100);
+    for (int i = 0; i < times; i++) {
+      final double pLat = (randomFloat() * 180.0 - 90.0) * DistanceUtils.DEGREES_TO_RADIANS;
+      final double pLon = (randomFloat() * 360.0 - 180.0) * DistanceUtils.DEGREES_TO_RADIANS;
+      testPointRoundTrip(PlanetModel.SPHERE, pLat, pLon, 1e-6);//1e-6 since there's a square root in there (Karl says)
+      testPointRoundTrip(PlanetModel.WGS84, pLat, pLon, 1e-6);
+    }
+  }
+
+  protected void testPointRoundTrip(PlanetModel planetModel, double pLat, double pLon, double epsilon) {
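+    // convert (lat, lon) to a 3D point, read the lat/lon back out, and check the arc distance between the two is ~0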
+    final GeoPoint p1 = new GeoPoint(planetModel, pLat, pLon);
+    final GeoPoint p2 = new GeoPoint(planetModel, p1.getLatitude(), p1.getLongitude());
+    double dist = p1.arcDistance(p2);
+    assertEquals(0, dist, epsilon);
+  }
+
+}
+
+
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonTest.java
index 87b26e8..b44ee2b 100755
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/GeoPolygonTest.java
@@ -36,49 +36,49 @@
     List<GeoPoint> points;
 
     points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(-0.1, -0.5));
-    points.add(new GeoPoint(0.0, -0.6));
-    points.add(new GeoPoint(0.1, -0.5));
-    points.add(new GeoPoint(0.0, -0.4));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
 
-    c = GeoPolygonFactory.makeGeoPolygon(points, 0);
+    c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points, 0);
     // Sample some points within
-    gp = new GeoPoint(0.0, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.55);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.55);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.45);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.45);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(-0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.05, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, -0.5);
     assertTrue(c.isWithin(gp));
     // Sample some nearby points outside
-    gp = new GeoPoint(0.0, -0.65);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.65);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.35);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.35);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(-0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.15, -0.5);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.15, -0.5);
     assertFalse(c.isWithin(gp));
     // Random points outside
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertFalse(c.isWithin(gp));
 
     points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(-0.1, -0.5));
-    points.add(new GeoPoint(-0.01, -0.6));
-    points.add(new GeoPoint(-0.1, -0.7));
-    points.add(new GeoPoint(0.0, -0.8));
-    points.add(new GeoPoint(0.1, -0.7));
-    points.add(new GeoPoint(0.01, -0.6));
-    points.add(new GeoPoint(0.1, -0.5));
-    points.add(new GeoPoint(0.0, -0.4));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.01, -0.6));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.7));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.8));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.7));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.01, -0.6));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
         
         /*
         System.out.println("Points: ");
@@ -87,33 +87,33 @@
         }
         */
 
-    c = GeoPolygonFactory.makeGeoPolygon(points, 0);
+    c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points, 0);
     // Sample some points within
-    gp = new GeoPoint(0.0, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.55);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.55);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.45);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.45);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(-0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.05, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.05, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.05, -0.5);
     assertTrue(c.isWithin(gp));
-    gp = new GeoPoint(0.0, -0.7);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.7);
     assertTrue(c.isWithin(gp));
     // Sample some nearby points outside
-    gp = new GeoPoint(0.0, -0.35);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, -0.35);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(-0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, -0.15, -0.5);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.15, -0.5);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.15, -0.5);
     assertFalse(c.isWithin(gp));
     // Random points outside
-    gp = new GeoPoint(0.0, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(Math.PI * 0.5, 0.0);
+    gp = new GeoPoint(PlanetModel.SPHERE, Math.PI * 0.5, 0.0);
     assertFalse(c.isWithin(gp));
-    gp = new GeoPoint(0.0, Math.PI);
+    gp = new GeoPoint(PlanetModel.SPHERE, 0.0, Math.PI);
     assertFalse(c.isWithin(gp));
 
   }
@@ -125,12 +125,12 @@
     List<GeoPoint> points;
 
     points = new ArrayList<GeoPoint>();
-    points.add(new GeoPoint(-0.1, -0.5));
-    points.add(new GeoPoint(0.0, -0.6));
-    points.add(new GeoPoint(0.1, -0.5));
-    points.add(new GeoPoint(0.0, -0.4));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
 
-    c = GeoPolygonFactory.makeGeoPolygon(points, 0);
+    c = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points, 0);
 
     b = c.getBounds(null);
     assertFalse(b.checkNoLongitudeBound());
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/PlaneTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/PlaneTest.java
index 5020f6e..a333737 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/PlaneTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/spatial4j/geo3d/PlaneTest.java
@@ -30,7 +30,7 @@
 
   @Test
   public void testIdenticalPlanes() {
-    final GeoPoint p = new GeoPoint(0.123, -0.456);
+    final GeoPoint p = new GeoPoint(PlanetModel.SPHERE, 0.123, -0.456);
     final Plane plane1 = new Plane(p, 0.0);
     final Plane plane2 = new Plane(p, 0.0);
     assertTrue(plane1.isNumericallyIdentical(plane2));
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java
index 4a1c30b..2bf4151 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionAnalyzer.java
@@ -17,22 +17,10 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.AnalyzerWrapper;
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.TokenStreamToAutomaton;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.Operations;
-import org.apache.lucene.util.automaton.Transition;
 
 /**
  * Wraps an {@link org.apache.lucene.analysis.Analyzer}
@@ -40,15 +28,15 @@
  * (e.g. preserving token separators, preserving position increments while converting
  * a token stream to an automaton)
  * <p>
- * Can be used to index {@link SuggestField}
- * and as a query analyzer to {@link SuggestIndexSearcher}
+ * Can be used to index {@link SuggestField} and {@link ContextSuggestField}
+ * and as a query analyzer for {@link PrefixCompletionQuery} and {@link FuzzyCompletionQuery}
  * <p>
- * NOTE: In most cases, index and query analyzer should have same values for {@link #preservePositionIncrements}
- * and {@link #preserveSep}
+ * NOTE: In most cases, the index and query analyzer should have the same values for {@link #preservePositionIncrements()}
+ * and {@link #preserveSep()}
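+ * <p>
+ * A minimal usage sketch (the single-argument constructor and the {@code StandardAnalyzer}
+ * delegate are illustrative assumptions, not part of this change):
+ * <pre class="prettyprint">
+ *   Analyzer delegate = new StandardAnalyzer();
+ *   CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(delegate);
+ *   // use the same analyzer (or identical settings) at index time and at query time
+ * </pre>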
  *
  * @lucene.experimental
  */
-public class CompletionAnalyzer extends AnalyzerWrapper {
+public final class CompletionAnalyzer extends AnalyzerWrapper {
 
   /**
    * Represents the separation between tokens, if
@@ -64,7 +52,7 @@
    */
   final static int HOLE_CHARACTER = TokenStreamToAutomaton.HOLE;
 
-  final static int DEFAULT_MAX_GRAPH_EXPANSIONS = -1;
+  final static int DEFAULT_MAX_GRAPH_EXPANSIONS = Operations.DEFAULT_MAX_DETERMINIZED_STATES;
   final static boolean DEFAULT_PRESERVE_SEP = true;
   final static boolean DEFAULT_PRESERVE_POSITION_INCREMENTS = true;
 
@@ -133,6 +121,22 @@
     this(analyzer, DEFAULT_PRESERVE_SEP, DEFAULT_PRESERVE_POSITION_INCREMENTS, maxGraphExpansions);
   }
 
+  /**
+   * Returns true if separation between tokens is preserved when converting
+   * the token stream to an automaton
+   */
+  public boolean preserveSep() {
+    return preserveSep;
+  }
+
+  /**
+   * Returns true if position increments are preserved when converting
+   * the token stream to an automaton
+   */
+  public boolean preservePositionIncrements() {
+    return preservePositionIncrements;
+  }
+
   @Override
   protected Analyzer getWrappedAnalyzer(String fieldName) {
     return analyzer;
@@ -141,33 +145,7 @@
   @Override
   protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
     CompletionTokenStream tokenStream = new CompletionTokenStream(components.getTokenStream(),
-        preserveSep, preservePositionIncrements, SEP_LABEL, maxGraphExpansions);
+        preserveSep, preservePositionIncrements, maxGraphExpansions);
     return new TokenStreamComponents(components.getTokenizer(), tokenStream);
   }
-
-  /**
-   * Converts <code>key</code> to an automaton using
-   * {@link #preservePositionIncrements}, {@link #preserveSep}
-   * and {@link #maxGraphExpansions}
-   */
-  public Automaton toAutomaton(String field, CharSequence key) throws IOException {
-    for (int i = 0; i < key.length(); i++) {
-      switch (key.charAt(i)) {
-        case HOLE_CHARACTER:
-          throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved");
-        case SEP_LABEL:
-          throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved");
-        default:
-          break;
-      }
-    }
-
-    try (TokenStream tokenStream = analyzer.tokenStream(field, key.toString())) {
-      try(CompletionTokenStream stream = new CompletionTokenStream(tokenStream,
-          preserveSep, preservePositionIncrements, SEP_LABEL, maxGraphExpansions)) {
-        return stream.toAutomaton(tokenStream);
-      }
-    }
-  }
-
 }
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
index da7ae5d..5654bb4 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
@@ -58,7 +58,7 @@
 final class CompletionFieldsConsumer extends FieldsConsumer {
 
   private final String delegatePostingsFormatName;
-  private final Map<String, Long> seenFields = new HashMap<>();
+  private final Map<String, CompletionMetaData> seenFields = new HashMap<>();
   private final SegmentWriteState state;
   private IndexOutput dictOut;
   private FieldsConsumer delegateFieldsConsumer;
@@ -98,7 +98,10 @@
       // store lookup, if needed
       long filePointer = dictOut.getFilePointer();
       if (termWriter.finish(dictOut)) {
-        seenFields.put(field, filePointer);
+        seenFields.put(field, new CompletionMetaData(filePointer,
+            termWriter.minWeight,
+            termWriter.maxWeight,
+            termWriter.type));
       }
     }
   }
@@ -124,10 +127,14 @@
       // write # of seen fields
       indexOut.writeVInt(seenFields.size());
       // write field numbers and dictOut offsets
-      for (Map.Entry<String, Long> seenField : seenFields.entrySet()) {
+      for (Map.Entry<String, CompletionMetaData> seenField : seenFields.entrySet()) {
         FieldInfo fieldInfo = state.fieldInfos.fieldInfo(seenField.getKey());
         indexOut.writeVInt(fieldInfo.number);
-        indexOut.writeVLong(seenField.getValue());
+        CompletionMetaData metaData = seenField.getValue();
+        indexOut.writeVLong(metaData.filePointer);
+        indexOut.writeVLong(metaData.minWeight);
+        indexOut.writeVLong(metaData.maxWeight);
+        indexOut.writeByte(metaData.type);
       }
       CodecUtil.writeFooter(indexOut);
       CodecUtil.writeFooter(dictOut);
@@ -140,17 +147,36 @@
     }
   }
 
+  private static class CompletionMetaData {
+    private final long filePointer;
+    private final long minWeight;
+    private final long maxWeight;
+    private final byte type;
+
+    private CompletionMetaData(long filePointer, long minWeight, long maxWeight, byte type) {
+      this.filePointer = filePointer;
+      this.minWeight = minWeight;
+      this.maxWeight = maxWeight;
+      this.type = type;
+    }
+  }
+
   // builds an FST based on the terms written
   private static class CompletionTermWriter {
 
     private PostingsEnum postingsEnum = null;
     private int docCount = 0;
+    private long maxWeight = 0;
+    private long minWeight = Long.MAX_VALUE;
+    private byte type;
+    private boolean first;
 
     private final BytesRefBuilder scratch = new BytesRefBuilder();
     private final NRTSuggesterBuilder builder;
 
     public CompletionTermWriter() {
       builder = new NRTSuggesterBuilder();
+      first = true;
     }
 
     /**
@@ -160,6 +186,9 @@
     public boolean finish(IndexOutput output) throws IOException {
       boolean stored = builder.store(output);
       assert stored || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
+      if (docCount == 0) {
+        minWeight = 0;
+      }
       return stored;
     }
 
@@ -181,7 +210,17 @@
           scratch.grow(len);
           scratch.setLength(len);
           input.readBytes(scratch.bytes(), 0, scratch.length());
-          builder.addEntry(docID, scratch.get(), input.readVLong() - 1);
+          long weight = input.readVInt() - 1;
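+          // the stored value is offset by +1, so subtract 1 to recover the weight; per-field min/max are tracked below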
+          maxWeight = Math.max(maxWeight, weight);
+          minWeight = Math.min(minWeight, weight);
+          byte type = input.readByte();
+          if (first) {
+            this.type = type;
+            first = false;
+          } else if (this.type != type) {
+            throw new IllegalArgumentException("single field name has mixed types");
+          }
+          builder.addEntry(docID, scratch.get(), weight);
         }
         docFreq++;
         docCount = Math.max(docCount, docFreq + 1);
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
index a205826..1ef3d5f 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
@@ -30,7 +30,6 @@
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.Terms;
@@ -98,9 +97,12 @@
       for (int i = 0; i < numFields; i++) {
         int fieldNumber = index.readVInt();
         long offset = index.readVLong();
+        long minWeight = index.readVLong();
+        long maxWeight = index.readVLong();
+        byte type = index.readByte();
         FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNumber);
         // we don't load the FST yet
-        readers.put(fieldInfo.name, new CompletionsTermsReader(offset));
+        readers.put(fieldInfo.name, new CompletionsTermsReader(dictIn, offset, minWeight, maxWeight, type));
       }
       CodecUtil.checkFooter(index);
       success = true;
@@ -161,7 +163,11 @@
 
   @Override
   public Terms terms(String field) throws IOException {
-    return new CompletionTerms(delegateFieldsProducer.terms(field), readers.get(field));
+    Terms terms = delegateFieldsProducer.terms(field);
+    if (terms == null) {
+      return null;
+    }
+    return new CompletionTerms(terms, readers.get(field));
   }
 
   @Override
@@ -169,60 +175,4 @@
     return readers.size();
   }
 
-  private class CompletionsTermsReader implements Accountable {
-    private final long offset;
-    private NRTSuggester suggester;
-
-    public CompletionsTermsReader(long offset) throws IOException {
-      assert offset >= 0l && offset < dictIn.length();
-      this.offset = offset;
-    }
-
-    public synchronized NRTSuggester suggester() throws IOException {
-      if (suggester == null) {
-        try (IndexInput dictClone = dictIn.clone()) { // let multiple fields load concurrently
-          dictClone.seek(offset);
-          suggester = NRTSuggester.load(dictClone);
-        }
-      }
-      return suggester;
-    }
-
-    @Override
-    public long ramBytesUsed() {
-      return (suggester != null) ? suggester.ramBytesUsed() : 0;
-    }
-
-    @Override
-    public Collection<Accountable> getChildResources() {
-      return Collections.emptyList();
-    }
-  }
-
-  /**
-   * Thin wrapper over {@link org.apache.lucene.index.Terms} with
-   * a {@link NRTSuggester}
-   */
-  public static class CompletionTerms extends FilterLeafReader.FilterTerms {
-
-    private final CompletionsTermsReader reader;
-
-    public CompletionTerms(Terms in, CompletionsTermsReader reader) {
-      super(in);
-      this.reader = reader;
-    }
-
-    /**
-     * Returns a {@link NRTSuggester} for the field
-     * or <code>null</code> if no FST
-     * was indexed for this field
-     */
-    public NRTSuggester suggester() throws IOException {
-      if (reader == null) {
-        return null;
-      }
-      return reader.suggester();
-    }
-  }
-
 }
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java
index 89b87bc..d2e3e9f 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionPostingsFormat.java
@@ -68,9 +68,12 @@
  *   <li>CompletionIndex (.cmp) --&gt; Header, NumSuggestFields, Entry<sup>NumSuggestFields</sup>, Footer</li>
  *   <li>Header --&gt; {@link CodecUtil#writeHeader CodecHeader}</li>
  *   <li>NumSuggestFields --&gt; {@link DataOutput#writeVInt Uint32}</li>
- *   <li>Entry --&gt; FieldNumber, CompletionDictionaryOffset</li>
+ *   <li>Entry --&gt; FieldNumber, CompletionDictionaryOffset, MinWeight, MaxWeight, Type</li>
  *   <li>FieldNumber --&gt; {@link DataOutput#writeVInt Uint32}</li>
  *   <li>CompletionDictionaryOffset --&gt; {@link DataOutput#writeVLong  Uint64}</li>
+ *   <li>MinWeight --&gt; {@link DataOutput#writeVLong  Uint64}</li>
+ *   <li>MaxWeight --&gt; {@link DataOutput#writeVLong  Uint64}</li>
+ *   <li>Type --&gt; {@link DataOutput#writeByte  Byte}</li>
  *   <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
  * </ul>
  * <p>Notes:</p>
@@ -80,6 +83,8 @@
  *   <li>NumSuggestFields is the number of suggest fields indexed</li>
  *   <li>FieldNumber is the fields number from {@link FieldInfos}. (.fnm)</li>
  *   <li>CompletionDictionaryOffset is the file offset of a field's FST in CompletionDictionary (.lkp)</li>
+ *   <li>MinWeight and MaxWeight are the global minimum and maximum weight for the field</li>
+ *   <li>Type indicates if the suggester has context or not</li>
  * </ul>
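+ * <p>A sketch of how a single Entry is decoded when the index is opened (mirrors the reading code
+ * in CompletionFieldsProducer; variable names are illustrative):</p>
+ * <pre class="prettyprint">
+ *   int fieldNumber = index.readVInt();
+ *   long offset     = index.readVLong();
+ *   long minWeight  = index.readVLong();
+ *   long maxWeight  = index.readVLong();
+ *   byte type       = index.readByte();
+ * </pre>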
  *
  * @lucene.experimental
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java
new file mode 100644
index 0000000..eb2ba22
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java
@@ -0,0 +1,170 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+
+import static org.apache.lucene.search.suggest.document.CompletionAnalyzer.HOLE_CHARACTER;
+import static org.apache.lucene.search.suggest.document.CompletionAnalyzer.SEP_LABEL;
+
+/**
+ * Abstract {@link Query} that matches documents containing terms with a specified prefix
+ * filtered by {@link Filter}. This should be used to query against any {@link SuggestField}s
+ * or {@link ContextSuggestField}s of documents.
+ * <p>
+ * Use {@link SuggestIndexSearcher#suggest(CompletionQuery, int)} to execute any query
+ * that provides a concrete implementation of this query. The example below shows how to use
+ * this query to retrieve the top 5 documents.
+ *
+ * <pre class="prettyprint">
+ *  SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
+ *  TopSuggestDocs suggestDocs = searcher.suggest(query, 5);
+ * </pre>
+ * This query rewrites to an appropriate {@link CompletionQuery} depending on the
+ * type ({@link SuggestField} or {@link ContextSuggestField}) of the field the query is run against.
+ *
+ * @lucene.experimental
+ */
+public abstract class CompletionQuery extends Query {
+
+  /**
+   * Term to query against
+   */
+  private final Term term;
+
+  /**
+   * Filter for document scoping
+   */
+  private final Filter filter;
+
+  /**
+   * Creates a base Completion query against a <code>term</code>
+   * with a <code>filter</code> to scope the documents
+   */
+  protected CompletionQuery(Term term, Filter filter) {
+    validate(term.text());
+    this.term = term;
+    this.filter = filter;
+  }
+
+  /**
+   * Returns the filter for the query, used to
+   * suggest completions on a subset of indexed documents
+   */
+  public Filter getFilter() {
+    return filter;
+  }
+
+  /**
+   * Returns the field name this query should
+   * be run against
+   */
+  public String getField() {
+    return term.field();
+  }
+
+  /**
+   * Returns the term to be queried against
+   */
+  public Term getTerm() {
+    return term;
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    byte type = 0;
+    boolean first = true;
+    Terms terms;
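+    // scan every segment to find out whether the field was indexed as a plain SuggestField
+    // or as a context-enabled ContextSuggestField (the per-field type is exposed by CompletionTerms)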
+    for (LeafReaderContext context : reader.leaves()) {
+      LeafReader leafReader = context.reader();
+      try {
+        if ((terms = leafReader.terms(getField())) == null) {
+          continue;
+        }
+      } catch (IOException e) {
+        continue;
+      }
+      if (terms instanceof CompletionTerms) {
+        CompletionTerms completionTerms = (CompletionTerms) terms;
+        byte t = completionTerms.getType();
+        if (first) {
+          type = t;
+          first = false;
+        } else if (type != t) {
+          throw new IllegalStateException(getField() + " has values of multiple types");
+        }
+      }
+    }
+
+    if (first == false) {
+      if (this instanceof ContextQuery) {
+        if (type == SuggestField.TYPE) {
+          throw new IllegalStateException(this.getClass().getSimpleName()
+              + " can not be executed against a non context-enabled SuggestField: "
+              + getField());
+        }
+      } else {
+        if (type == ContextSuggestField.TYPE) {
+          return new ContextQuery(this);
+        }
+      }
+    }
+    return this;
+  }
+
+  @Override
+  public String toString(String field) {
+    StringBuilder buffer = new StringBuilder();
+    if (!term.field().equals(field)) {
+      buffer.append(term.field());
+      buffer.append(":");
+    }
+    buffer.append(term.text());
+    buffer.append('*');
+    if (filter != null) {
+      buffer.append(",");
+      buffer.append("filter");
+      buffer.append(":");
+      buffer.append(filter.toString(field));
+    }
+    return buffer.toString();
+  }
+
+  private void validate(String termText) {
+    for (int i = 0; i < termText.length(); i++) {
+      switch (termText.charAt(i)) {
+        case HOLE_CHARACTER:
+          throw new IllegalArgumentException(
+              "Term text cannot contain HOLE character U+001E; this character is reserved");
+        case SEP_LABEL:
+          throw new IllegalArgumentException(
+              "Term text cannot contain unit separator character U+001F; this character is reserved");
+        default:
+          break;
+      }
+    }
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java
new file mode 100644
index 0000000..d1b6679
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionScorer.java
@@ -0,0 +1,103 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.automaton.Automaton;
+
+/**
+ * Expert: Responsible for executing the query against an
+ * appropriate suggester and collecting the results
+ * via a collector.
+ *
+ * {@link #score(LeafCollector, int, int)} is called
+ * for each leaf reader.
+ *
+ * {@link #accept(int)} and {@link #score(float, float)}
+ * are called for every matched completion (i.e. document)
+ *
+ * @lucene.experimental
+ */
+public class CompletionScorer extends BulkScorer {
+  private final NRTSuggester suggester;
+  private final Bits acceptDocs;
+
+  // values accessed by suggester
+  /** weight that created this scorer */
+  protected final CompletionWeight weight;
+  final LeafReader reader;
+  final boolean filtered;
+  final Automaton automaton;
+
+  /**
+   * Creates a scorer for a field-specific <code>suggester</code> scoped by <code>acceptDocs</code>
+   */
+  protected CompletionScorer(final CompletionWeight weight, final NRTSuggester suggester,
+                             final LeafReader reader, final Bits acceptDocs,
+                             final boolean filtered, final Automaton automaton) throws IOException {
+    this.weight = weight;
+    this.suggester = suggester;
+    this.reader = reader;
+    this.automaton = automaton;
+    this.filtered = filtered;
+    this.acceptDocs = acceptDocs;
+  }
+
+  @Override
+  public int score(LeafCollector collector, int min, int max) throws IOException {
+    if (!(collector instanceof TopSuggestDocsCollector)) {
+      throw new IllegalArgumentException("collector is not of type TopSuggestDocsCollector");
+    }
+    suggester.lookup(this, ((TopSuggestDocsCollector) collector));
+    return max;
+  }
+
+  @Override
+  public long cost() {
+    return 0;
+  }
+
+  /**
+   * Returns true if a document with <code>docID</code> is accepted,
+   * false if the docID maps to a deleted
+   * document or has been filtered out
+   */
+  public final boolean accept(int docID) {
+    return acceptDocs == null || acceptDocs.get(docID);
+  }
+
+  /**
+   * Returns the score for a matched completion
+   * based on the query time boost and the
+   * index time weight.
+   */
+  public float score(float weight, float boost) {
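+    // a zero boost or a zero weight contributes nothing, so fall back to the other factor instead of multiplying by 0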
+    if (boost == 0f) {
+      return weight;
+    }
+    if (weight == 0f) {
+      return boost;
+    }
+    return weight * boost;
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java
new file mode 100644
index 0000000..6accac4
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java
@@ -0,0 +1,74 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.Terms;
+
+/**
+ * Wrapped {@link org.apache.lucene.index.Terms}
+ * used by {@link SuggestField} and {@link ContextSuggestField}
+ * to access corresponding suggester and their attributes
+ *
+ * @lucene.experimental
+ */
+public final class CompletionTerms extends FilterLeafReader.FilterTerms {
+
+  private final CompletionsTermsReader reader;
+
+  /**
+   * Creates a CompletionTerms based on a {@link CompletionsTermsReader}
+   */
+  CompletionTerms(Terms in, CompletionsTermsReader reader) {
+    super(in);
+    this.reader = reader;
+  }
+
+  /**
+   * Returns the type of FST, either {@link SuggestField#TYPE} or
+   * {@link ContextSuggestField#TYPE}
+   */
+  public byte getType() {
+    return (reader != null) ? reader.type : SuggestField.TYPE;
+  }
+
+  /**
+   * Returns the minimum weight of all entries in the weighted FST
+   */
+  public long getMinWeight() {
+    return (reader != null) ? reader.minWeight : 0;
+  }
+
+  /**
+   * Returns the maximum weight of all entries in the weighted FST
+   */
+  public long getMaxWeight() {
+    return (reader != null) ? reader.maxWeight : 0;
+  }
+
+  /**
+   * Returns a {@link NRTSuggester} for the field
+   * or <code>null</code> if no FST
+   * was indexed for this field
+   */
+  public NRTSuggester suggester() throws IOException {
+    return (reader != null) ? reader.suggester() : null;
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java
index 3acd713..d5adf68 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTokenStream.java
@@ -18,7 +18,7 @@
  */
 
 import java.io.IOException;
-import java.util.HashSet;
+import java.util.BitSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Set;
@@ -52,18 +52,18 @@
  * The token stream uses a {@link org.apache.lucene.analysis.tokenattributes.PayloadAttribute} to store
  * a completion's payload (see {@link CompletionTokenStream#setPayload(org.apache.lucene.util.BytesRef)})
  *
+ * @lucene.experimental
  */
-final class CompletionTokenStream extends TokenStream {
+public final class CompletionTokenStream extends TokenStream {
 
   private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);
   private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);
   private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);
 
   private final TokenStream input;
-  private final boolean preserveSep;
-  private final boolean preservePositionIncrements;
-  private final int sepLabel;
-  private final int maxGraphExpansions;
+  final boolean preserveSep;
+  final boolean preservePositionIncrements;
+  final int maxGraphExpansions;
 
   private BytesRef payload;
   private Iterator<IntsRef> finiteStrings;
@@ -77,29 +77,20 @@
    * The token stream <code>input</code> is converted to an automaton
    * with the default settings of {@link org.apache.lucene.search.suggest.document.CompletionAnalyzer}
    */
-  public CompletionTokenStream(TokenStream input) {
-    this(input, DEFAULT_PRESERVE_SEP, DEFAULT_PRESERVE_POSITION_INCREMENTS, SEP_LABEL, DEFAULT_MAX_GRAPH_EXPANSIONS);
+  CompletionTokenStream(TokenStream input) {
+    this(input, DEFAULT_PRESERVE_SEP, DEFAULT_PRESERVE_POSITION_INCREMENTS, DEFAULT_MAX_GRAPH_EXPANSIONS);
   }
 
-  CompletionTokenStream(TokenStream input, boolean preserveSep, boolean preservePositionIncrements, int sepLabel, int maxGraphExpansions) {
+  CompletionTokenStream(TokenStream input, boolean preserveSep, boolean preservePositionIncrements, int maxGraphExpansions) {
     // Don't call the super(input) ctor - this is a true delegate and has a new attribute source since we consume
     // the input stream entirely in toFiniteStrings(input)
     this.input = input;
     this.preserveSep = preserveSep;
     this.preservePositionIncrements = preservePositionIncrements;
-    this.sepLabel = sepLabel;
     this.maxGraphExpansions = maxGraphExpansions;
   }
 
   /**
-   * Returns a separator label that is reserved for the payload
-   * in {@link CompletionTokenStream#setPayload(org.apache.lucene.util.BytesRef)}
-   */
-  public int sepLabel() {
-    return sepLabel;
-  }
-
-  /**
    * Sets a payload available throughout successive token stream enumeration
    */
   public void setPayload(BytesRef payload) {
@@ -111,7 +102,7 @@
     clearAttributes();
     if (finiteStrings == null) {
       //TODO: make this return a Iterator<IntsRef> instead?
-      Automaton automaton = toAutomaton(input);
+      Automaton automaton = toAutomaton();
       Set<IntsRef> strings = Operations.getFiniteStrings(automaton, maxGraphExpansions);
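+      // each finite string (one accepted path through the analyzed-form automaton) is emitted as a separate token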
 
       posInc = strings.size();
@@ -165,9 +156,17 @@
   }
 
   /**
-   * Converts <code>tokenStream</code> to an automaton
+   * Converts the token stream to an automaton,
+   * treating the transition labels as UTF-8
    */
-  public Automaton toAutomaton(TokenStream tokenStream) throws IOException {
+  public Automaton toAutomaton() throws IOException {
+    return toAutomaton(false);
+  }
+
+  /**
+   * Converts the tokenStream to an automaton
+   */
+  public Automaton toAutomaton(boolean unicodeAware) throws IOException {
     // TODO refactor this
     // maybe we could hook up a modified automaton from TermAutomatonQuery here?
     Automaton automaton = null;
@@ -184,10 +183,11 @@
         tsta = new TokenStreamToAutomaton();
       }
       tsta.setPreservePositionIncrements(preservePositionIncrements);
+      tsta.setUnicodeArcs(unicodeAware);
 
-      automaton = tsta.toAutomaton(tokenStream);
+      automaton = tsta.toAutomaton(input);
     } finally {
-      IOUtils.closeWhileHandlingException(tokenStream);
+      IOUtils.closeWhileHandlingException(input);
     }
 
     // TODO: we can optimize this somewhat by determinizing
@@ -281,11 +281,12 @@
   }
 
   private static int[] topoSortStates(Automaton a) {
-    int[] states = new int[a.getNumStates()];
-    final Set<Integer> visited = new HashSet<>();
+    int numStates = a.getNumStates();
+    int[] states = new int[numStates];
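+    // visited states tracked by state id in a BitSet (automaton states are dense ints starting at 0)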
+    final BitSet visited = new BitSet(numStates);
     final LinkedList<Integer> worklist = new LinkedList<>();
     worklist.add(0);
-    visited.add(0);
+    visited.set(0);
     int upto = 0;
     states[upto] = 0;
     upto++;
@@ -293,10 +294,10 @@
     while (worklist.size() > 0) {
       int s = worklist.removeFirst();
       int count = a.initTransition(s, t);
-      for (int i = 0; i < count; i++) {
+      for (int i=0;i<count;i++) {
         a.getNextTransition(t);
-        if (!visited.contains(t.dest)) {
-          visited.add(t.dest);
+        if (!visited.get(t.dest)) {
+          visited.set(t.dest);
           worklist.add(t.dest);
           states[upto++] = t.dest;
         }
@@ -305,21 +306,37 @@
     return states;
   }
 
-  public interface ByteTermAttribute extends TermToBytesRefAttribute {
+  /**
+   * Attribute providing access to the term builder and UTF-16 conversion
+   */
+  private interface ByteTermAttribute extends TermToBytesRefAttribute {
     // marker interface
 
     /**
-     * Return the builder from which the term is derived.
+     * Returns the builder from which the term is derived.
      */
-    public BytesRefBuilder builder();
+    BytesRefBuilder builder();
 
-    public CharSequence toUTF16();
+    /**
+     * Returns the term represented as UTF-16
+     */
+    CharSequence toUTF16();
   }
 
+  /**
+   * Custom attribute implementation for completion token stream
+   */
   public static final class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {
     private final BytesRefBuilder bytes = new BytesRefBuilder();
     private CharsRefBuilder charsRef;
 
+    /**
+     * Sole constructor (a no-op).
+     */
+    public ByteTermAttributeImpl() {
+    }
+
     @Override
     public void fillBytesRef() {
       // does nothing - we change in place
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java
new file mode 100644
index 0000000..6c0ad30
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java
@@ -0,0 +1,160 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.automaton.Automaton;
+
+/**
+ * Expert: the Weight for CompletionQuery, used to
+ * score and explain these queries.
+ *
+ * Subclasses can override {@link #setNextMatch(IntsRef)},
+ * {@link #boost()} and {@link #context()}
+ * to calculate the boost and extract the context of
+ * a matched path prefix.
+ *
+ * @lucene.experimental
+ */
+public class CompletionWeight extends Weight {
+  private final CompletionQuery completionQuery;
+  private final Automaton automaton;
+
+  /**
+   * Creates a weight for <code>query</code> with an <code>automaton</code>,
+   * using the <code>reader</code> for index stats
+   */
+  public CompletionWeight(final CompletionQuery query, final Automaton automaton) throws IOException {
+    super(query);
+    this.completionQuery = query;
+    this.automaton = automaton;
+  }
+
+  /**
+   * Returns the automaton specified
+   * by the {@link CompletionQuery}
+   *
+   * @return query automaton
+   */
+  public Automaton getAutomaton() {
+    return automaton;
+  }
+
+  @Override
+  public BulkScorer bulkScorer(final LeafReaderContext context, Bits acceptDocs) throws IOException {
+    final LeafReader reader = context.reader();
+    final Terms terms;
+    final NRTSuggester suggester;
+    if ((terms = reader.terms(completionQuery.getField())) == null) {
+      return null;
+    }
+    if (terms instanceof CompletionTerms) {
+      CompletionTerms completionTerms = (CompletionTerms) terms;
+      if ((suggester = completionTerms.suggester()) == null) {
+        // a segment can have a null suggester
+        // i.e. no FST was built
+        return null;
+      }
+    } else {
+      throw new IllegalArgumentException(completionQuery.getField() + " is not a SuggestField");
+    }
+
+    DocIdSet docIdSet = null;
+    Filter filter = completionQuery.getFilter();
+    if (filter != null) {
+      docIdSet = filter.getDocIdSet(context, acceptDocs);
+      if (docIdSet == null || docIdSet.iterator() == null) {
+        // filter matches no docs in the current leaf
+        return null;
+      } else if (docIdSet.bits() == null) {
+        throw new IllegalArgumentException("DocIDSet does not provide random access interface");
+      }
+    }
+    Bits acceptDocBits = (docIdSet != null) ? docIdSet.bits() : acceptDocs;
+    return new CompletionScorer(this, suggester, reader, acceptDocBits, filter != null, automaton);
+  }
+
+  /**
+   * Set for every partial path in the index that matched the query
+   * automaton.
+   *
+   * Subclasses should override {@link #boost()} and {@link #context()}
+   * to return an appropriate value with respect to the current pathPrefix.
+   *
+   * @param pathPrefix the prefix of a matched path
+   */
+  protected void setNextMatch(IntsRef pathPrefix) {
+  }
+
+  /**
+   * Returns the boost of the partial path set by {@link #setNextMatch(IntsRef)}
+   *
+   * @return suggestion query-time boost
+   */
+  protected float boost() {
+    return 0;
+  }
+
+  /**
+   * Returns the context of the partial path set by {@link #setNextMatch(IntsRef)}
+   *
+   * @return suggestion context
+   */
+  protected CharSequence context() {
+    return null;
+  }
+
+  @Override
+  public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void extractTerms(Set<Term> terms) {
+    // no-op
+  }
+
+  @Override
+  public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+    //TODO
+    return null;
+  }
+
+  @Override
+  public float getValueForNormalization() throws IOException {
+    return 0;
+  }
+
+  @Override
+  public void normalize(float norm, float topLevelBoost) {
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java
new file mode 100644
index 0000000..35e2d46
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionsTermsReader.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.Accountable;
+
+/**
+ * Holder for suggester and field-level info
+ * for a suggest field
+ *
+ * @lucene.experimental
+ */
+public final class CompletionsTermsReader implements Accountable {
+  /** Minimum entry weight for the suggester */
+  public final long minWeight;
+  /** Maximum entry weight for the suggester */
+  public final long maxWeight;
+  /** type of suggester (context-enabled or not) */
+  public final byte type;
+  private final IndexInput dictIn;
+  private final long offset;
+
+  private NRTSuggester suggester;
+
+  /**
+   * Creates a CompletionsTermsReader to load a field-specific suggester
+   * from the index <code>dictIn</code> with <code>offset</code>
+   */
+  CompletionsTermsReader(IndexInput dictIn, long offset, long minWeight, long maxWeight, byte type) throws IOException {
+    assert minWeight <= maxWeight;
+    assert offset >= 0L && offset < dictIn.length();
+    this.dictIn = dictIn;
+    this.offset = offset;
+    this.minWeight = minWeight;
+    this.maxWeight = maxWeight;
+    this.type = type;
+  }
+
+  /**
+   * Returns the suggester for a field, if not loaded already, loads
+   * the appropriate suggester from CompletionDictionary
+   */
+  public synchronized NRTSuggester suggester() throws IOException {
+    if (suggester == null) {
+      try (IndexInput dictClone = dictIn.clone()) { // let multiple fields load concurrently
+        dictClone.seek(offset);
+        suggester = NRTSuggester.load(dictClone);
+      }
+    }
+    return suggester;
+  }
+
+  @Override
+  public long ramBytesUsed() {
+    return (suggester != null) ? suggester.ramBytesUsed() : 0;
+  }
+
+  @Override
+  public Collection<Accountable> getChildResources() {
+    return Collections.emptyList();
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
new file mode 100644
index 0000000..1abbb4f
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
@@ -0,0 +1,305 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.RegExp;
+import org.apache.lucene.util.fst.Util;
+
+/**
+ * A {@link CompletionQuery} that matches documents specified by
+ * a wrapped {@link CompletionQuery} supporting boosting and/or filtering
+ * by specified contexts.
+ * <p>
+ * Use this query against {@link ContextSuggestField}
+ * <p>
+ * Example of using a {@link CompletionQuery} with boosted
+ * contexts:
+ * <pre class="prettyprint">
+ *  CompletionQuery completionQuery = ...;
+ *  ContextQuery query = new ContextQuery(completionQuery);
+ *  query.addContext("context1", 2);
+ *  query.addContext("context2", 1);
+ * </pre>
+ * <p>
+ * NOTE:
+ * <ul>
+ *   <li>
+ *    This query can be constructed with a
+ *    {@link PrefixCompletionQuery}, {@link RegexCompletionQuery}
+ *    or {@link FuzzyCompletionQuery}.
+ *   </li>
+ *   <li>
+ *     To suggest across all contexts with the same boost,
+ *     use '*' as the context in {@link #addContext(CharSequence)}.
+ *     This can be combined with specific contexts with different boosts.
+ *   </li>
+ *   <li>
+ *     To apply the same boost to multiple contexts sharing the same prefix,
+ *     use {@link #addContext(CharSequence, float, boolean)} with the common
+ *     context prefix, boost and set <code>exact</code> to false.
+ *   </li>
+ *   <li>
+ *     Using this query against a {@link SuggestField} (not context enabled)
+ *     would yield results ignoring any context filtering/boosting
+ *   </li>
+ * </ul>
+ *
+ * @lucene.experimental
+ */
+public class ContextQuery extends CompletionQuery {
+  private Map<CharSequence, ContextMetaData> contexts;
+  /** Inner completion query */
+  protected CompletionQuery query;
+
+  /**
+   * Constructs a context completion query that matches
+   * documents specified by <code>query</code>.
+   * <p>
+   * Use {@link #addContext(CharSequence, float, boolean)}
+   * to add context(s) with boost
+   */
+  public ContextQuery(CompletionQuery query) {
+    super(query.getTerm(), query.getFilter());
+    if (query instanceof ContextQuery) {
+      throw new IllegalArgumentException("'query' parameter must not be of type "
+              + this.getClass().getSimpleName());
+    }
+    this.query = query;
+    contexts = new HashMap<>();
+  }
+
+  /**
+   * Adds an exact context with default boost of 1
+   */
+  public void addContext(CharSequence context) {
+    addContext(context, 1f, true);
+  }
+
+  /**
+   * Adds an exact context with boost
+   */
+  public void addContext(CharSequence context, float boost) {
+    addContext(context, boost, true);
+  }
+
+  /**
+   * Adds a context with boost, set <code>exact</code> to false
+   * if the context is a prefix of any indexed contexts
+   */
+  public void addContext(CharSequence context, float boost, boolean exact) {
+    if (boost < 0f) {
+      throw new IllegalArgumentException("'boost' must be >= 0");
+    }
+    for (int i = 0; i < context.length(); i++) {
+      if (ContextSuggestField.CONTEXT_SEPARATOR == context.charAt(i)) {
+        throw new IllegalArgumentException("Illegal value [" + context + "] UTF-16 codepoint [0x"
+            + Integer.toHexString((int) context.charAt(i))+ "] at position " + i + " is a reserved character");
+      }
+    }
+    contexts.put(context, new ContextMetaData(boost, exact));
+  }
+
+  @Override
+  public String toString(String field) {
+    StringBuilder buffer = new StringBuilder();
+    for (CharSequence context : contexts.keySet()) {
+      if (buffer.length() != 0) {
+        buffer.append(",");
+      } else {
+        buffer.append("contexts");
+        buffer.append(":[");
+      }
+      buffer.append(context);
+      ContextMetaData metaData = contexts.get(context);
+      if (metaData.exact == false) {
+        buffer.append("*");
+      }
+      if (metaData.boost != 0) {
+        buffer.append("^");
+        buffer.append(Float.toString(metaData.boost));
+      }
+    }
+    if (buffer.length() != 0) {
+      buffer.append("]");
+      buffer.append(",");
+    }
+    return buffer.toString() + query.toString(field);
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+    IntsRefBuilder scratch = new IntsRefBuilder();
+    final Map<IntsRef, Float> contextMap = new HashMap<>(contexts.size());
+    final TreeSet<Integer> contextLengths = new TreeSet<>();
+    final CompletionWeight innerWeight = ((CompletionWeight) query.createWeight(searcher, needsScores));
+    Automaton contextsAutomaton = null;
+    Automaton gap = Automata.makeChar(ContextSuggestField.CONTEXT_SEPARATOR);
+    // if separators are preserved the fst contains a SEP_LABEL
+    // behind each gap. To have a matching automaton, we need to
+    // include the SEP_LABEL in the query as well
+    gap = Operations.concatenate(gap, Operations.optional(Automata.makeChar(CompletionAnalyzer.SEP_LABEL)));
+    final Automaton prefixAutomaton = Operations.concatenate(gap, innerWeight.getAutomaton());
+    final Automaton matchAllAutomaton = new RegExp(".*").toAutomaton();
+    for (Map.Entry<CharSequence, ContextMetaData> entry : contexts.entrySet()) {
+      Automaton contextAutomaton;
+      if (entry.getKey().equals("*")) {
+        contextAutomaton = Operations.concatenate(matchAllAutomaton, prefixAutomaton);
+      } else {
+        BytesRef ref = new BytesRef(entry.getKey());
+        ContextMetaData contextMetaData = entry.getValue();
+        contextMap.put(IntsRef.deepCopyOf(Util.toIntsRef(ref, scratch)), contextMetaData.boost);
+        contextLengths.add(scratch.length());
+        contextAutomaton = Automata.makeString(entry.getKey().toString());
+        if (contextMetaData.exact) {
+          contextAutomaton = Operations.concatenate(contextAutomaton, prefixAutomaton);
+        } else {
+          contextAutomaton = Operations.concatenate(Arrays.asList(contextAutomaton,
+              matchAllAutomaton,
+              prefixAutomaton));
+        }
+      }
+      if (contextsAutomaton == null) {
+        contextsAutomaton = contextAutomaton;
+      } else {
+        contextsAutomaton = Operations.union(contextsAutomaton, contextAutomaton);
+      }
+    }
+    if (contexts.size() == 0) {
+      addContext("*");
+      contextsAutomaton = Operations.concatenate(matchAllAutomaton, prefixAutomaton);
+    }
+    contextsAutomaton = Operations.determinize(contextsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
+    int[] contextLengthArray = new int[contextLengths.size()];
+    final Iterator<Integer> iterator = contextLengths.descendingIterator();
+    for (int i = 0; iterator.hasNext(); i++) {
+      contextLengthArray[i] = iterator.next();
+    }
+    return new ContextCompletionWeight(this, contextsAutomaton, innerWeight, contextMap, contextLengthArray);
+  }
+
+  private static class ContextMetaData {
+    private final float boost;
+    private final boolean exact;
+
+    private ContextMetaData(float boost, boolean exact) {
+      this.boost = boost;
+      this.exact = exact;
+    }
+  }
+
+  private class ContextCompletionWeight extends CompletionWeight {
+
+    private final Map<IntsRef, Float> contextMap;
+    private final int[] contextLengths;
+    private final CompletionWeight innerWeight;
+    private final BytesRefBuilder scratch = new BytesRefBuilder();
+
+    private float currentBoost;
+    private CharSequence currentContext;
+
+    public ContextCompletionWeight(CompletionQuery query, Automaton automaton, CompletionWeight innerWeight,
+                                   Map<IntsRef, Float> contextMap,
+                                   int[] contextLengths) throws IOException {
+      super(query, automaton);
+      this.contextMap = contextMap;
+      this.contextLengths = contextLengths;
+      this.innerWeight = innerWeight;
+    }
+
+    @Override
+    protected void setNextMatch(IntsRef pathPrefix) {
+      IntsRef ref = pathPrefix.clone();
+
+      // check if the pathPrefix matches any
+      // defined context, longer context first
+      for (int contextLength : contextLengths) {
+        if (contextLength > pathPrefix.length) {
+          continue;
+        }
+        ref.length = contextLength;
+        if (contextMap.containsKey(ref)) {
+          currentBoost = contextMap.get(ref);
+          ref.length = pathPrefix.length;
+          ref.offset = contextLength;
+          while (ref.ints[ref.offset] != ContextSuggestField.CONTEXT_SEPARATOR) {
+            ref.offset++;
+            assert ref.offset < ref.length;
+          }
+          assert ref.ints[ref.offset] == ContextSuggestField.CONTEXT_SEPARATOR :
+              "expected CONTEXT_SEPARATOR at offset=" + ref.offset;
+          if (ref.offset > pathPrefix.offset) {
+            currentContext = Util.toBytesRef(new IntsRef(pathPrefix.ints, pathPrefix.offset, ref.offset), scratch).utf8ToString();
+          } else {
+            currentContext = null;
+          }
+          ref.offset++;
+          if (ref.ints[ref.offset] == CompletionAnalyzer.SEP_LABEL) {
+            ref.offset++;
+          }
+          innerWeight.setNextMatch(ref);
+          return;
+        }
+      }
+      // unknown context
+      ref.length = pathPrefix.length;
+      currentBoost = contexts.get("*").boost;
+      for (int i = pathPrefix.offset; i < pathPrefix.length; i++) {
+        if (pathPrefix.ints[i] == ContextSuggestField.CONTEXT_SEPARATOR) {
+          if (i > pathPrefix.offset) {
+            currentContext = Util.toBytesRef(new IntsRef(pathPrefix.ints, pathPrefix.offset, i), scratch).utf8ToString();
+          } else {
+            currentContext = null;
+          }
+          ref.offset = ++i;
+          assert ref.offset < ref.length : "input should not end with the context separator";
+          if (pathPrefix.ints[i] == CompletionAnalyzer.SEP_LABEL) {
+            ref.offset++;
+            assert ref.offset < ref.length : "input should not end with a context separator followed by SEP_LABEL";
+          }
+          ref.length -= ref.offset;
+          innerWeight.setNextMatch(ref);
+        }
+      }
+    }
+
+    @Override
+    protected CharSequence context() {
+      return currentContext;
+    }
+
+    @Override
+    protected float boost() {
+      return currentBoost + innerWeight.boost();
+    }
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java
new file mode 100644
index 0000000..dd262e4
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java
@@ -0,0 +1,165 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+
+/**
+ * {@link SuggestField} which additionally takes in a set of
+ * contexts. Example usage of adding a suggestion with contexts is as follows:
+ *
+ * <pre class="prettyprint">
+ *  document.add(
+ *   new ContextSuggestField(name, "suggestion", 4, "context1", "context2"));
+ * </pre>
+ *
+ * Use {@link ContextQuery} to boost and/or filter suggestions
+ * at query-time. Use {@link PrefixCompletionQuery}, {@link RegexCompletionQuery}
+ * or {@link FuzzyCompletionQuery} if context boost/filtering
+ * are not needed.
+ *
+ * @lucene.experimental
+ */
+public class ContextSuggestField extends SuggestField {
+
+  /**
+   * Separator used between context value and the suggest field value
+   */
+  public static final int CONTEXT_SEPARATOR = '\u001D';
+  static final byte TYPE = 1;
+
+  private final Set<CharSequence> contexts;
+
+  /**
+   * Creates a context-enabled suggest field
+   *
+   * @param name field name
+   * @param value field value to get suggestion on
+   * @param weight field weight
+   * @param contexts associated contexts
+   *
+   * @throws IllegalArgumentException if either the name or value is null,
+   * if value is an empty string, if the weight is negative, or if value or
+   * contexts contain any reserved characters
+   */
+  public ContextSuggestField(String name, String value, int weight, CharSequence... contexts) {
+    super(name, value, weight);
+    validate(value);
+    this.contexts = new HashSet<>((contexts != null) ? contexts.length : 0);
+    if (contexts != null) {
+      for (CharSequence context : contexts) {
+        validate(context);
+        this.contexts.add(context);
+      }
+    }
+  }
+
+  @Override
+  protected CompletionTokenStream wrapTokenStream(TokenStream stream) {
+    CompletionTokenStream completionTokenStream;
+    if (stream instanceof CompletionTokenStream) {
+      completionTokenStream = (CompletionTokenStream) stream;
+      completionTokenStream = new CompletionTokenStream(
+          new PrefixTokenFilter(stream, (char) CONTEXT_SEPARATOR, contexts),
+          completionTokenStream.preserveSep,
+          completionTokenStream.preservePositionIncrements,
+          completionTokenStream.maxGraphExpansions);
+    } else {
+      completionTokenStream = new CompletionTokenStream(
+          new PrefixTokenFilter(stream, (char) CONTEXT_SEPARATOR, contexts));
+    }
+    return completionTokenStream;
+  }
+
+  @Override
+  protected byte type() {
+    return TYPE;
+  }
+
+  /**
+   * The {@link PrefixTokenFilter} wraps a {@link TokenStream} and prepends a set
+   * of prefixes to it. The position attribute is not incremented for the prefixes.
+   */
+  private static final class PrefixTokenFilter extends TokenFilter {
+
+    private final char separator;
+    private final CharTermAttribute termAttr = addAttribute(CharTermAttribute.class);
+    private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);
+    private final Iterable<CharSequence> prefixes;
+
+    private Iterator<CharSequence> currentPrefix;
+
+    /**
+     * Create a new {@link PrefixTokenFilter}
+     *
+     * @param input {@link TokenStream} to wrap
+     * @param separator Character used to separate prefixes from other tokens
+     * @param prefixes {@link Iterable} of {@link CharSequence} which keeps all prefixes
+     */
+    public PrefixTokenFilter(TokenStream input, char separator, Iterable<CharSequence> prefixes) {
+      super(input);
+      this.prefixes = prefixes;
+      this.currentPrefix = null;
+      this.separator = separator;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (currentPrefix != null) {
+        if (!currentPrefix.hasNext()) {
+          // all prefixes have been emitted; pass through tokens from the wrapped stream
+          return input.incrementToken();
+        } else {
+          // emit the next prefix at the same position as the previous one
+          posAttr.setPositionIncrement(0);
+        }
+      } else {
+        // first call: start emitting the prefixes at a new position
+        currentPrefix = prefixes.iterator();
+        termAttr.setEmpty();
+        posAttr.setPositionIncrement(1);
+      }
+      termAttr.setEmpty();
+      if (currentPrefix.hasNext()) {
+        termAttr.append(currentPrefix.next());
+      }
+      // terminate the emitted prefix token with the separator
+      termAttr.append(separator);
+      return true;
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      currentPrefix = null;
+    }
+  }
+
+  private void validate(final CharSequence value) {
+    for (int i = 0; i < value.length(); i++) {
+      if (CONTEXT_SEPARATOR == value.charAt(i)) {
+        throw new IllegalArgumentException("Illegal value [" + value + "] UTF-16 codepoint [0x"
+            + Integer.toHexString((int) value.charAt(i))+ "] at position " + i + " is a reserved character");
+      }
+    }
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java
new file mode 100644
index 0000000..3489815
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java
@@ -0,0 +1,252 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.LevenshteinAutomata;
+import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.UTF32ToUTF8;
+
+/**
+ * A {@link CompletionQuery} that matches documents containing terms
+ * within an edit distance of the specified prefix.
+ * <p>
+ * This query boosts documents relative to how similar the indexed terms are to the
+ * provided prefix.
+ * <p>
+ * Example usage of querying an analyzed prefix within an edit distance of 1 of 'subg'
+ * against a field 'suggest_field' is as follows:
+ *
+ * <pre class="prettyprint">
+ *  CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"));
+ * </pre>
+ *
+ * @lucene.experimental
+ */
+public class FuzzyCompletionQuery extends PrefixCompletionQuery {
+
+  /**
+   * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix
+   * parameters in Unicode code points (actual letters)
+   * instead of bytes.
+   */
+  public static final boolean DEFAULT_UNICODE_AWARE = false;
+
+  /**
+   * The default minimum length of the key before any edits are allowed.
+   */
+  public static final int DEFAULT_MIN_FUZZY_LENGTH = 3;
+
+  /**
+   * The default prefix length where edits are not allowed.
+   */
+  public static final int DEFAULT_NON_FUZZY_PREFIX = 1;
+
+  /**
+   * The default maximum number of edits for fuzzy
+   * suggestions.
+   */
+  public static final int DEFAULT_MAX_EDITS = 1;
+
+  /**
+   * The default transposition value passed to {@link LevenshteinAutomata}
+   */
+  public static final boolean DEFAULT_TRANSPOSITIONS = true;
+
+  private final int maxEdits;
+  private final boolean transpositions;
+  private final int nonFuzzyPrefix;
+  private final int minFuzzyLength;
+  private final boolean unicodeAware;
+  private final int maxDeterminizedStates;
+
+  /**
+   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, Filter)}
+   * with no filter
+   */
+  public FuzzyCompletionQuery(Analyzer analyzer, Term term) {
+    this(analyzer, term, null);
+  }
+
+  /**
+   * Calls {@link FuzzyCompletionQuery#FuzzyCompletionQuery(Analyzer, Term, Filter,
+   * int, boolean, int, int, boolean, int)}
+   * with defaults for <code>maxEdits</code>, <code>transpositions</code>,
+   * <code>nonFuzzyPrefix</code>, <code>minFuzzyLength</code>,
+   * <code>unicodeAware</code> and <code>maxDeterminizedStates</code>
+   *
+   * See {@link #DEFAULT_MAX_EDITS}, {@link #DEFAULT_TRANSPOSITIONS},
+   * {@link #DEFAULT_NON_FUZZY_PREFIX}, {@link #DEFAULT_MIN_FUZZY_LENGTH},
+   * {@link #DEFAULT_UNICODE_AWARE} and {@link Operations#DEFAULT_MAX_DETERMINIZED_STATES}
+   * for defaults
+   */
+  public FuzzyCompletionQuery(Analyzer analyzer, Term term, Filter filter) {
+    this(analyzer, term, filter, DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS, DEFAULT_NON_FUZZY_PREFIX,
+        DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE, Operations.DEFAULT_MAX_DETERMINIZED_STATES
+    );
+  }
+
+  /**
+   * Constructs an analyzed fuzzy prefix completion query
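+   * <p>
+   * For example (a sketch with illustrative values), to allow up to two edits measured
+   * in Unicode code points while keeping the documented defaults for the remaining parameters:
+   * <pre class="prettyprint">
+   *  CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"), null,
+   *      2, true, 1, 3, true, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
+   * </pre>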
+   *
+   * @param analyzer used to analyze the provided {@link Term#text()}
+   * @param term query is run against {@link Term#field()} and {@link Term#text()}
+   *             is analyzed with <code>analyzer</code>
+   * @param filter used to query on a subset of documents
+   * @param maxEdits maximum number of acceptable edits
+   * @param transpositions value passed to {@link LevenshteinAutomata}
+   * @param nonFuzzyPrefix prefix length where edits are not allowed
+   * @param minFuzzyLength minimum prefix length before any edits are allowed
+   * @param unicodeAware treat prefix as unicode rather than bytes
+   * @param maxDeterminizedStates maximum automaton states allowed for {@link LevenshteinAutomata}
+   */
+  public FuzzyCompletionQuery(Analyzer analyzer, Term term, Filter filter, int maxEdits,
+                              boolean transpositions, int nonFuzzyPrefix, int minFuzzyLength,
+                              boolean unicodeAware, int maxDeterminizedStates) {
+    super(analyzer, term, filter);
+    this.maxEdits = maxEdits;
+    this.transpositions = transpositions;
+    this.nonFuzzyPrefix = nonFuzzyPrefix;
+    this.minFuzzyLength = minFuzzyLength;
+    this.unicodeAware = unicodeAware;
+    this.maxDeterminizedStates = maxDeterminizedStates;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+    CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text());
+    Automaton a = stream.toAutomaton(unicodeAware);
+    final Set<IntsRef> refs = Operations.getFiniteStrings(a, -1);
+    assert refs.size() > 0;
+    Automaton automaton = toLevenshteinAutomata(refs);
+    if (unicodeAware) {
+      Automaton utf8automaton = new UTF32ToUTF8().convert(automaton);
+      utf8automaton = Operations.determinize(utf8automaton, maxDeterminizedStates);
+      automaton = utf8automaton;
+    }
+    return new FuzzyCompletionWeight(this, automaton, refs);
+  }
+
+  private Automaton toLevenshteinAutomata(Set<IntsRef> ref) {
+    Automaton subs[] = new Automaton[ref.size()];
+    int upto = 0;
+    for (IntsRef path : ref) {
+      if (path.length <= nonFuzzyPrefix || path.length < minFuzzyLength) {
+        subs[upto] = Automata.makeString(path.ints, path.offset, path.length);
+        upto++;
+      } else {
+        int ints[] = new int[path.length - nonFuzzyPrefix];
+        System.arraycopy(path.ints, path.offset + nonFuzzyPrefix, ints, 0, ints.length);
+        // TODO: maybe add alphaMin to LevenshteinAutomata,
+        // and pass 1 instead of 0?  We probably don't want
+        // to allow the trailing dedup bytes to be
+        // edited... but then 0 byte is "in general" allowed
+        // on input (but not in UTF8).
+        LevenshteinAutomata lev = new LevenshteinAutomata(ints,
+            unicodeAware ? Character.MAX_CODE_POINT : 255,
+            transpositions);
+        subs[upto] = lev.toAutomaton(maxEdits,
+            UnicodeUtil.newString(path.ints, path.offset, nonFuzzyPrefix));
+        upto++;
+      }
+    }
+
+    if (subs.length == 0) {
+      // automaton is empty, there are no accepted paths through it
+      return Automata.makeEmpty(); // matches nothing
+    } else if (subs.length == 1) {
+      // no synonyms or anything: just a single path through the tokenstream
+      return subs[0];
+    } else {
+      // multiple paths: this is really scary! is it slow?
+      // maybe we should not do this and throw UOE?
+      Automaton a = Operations.union(Arrays.asList(subs));
+      // TODO: we could call toLevenshteinAutomata() before det?
+      // this only happens if you have multiple paths anyway (e.g. synonyms)
+      return Operations.determinize(a, maxDeterminizedStates);
+    }
+  }
+
+  @Override
+  public String toString(String field) {
+    StringBuilder buffer = new StringBuilder();
+    if (!getField().equals(field)) {
+      buffer.append(getField());
+      buffer.append(":");
+    }
+    buffer.append(getTerm().text());
+    buffer.append('*');
+    buffer.append('~');
+    buffer.append(Integer.toString(maxEdits));
+    if (getFilter() != null) {
+      buffer.append(",");
+      buffer.append("filter");
+      buffer.append(getFilter().toString(field));
+    }
+    return buffer.toString();
+  }
+
+  private static class FuzzyCompletionWeight extends CompletionWeight {
+    private final Set<IntsRef> refs;
+    int currentBoost = 0;
+
+    public FuzzyCompletionWeight(CompletionQuery query, Automaton automaton, Set<IntsRef> refs) throws IOException {
+      super(query, automaton);
+      this.refs = refs;
+    }
+
+    @Override
+    protected void setNextMatch(IntsRef pathPrefix) {
+      // NOTE: the last letter of the matched prefix for the exact
+      // match never makes it through here
+      // so an exact match and a match with only an edit at the
+      // end are boosted the same
+      int maxCount = 0;
+      for (IntsRef ref : refs) {
+        int minLength = Math.min(ref.length, pathPrefix.length);
+        int count = 0;
+        for (int i = 0; i < minLength; i++) {
+          if (ref.ints[i + ref.offset] == pathPrefix.ints[i + pathPrefix.offset]) {
+            count++;
+          } else {
+            break;
+          }
+        }
+        maxCount = Math.max(maxCount, count);
+      }
+      currentBoost = maxCount;
+    }
+
+    @Override
+    protected float boost() {
+      return currentBoost;
+    }
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
index a014d04..6bbab8d 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
@@ -23,19 +23,13 @@
 import java.util.Comparator;
 import java.util.List;
 
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.search.CollectionTerminatedException;
-import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.suggest.analyzing.FSTUtil;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.IntsRef;
-import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.fst.ByteSequenceOutputs;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.PairOutputs;
@@ -48,18 +42,11 @@
 
 /**
  * <p>
- * NRTSuggester returns Top N completions with corresponding documents matching a provided automaton.
- * The completions are returned in descending order of their corresponding weight.
- * Deleted documents are filtered out in near real time using the provided reader.
- * A {@link org.apache.lucene.search.DocIdSet} can be passed in at query time to filter out documents.
- * </p>
+ * NRTSuggester executes a Top N search on a weighted FST specified by a {@link CompletionScorer}
  * <p>
- * See {@link #lookup(LeafReader, Automaton, int, DocIdSet, TopSuggestDocsCollector)} for more implementation
+ * See {@link #lookup(CompletionScorer, TopSuggestDocsCollector)} for more implementation
  * details.
  * <p>
- * Builder: {@link NRTSuggesterBuilder}
- * </p>
- * <p>
  * FST Format:
  * <ul>
  *   <li>Input: analyzed forms of input terms</li>
@@ -68,16 +55,17 @@
  * <p>
  * NOTE:
  * <ul>
- *   <li>currently only {@link org.apache.lucene.search.DocIdSet} with random access capabilities are supported.</li>
  *   <li>having too many deletions or using a very restrictive filter can make the search inadmissible due to
- *     over-pruning of potential paths</li>
- *   <li>when a {@link org.apache.lucene.search.DocIdSet} is used, it is assumed that the filter will roughly
- *     filter out half the number of documents that match the provided automaton</li>
+ *     over-pruning of potential paths. See {@link CompletionScorer#accept(int)}</li>
+ *   <li>when matched documents are arbitrarily filtered ({@link CompletionScorer#filtered} set to <code>true</code>),
+ *     it is assumed that the filter will roughly filter out half the number of documents that match
+ *     the provided automaton</li>
  *   <li>lookup performance will degrade as more accepted completions lead to filtered out documents</li>
  * </ul>
  *
+ * @lucene.experimental
  */
-final class NRTSuggester implements Accountable {
+public final class NRTSuggester implements Accountable {
 
   /**
    * FST<Weight,Surface>:
@@ -103,23 +91,16 @@
   private final int payloadSep;
 
   /**
-   * Label used to denote the end of an input in the FST and
-   * the beginning of dedup bytes
-   */
-  private final int endByte;
-
-  /**
    * Maximum queue depth for TopNSearcher
    *
    * NOTE: value should be <= Integer.MAX_VALUE
    */
-  private static final long MAX_TOP_N_QUEUE_SIZE = 1000;
+  private static final long MAX_TOP_N_QUEUE_SIZE = 5000;
 
-  private NRTSuggester(FST<Pair<Long, BytesRef>> fst, int maxAnalyzedPathsPerOutput, int payloadSep, int endByte) {
+  private NRTSuggester(FST<Pair<Long, BytesRef>> fst, int maxAnalyzedPathsPerOutput, int payloadSep) {
     this.fst = fst;
     this.maxAnalyzedPathsPerOutput = maxAnalyzedPathsPerOutput;
     this.payloadSep = payloadSep;
-    this.endByte = endByte;
   }
 
   @Override
@@ -132,6 +113,81 @@
     return Collections.emptyList();
   }
 
+  /**
+   * Collects at most {@link TopSuggestDocsCollector#getCountToCollect()} completions that
+   * match the provided {@link CompletionScorer}.
+   * <p>
+   * The {@link CompletionScorer#automaton} is intersected with the {@link #fst}.
+   * {@link CompletionScorer#weight} is used to compute boosts and/or extract context
+   * for each matched partial path. A top N search is executed on {@link #fst} seeded with
+   * the matched partial paths. Upon reaching a completed path, {@link CompletionScorer#accept(int)}
+   * and {@link CompletionScorer#score(float, float)} are used on the document id, index weight
+   * and query boost to filter and score the entry, before being collected via
+   * {@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
+   */
+  public void lookup(final CompletionScorer scorer, final TopSuggestDocsCollector collector) throws IOException {
+    final double liveDocsRatio = calculateLiveDocRatio(scorer.reader.numDocs(), scorer.reader.maxDoc());
+    if (liveDocsRatio == -1) {
+      return;
+    }
+    final List<FSTUtil.Path<Pair<Long, BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(scorer.automaton, fst);
+    final int queueSize = getMaxTopNSearcherQueueSize(collector.getCountToCollect() * prefixPaths.size(),
+        scorer.reader.numDocs(), liveDocsRatio, scorer.filtered);
+    Comparator<Pair<Long, BytesRef>> comparator = getComparator();
+    Util.TopNSearcher<Pair<Long, BytesRef>> searcher = new Util.TopNSearcher<Pair<Long, BytesRef>>(fst,
+        collector.getCountToCollect(), queueSize, comparator, new ScoringPathComparator(scorer)) {
+
+      private final CharsRefBuilder spare = new CharsRefBuilder();
+
+      @Override
+      protected boolean acceptResult(Util.FSTPath<Pair<Long, BytesRef>> path) {
+        int payloadSepIndex = parseSurfaceForm(path.cost.output2, payloadSep, spare);
+        int docID = parseDocID(path.cost.output2, payloadSepIndex);
+        if (!scorer.accept(docID)) {
+          return false;
+        }
+        try {
+          float score = scorer.score(decode(path.cost.output1), path.boost);
+          collector.collect(docID, spare.toCharsRef(), path.context, score);
+          return true;
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    };
+
+    for (FSTUtil.Path<Pair<Long, BytesRef>> path : prefixPaths) {
+      scorer.weight.setNextMatch(path.input.get());
+      searcher.addStartPaths(path.fstNode, path.output, false, path.input, scorer.weight.boost(),
+          scorer.weight.context());
+    }
+    // hits are also returned by search()
+    // we do not use it, instead collect at acceptResult
+    searcher.search();
+    // search admissibility is not guaranteed
+    // see comment on getMaxTopNSearcherQueueSize
+    // assert  search.isComplete;
+  }
+
+  /**
+   * Compares partial completion paths using {@link CompletionScorer#score(float, float)},
+   * breaking ties by comparing path inputs
+   */
+  private static class ScoringPathComparator implements Comparator<Util.FSTPath<Pair<Long, BytesRef>>> {
+    private final CompletionScorer scorer;
+
+    public ScoringPathComparator(CompletionScorer scorer) {
+      this.scorer = scorer;
+    }
+
+    @Override
+    public int compare(Util.FSTPath<Pair<Long, BytesRef>> first, Util.FSTPath<Pair<Long, BytesRef>> second) {
+      int cmp = Float.compare(scorer.score(decode(second.cost.output1), second.boost),
+          scorer.score(decode(first.cost.output1), first.boost));
+      return (cmp != 0) ? cmp : first.input.get().compareTo(second.input.get());
+    }
+  }
+
   private static Comparator<Pair<Long, BytesRef>> getComparator() {
     return new Comparator<Pair<Long, BytesRef>>() {
       @Override
@@ -142,93 +198,6 @@
   }
 
   /**
-   * Collects at most Top <code>num</code> completions, filtered by <code>filter</code> on
-   * corresponding documents, which has a prefix accepted by <code>automaton</code>
-   * <p>
-   * Supports near real time deleted document filtering using <code>reader</code>
-   * <p>
-   * {@link TopSuggestDocsCollector#collect(int, CharSequence, long)} is called
-   * for every matched completion
-   * <p>
-   * Completion collection can be early terminated by throwing {@link org.apache.lucene.search.CollectionTerminatedException}
-   */
-  public void lookup(final LeafReader reader, final Automaton automaton, final int num, final DocIdSet filter, final TopSuggestDocsCollector collector) {
-    final Bits filterDocs;
-    try {
-      if (filter != null) {
-        if (filter.iterator() == null) {
-          return;
-        }
-        if (filter.bits() == null) {
-          throw new IllegalArgumentException("DocIDSet does not provide random access interface");
-        } else {
-          filterDocs = filter.bits();
-        }
-      } else {
-        filterDocs = null;
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-
-    int queueSize = getMaxTopNSearcherQueueSize(num, reader, filterDocs != null);
-    if (queueSize == -1) {
-      return;
-    }
-
-    final Bits liveDocs = reader.getLiveDocs();
-    try {
-      final List<FSTUtil.Path<Pair<Long, BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(automaton, fst);
-      Util.TopNSearcher<Pair<Long, BytesRef>> searcher = new Util.TopNSearcher<Pair<Long, BytesRef>>(fst, num, queueSize, getComparator()) {
-
-        private final CharsRefBuilder spare = new CharsRefBuilder();
-
-        @Override
-        protected boolean acceptResult(IntsRef input, Pair<Long, BytesRef> output) {
-          int payloadSepIndex = parseSurfaceForm(output.output2, payloadSep, spare);
-          int docID = parseDocID(output.output2, payloadSepIndex);
-
-          // filter out deleted docs only if no filter is set
-          if (filterDocs == null && liveDocs != null && !liveDocs.get(docID)) {
-            return false;
-          }
-
-          // filter by filter context
-          if (filterDocs != null && !filterDocs.get(docID)) {
-            return false;
-          }
-
-          try {
-            collector.collect(docID, spare.toCharsRef(), decode(output.output1));
-            return true;
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-        }
-      };
-
-      // TODO: add fuzzy support
-      for (FSTUtil.Path<Pair<Long, BytesRef>> path : prefixPaths) {
-        searcher.addStartPaths(path.fstNode, path.output, false, path.input);
-      }
-
-      try {
-        // hits are also returned by search()
-        // we do not use it, instead collect at acceptResult
-        Util.TopResults<Pair<Long, BytesRef>> search = searcher.search();
-        // search admissibility is not guaranteed
-        // see comment on getMaxTopNSearcherQueueSize
-        // assert  search.isComplete;
-      } catch (CollectionTerminatedException e) {
-        // terminate
-      }
-
-    } catch (IOException bogus) {
-      throw new RuntimeException(bogus);
-    }
-  }
-
-  /**
    * Simple heuristics to try to avoid over-pruning potential suggestions by the
    * TopNSearcher. Since suggestion entries can be rejected if they belong
    * to a deleted document, the length of the TopNSearcher queue has to
@@ -241,17 +210,13 @@
    * <p>
    * The maximum queue size is {@link #MAX_TOP_N_QUEUE_SIZE}
    */
-  private int getMaxTopNSearcherQueueSize(int num, LeafReader reader, boolean filterEnabled) {
-    double liveDocsRatio = calculateLiveDocRatio(reader.numDocs(), reader.maxDoc());
-    if (liveDocsRatio == -1) {
-      return -1;
-    }
-    long maxQueueSize = num * maxAnalyzedPathsPerOutput;
+  private int getMaxTopNSearcherQueueSize(int topN, int numDocs, double liveDocsRatio, boolean filterEnabled) {
+    long maxQueueSize = topN * maxAnalyzedPathsPerOutput;
     // liveDocRatio can be at most 1.0 (if no docs were deleted)
     assert liveDocsRatio <= 1.0d;
     maxQueueSize = (long) (maxQueueSize / liveDocsRatio);
     if (filterEnabled) {
-      maxQueueSize = maxQueueSize + (reader.numDocs()/2);
+      maxQueueSize = maxQueueSize + (numDocs/2);
     }
     return (int) Math.min(MAX_TOP_N_QUEUE_SIZE, maxQueueSize);
   }
@@ -269,21 +234,27 @@
 
     /* read some meta info */
     int maxAnalyzedPathsPerOutput = input.readVInt();
+    /*
+     * Label used to denote the end of an input in the FST and
+     * the beginning of dedup bytes
+     */
     int endByte = input.readVInt();
     int payloadSep = input.readVInt();
 
-    return new NRTSuggester(fst, maxAnalyzedPathsPerOutput, payloadSep, endByte);
+    return new NRTSuggester(fst, maxAnalyzedPathsPerOutput, payloadSep);
   }
 
   static long encode(long input) {
-    if (input < 0) {
+    if (input < 0 || input > Integer.MAX_VALUE) {
       throw new UnsupportedOperationException("cannot encode value: " + input);
     }
-    return Long.MAX_VALUE - input;
+    return Integer.MAX_VALUE - input;
   }
 
   static long decode(long output) {
-    return (Long.MAX_VALUE - output);
+    assert output >= 0 && output <= Integer.MAX_VALUE :
+        "decoded output: " + output + " is not within 0 and Integer.MAX_VALUE";
+    return Integer.MAX_VALUE - output;
   }
 
   /**
@@ -307,7 +278,8 @@
 
     static int parseDocID(final BytesRef output, int payloadSepIndex) {
       assert payloadSepIndex != -1 : "payload sep index can not be -1";
-      ByteArrayDataInput input = new ByteArrayDataInput(output.bytes, payloadSepIndex + output.offset + 1, output.length - (payloadSepIndex + output.offset));
+      ByteArrayDataInput input = new ByteArrayDataInput(output.bytes, payloadSepIndex + output.offset + 1,
+          output.length - (payloadSepIndex + output.offset));
       return input.readVInt();
     }
 
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
index 80c7d36..a962bbf 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggesterBuilder.java
@@ -49,7 +49,7 @@
    * Marks end of the analyzed input and start of dedup
    * byte.
    */
-  private static final int END_BYTE = 0x0;
+  public static final int END_BYTE = 0x0;
 
   private final PairOutputs<Long, BytesRef> outputs;
   private final Builder<PairOutputs.Pair<Long, BytesRef>> builder;
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java
new file mode 100644
index 0000000..24590f7
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java
@@ -0,0 +1,74 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A {@link CompletionQuery} which takes an {@link Analyzer}
+ * to analyze the prefix of the query term.
+ * <p>
+ * Example usage of querying an analyzed prefix 'sugg'
+ * against a field 'suggest_field' is as follows:
+ *
+ * <pre class="prettyprint">
+ *  CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
+ * </pre>
+ * @lucene.experimental
+ */
+public class PrefixCompletionQuery extends CompletionQuery {
+  /** Used to analyze the term text */
+  protected final CompletionAnalyzer analyzer;
+
+  /**
+   * Calls {@link PrefixCompletionQuery#PrefixCompletionQuery(Analyzer, Term, Filter)}
+   * with no filter
+   */
+  public PrefixCompletionQuery(Analyzer analyzer, Term term) {
+    this(analyzer, term, null);
+  }
+
+  /**
+   * Constructs an analyzed prefix completion query
+   *
+   * @param analyzer used to analyze the provided {@link Term#text()}
+   * @param term query is run against {@link Term#field()} and {@link Term#text()}
+   *             is analyzed with <code>analyzer</code>
+   * @param filter used to query on a subset of documents
+   */
+  public PrefixCompletionQuery(Analyzer analyzer, Term term, Filter filter) {
+    super(term, filter);
+    if (!(analyzer instanceof CompletionAnalyzer)) {
+      this.analyzer = new CompletionAnalyzer(analyzer);
+    } else {
+      this.analyzer = (CompletionAnalyzer) analyzer;
+    }
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+    CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text());
+    return new CompletionWeight(this, stream.toAutomaton());
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java
new file mode 100644
index 0000000..efbaea4
--- /dev/null
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java
@@ -0,0 +1,95 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.RegExp;
+
+/**
+ * A {@link CompletionQuery} which takes a regular expression
+ * as the prefix of the query term.
+ *
+ * <p>
+ * Example usage of querying a prefix of 'sug' and 'sub'
+ * as a regular expression against a suggest field 'suggest_field':
+ *
+ * <pre class="prettyprint">
+ *  CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "su[g|b]"));
+ * </pre>
+ *
+ * <p>
+ * See {@link RegExp} for the supported regular expression
+ * syntax
+ *
+ * @lucene.experimental
+ */
+public class RegexCompletionQuery extends CompletionQuery {
+
+  private final int flags;
+  private final int maxDeterminizedStates;
+
+  /**
+   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, Filter)}
+   * with no filter
+   */
+  public RegexCompletionQuery(Term term) {
+    this(term, null);
+  }
+
+  /**
+   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, Filter)}
+   * enabling all optional regex syntax and <code>maxDeterminizedStates</code> of
+   * {@value Operations#DEFAULT_MAX_DETERMINIZED_STATES}
+   */
+  public RegexCompletionQuery(Term term, Filter filter) {
+    this(term, RegExp.ALL, Operations.DEFAULT_MAX_DETERMINIZED_STATES, filter);
+  }
+
+  /**
+   * Calls {@link RegexCompletionQuery#RegexCompletionQuery(Term, int, int, Filter)}
+   * with no filter
+   */
+  public RegexCompletionQuery(Term term, int flags, int maxDeterminizedStates) {
+    this(term, flags, maxDeterminizedStates, null);
+  }
+
+  /**
+   * Constructs a regular expression completion query
+   *
+   * @param term query is run against {@link Term#field()} and {@link Term#text()}
+   *             is interpreted as a regular expression
+   * @param flags used as syntax_flag in {@link RegExp#RegExp(String, int)}
+   * @param maxDeterminizedStates used in {@link RegExp#toAutomaton(int)}
+   * @param filter used to query on a subset of documents
+   */
+  public RegexCompletionQuery(Term term, int flags, int maxDeterminizedStates, Filter filter) {
+    super(term, filter);
+    this.flags = flags;
+    this.maxDeterminizedStates = maxDeterminizedStates;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+    return new CompletionWeight(this, new RegExp(getTerm().text(), flags).toAutomaton(maxDeterminizedStates));
+  }
+}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
index c7d4093..c6d1a4a 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java
@@ -48,20 +48,14 @@
  * document.add(new SuggestField(name, "suggestion", 4));
  * </pre>
 * To perform document suggestions based on this field, use
- * {@link SuggestIndexSearcher#suggest(String, CharSequence, int, org.apache.lucene.search.Filter)}
- * <p>
- * Example query usage:
- * <pre class="prettyprint">
- * SuggestIndexSearcher indexSearcher = ..
- * indexSearcher.suggest(name, "su", 2)
- * </pre>
+ * {@link SuggestIndexSearcher#suggest(CompletionQuery, int)}
  *
  * @lucene.experimental
  */
 public class SuggestField extends Field {
 
-  private static final FieldType FIELD_TYPE = new FieldType();
-
+  /** Default field type for suggest field */
+  public static final FieldType FIELD_TYPE = new FieldType();
   static {
     FIELD_TYPE.setTokenized(true);
     FIELD_TYPE.setStored(false);
@@ -71,53 +65,86 @@
     FIELD_TYPE.freeze();
   }
 
+  static final byte TYPE = 0;
+
   private final BytesRef surfaceForm;
-  private final long weight;
+  private final int weight;
 
   /**
    * Creates a {@link SuggestField}
    *
-   * @param name   of the field
-   * @param value  to get suggestions on
-   * @param weight weight of the suggestion
+   * @param name   field name
+   * @param value  field value to get suggestions on
+   * @param weight field weight
+   *
+   * @throws IllegalArgumentException if either the name or value is null,
+   * if value is an empty string, if the weight is negative, or if value contains
+   * any reserved characters
    */
-  public SuggestField(String name, String value, long weight) {
+  public SuggestField(String name, String value, int weight) {
     super(name, value, FIELD_TYPE);
-    if (weight < 0l) {
+    if (weight < 0) {
       throw new IllegalArgumentException("weight must be >= 0");
     }
+    if (value.length() == 0) {
+      throw new IllegalArgumentException("value must have a length > 0");
+    }
+    for (int i = 0; i < value.length(); i++) {
+      if (isReserved(value.charAt(i))) {
+        throw new IllegalArgumentException("Illegal input [" + value + "] UTF-16 codepoint [0x"
+            + Integer.toHexString((int) value.charAt(i))+ "] at position " + i + " is a reserved character");
+      }
+    }
     this.surfaceForm = new BytesRef(value);
     this.weight = weight;
   }
 
   @Override
   public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
-    TokenStream stream = super.tokenStream(analyzer, reuse);
-    CompletionTokenStream completionStream;
-    if (stream instanceof CompletionTokenStream) {
-      completionStream = (CompletionTokenStream) stream;
-    } else {
-      completionStream = new CompletionTokenStream(stream);
-    }
-    BytesRef suggestPayload = buildSuggestPayload(surfaceForm, weight, (char) completionStream.sepLabel());
-    completionStream.setPayload(suggestPayload);
+    CompletionTokenStream completionStream = wrapTokenStream(super.tokenStream(analyzer, reuse));
+    completionStream.setPayload(buildSuggestPayload());
     return completionStream;
   }
 
-  private BytesRef buildSuggestPayload(BytesRef surfaceForm, long weight, char sepLabel) throws IOException {
-    for (int i = 0; i < surfaceForm.length; i++) {
-      if (surfaceForm.bytes[i] == sepLabel) {
-        assert sepLabel == '\u001f';
-        throw new IllegalArgumentException(
-            "surface form cannot contain unit separator character U+001F; this character is reserved");
-      }
+  /**
+   * Wraps a <code>stream</code> with a CompletionTokenStream.
+   *
+   * Subclasses can override this method to change the indexing pipeline.
+   */
+  protected CompletionTokenStream wrapTokenStream(TokenStream stream) {
+    if (stream instanceof CompletionTokenStream) {
+      return (CompletionTokenStream) stream;
+    } else {
+      return new CompletionTokenStream(stream);
     }
+  }
+
+  /**
+   * Returns a byte to denote the type of the field
+   */
+  protected byte type() {
+    return TYPE;
+  }
+
+  private BytesRef buildSuggestPayload() throws IOException {
     ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
     try (OutputStreamDataOutput output = new OutputStreamDataOutput(byteArrayOutputStream)) {
       output.writeVInt(surfaceForm.length);
       output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
-      output.writeVLong(weight + 1);
+      output.writeVInt(weight + 1);
+      output.writeByte(type());
     }
     return new BytesRef(byteArrayOutputStream.toByteArray());
   }
+
+  private boolean isReserved(char c) {
+    switch (c) {
+      case CompletionAnalyzer.SEP_LABEL:
+      case CompletionAnalyzer.HOLE_CHARACTER:
+      case NRTSuggesterBuilder.END_BYTE:
+        return true;
+      default:
+        return false;
+    }
+  }
 }
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
index ffc7a48..17b30ce 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestIndexSearcher.java
@@ -19,132 +19,66 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.CollectionTerminatedException;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.util.automaton.Automaton;
-
-import static org.apache.lucene.search.suggest.document.CompletionFieldsProducer.CompletionTerms;
+import org.apache.lucene.search.Weight;
 
 /**
- * Adds document suggest capabilities to IndexSearcher
+ * Adds document suggest capabilities to IndexSearcher.
+ * Any {@link CompletionQuery} can be used to suggest documents.
+ *
+ * Use {@link PrefixCompletionQuery} for analyzed prefix queries,
+ * {@link RegexCompletionQuery} for regular expression prefix queries,
+ * {@link FuzzyCompletionQuery} for analyzed prefix with typo tolerance
+ * and {@link ContextQuery} to boost and/or filter suggestions by contexts.
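+ * <p>
+ * A minimal usage sketch (the reader, analyzer, field name and prefix are illustrative):
+ * <pre class="prettyprint">
+ *  SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
+ *  TopSuggestDocs suggestions = searcher.suggest(
+ *      new PrefixCompletionQuery(analyzer, new Term("suggest_field", "su")), 3);
+ * </pre>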
  *
  * @lucene.experimental
  */
 public class SuggestIndexSearcher extends IndexSearcher {
 
-  private final Analyzer queryAnalyzer;
-
   /**
    * Creates a searcher with document suggest capabilities
    * for <code>reader</code>.
-   * <p>
-   * Suggestion <code>key</code> is analyzed with <code>queryAnalyzer</code>
    */
-  public SuggestIndexSearcher(IndexReader reader, Analyzer queryAnalyzer) {
+  public SuggestIndexSearcher(IndexReader reader) {
     super(reader);
-    this.queryAnalyzer = queryAnalyzer;
   }
 
   /**
-   * Calls {@link #suggest(String, CharSequence, int, Filter)}
-   * with no document filter
+   * Returns top <code>n</code> completion hits for
+   * <code>query</code>
    */
-  public TopSuggestDocs suggest(String field, CharSequence key, int num) throws IOException {
-    return suggest(field, key, num, (Filter) null);
-  }
-
-  /**
-   * Calls {@link #suggest(String, CharSequence, int, Filter, TopSuggestDocsCollector)}
-   * with no document filter
-   */
-  public void suggest(String field, CharSequence key, int num, TopSuggestDocsCollector collector) throws IOException {
-    suggest(field, key, num, null, collector);
-  }
-
-  /**
-   * Suggests at most <code>num</code> documents filtered by <code>filter</code>
-   * that completes to <code>key</code> for a suggest <code>field</code>
-   * <p>
-   * Returns at most Top <code>num</code> document ids with corresponding completion and weight pair
-   *
-   * @throws java.lang.IllegalArgumentException if <code>filter</code> does not provide a random access
-   *                                            interface or if <code>field</code> is not a {@link SuggestField}
-   */
-  public TopSuggestDocs suggest(String field, CharSequence key, int num, Filter filter) throws IOException {
-    TopSuggestDocsCollector collector = new TopSuggestDocsCollector(num);
-    suggest(field, key, num, filter, collector);
+  public TopSuggestDocs suggest(CompletionQuery query, int n) throws IOException {
+    TopSuggestDocsCollector collector = new TopSuggestDocsCollector(n);
+    suggest(query, collector);
     return collector.get();
   }
 
   /**
-   * Suggests at most <code>num</code> documents filtered by <code>filter</code>
-   * that completes to <code>key</code> for a suggest <code>field</code>
-   * <p>
-   * Collect completions with {@link TopSuggestDocsCollector}
-   * The completions are collected in order of the suggest <code>field</code> weight.
-   * There can be more than one collection of the same document, if the <code>key</code>
-   * matches multiple <code>field</code> values of the same document
+   * Lower-level suggest API.
+   * Collects completion hits through <code>collector</code> for <code>query</code>.
    *
-   * @throws java.lang.IllegalArgumentException if <code>filter</code> does not provide a random access
-   *                                            interface or if <code>field</code> is not a {@link SuggestField}
+   * <p>{@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
+   * is called for every matching completion hit.
    */
-  public void suggest(String field, CharSequence key, int num, Filter filter, TopSuggestDocsCollector collector) throws IOException {
-    // verify input
-    if (field == null) {
-      throw new IllegalArgumentException("'field' can not be null");
-    }
-    if (num <= 0) {
-      throw new IllegalArgumentException("'num' should be > 0");
-    }
-    if (collector == null) {
-      throw new IllegalArgumentException("'collector' can not be null");
-    }
-
-    // build query automaton
-    CompletionAnalyzer analyzer;
-    if (queryAnalyzer instanceof CompletionAnalyzer) {
-      analyzer = (CompletionAnalyzer) queryAnalyzer;
-    } else {
-      analyzer = new CompletionAnalyzer(queryAnalyzer);
-    }
-    final Automaton automaton = analyzer.toAutomaton(field, key);
-
-    // collect results
+  public void suggest(CompletionQuery query, TopSuggestDocsCollector collector) throws IOException {
+    // TODO use IndexSearcher.rewrite instead
+    // have to implement equals() and hashCode() in CompletionQuerys and co
+    query = (CompletionQuery) query.rewrite(getIndexReader());
+    Weight weight = query.createWeight(this, collector.needsScores());
     for (LeafReaderContext context : getIndexReader().leaves()) {
-      TopSuggestDocsCollector leafCollector = (TopSuggestDocsCollector) collector.getLeafCollector(context);
-      LeafReader reader = context.reader();
-      Terms terms = reader.terms(field);
-      if (terms == null) {
-        continue;
-      }
-      NRTSuggester suggester;
-      if (terms instanceof CompletionTerms) {
-        CompletionTerms completionTerms = (CompletionTerms) terms;
-        suggester = completionTerms.suggester();
-      } else {
-        throw new IllegalArgumentException(field + " is not a SuggestField");
-      }
-      if (suggester == null) {
-        // a segment can have a null suggester
-        // i.e. no FST was built
-        continue;
-      }
-
-      DocIdSet docIdSet = null;
-      if (filter != null) {
-        docIdSet = filter.getDocIdSet(context, reader.getLiveDocs());
-        if (docIdSet == null) {
-          // filter matches no docs in current leave
-          continue;
+      BulkScorer scorer = weight.bulkScorer(context, context.reader().getLiveDocs());
+      if (scorer != null) {
+        try {
+          scorer.score(collector.getLeafCollector(context));
+        } catch (CollectionTerminatedException e) {
+          // collection was terminated prematurely
+          // continue with the following leaf
         }
       }
-      suggester.lookup(reader, automaton, num, docIdSet, leafCollector);
     }
   }
 }
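
For orientation, a minimal usage sketch of the reworked, query-based entry point, mirroring the calls made by the new tests later in this change. Only PrefixCompletionQuery, SuggestIndexSearcher and the suggest(CompletionQuery, int) convenience exercised elsewhere in this diff are relied on; the index path, the StandardAnalyzer and the assumption that "suggest_field" was indexed with the completion postings format are placeholders for the example, not part of the patch.

import java.nio.file.Paths;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.suggest.document.CompletionQuery;
import org.apache.lucene.search.suggest.document.PrefixCompletionQuery;
import org.apache.lucene.search.suggest.document.SuggestIndexSearcher;
import org.apache.lucene.search.suggest.document.TopSuggestDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SuggestApiSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder index location and analyzer; "suggest_field" is assumed to
    // have been written as a SuggestField with the completion postings format.
    Analyzer analyzer = new StandardAnalyzer();
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/suggest-index"));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
      // Prefix lookup on the completion field; ranking comes from the
      // pre-computed suggestion weights.
      CompletionQuery query =
          new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
      TopSuggestDocs top = searcher.suggest(query, 5);
      for (TopSuggestDocs.SuggestScoreDoc hit : top.scoreLookupDocs()) {
        System.out.println(hit.key + " (context=" + hit.context + ", score=" + hit.score + ")");
      }
    }
  }
}
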
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
index 0064b4b..049f73a 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocs.java
@@ -43,7 +43,12 @@
     /**
      * Matched completion key
      */
-    public CharSequence key;
+    public final CharSequence key;
+
+    /**
+     * Context for the completion
+     */
+    public final CharSequence context;
 
     /**
      * Creates a SuggestScoreDoc instance
@@ -52,11 +57,10 @@
      * @param key   matched completion
      * @param score weight of the matched completion
      */
-    public SuggestScoreDoc(int doc, CharSequence key, long score) {
-      // loss of precision but not magnitude
-      // implicit conversion from long -> float
+    public SuggestScoreDoc(int doc, CharSequence key, CharSequence context, float score) {
       super(doc, score);
       this.key = key;
+      this.context = context;
     }
 
     @Override
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocsCollector.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocsCollector.java
index 6644b0d..1cb3277 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocsCollector.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/TopSuggestDocsCollector.java
@@ -30,20 +30,23 @@
  * score, along with document id
  * <p>
 * Non-scoring collector that collects completions in order of their
- * pre-defined weight.
+ * pre-computed scores.
  * <p>
  * NOTE: One document can be collected multiple times if a document
  * is matched for multiple unique completions for a given query
  * <p>
- * Subclasses should only override {@link TopSuggestDocsCollector#collect(int, CharSequence, long)},
- * {@link #setScorer(org.apache.lucene.search.Scorer)} is not
- * used
+ * Subclasses should only override
+ * {@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}.
+ * <p>
+ * NOTE: {@link #setScorer(org.apache.lucene.search.Scorer)} and
+ * {@link #collect(int)} are not used
  *
  * @lucene.experimental
  */
 public class TopSuggestDocsCollector extends SimpleCollector {
 
   private final SuggestScoreDocPriorityQueue priorityQueue;
+  private final int num;
 
   /**
    * Document base offset for the current Leaf
@@ -60,9 +63,17 @@
     if (num <= 0) {
       throw new IllegalArgumentException("'num' must be > 0");
     }
+    this.num = num;
     this.priorityQueue = new SuggestScoreDocPriorityQueue(num);
   }
 
+  /**
+   * Returns the number of results to be collected
+   */
+  public int getCountToCollect() {
+    return num;
+  }
+
   @Override
   protected void doSetNextReader(LeafReaderContext context) throws IOException {
     docBase = context.docBase;
@@ -76,8 +87,8 @@
    * NOTE: collection at the leaf level is guaranteed to be in
    * descending order of score
    */
-  public void collect(int docID, CharSequence key, long score) throws IOException {
-    SuggestScoreDoc current = new SuggestScoreDoc(docBase + docID, key, score);
+  public void collect(int docID, CharSequence key, CharSequence context, float score) throws IOException {
+    SuggestScoreDoc current = new SuggestScoreDoc(docBase + docID, key, context, score);
     if (current == priorityQueue.insertWithOverflow(current)) {
       // if the current SuggestScoreDoc has overflown from pq,
       // we can assume all of the successive collections from
@@ -104,7 +115,7 @@
    */
   @Override
   public void collect(int doc) throws IOException {
-    // {@link #collect(int, CharSequence, long)} is used
+    // {@link #collect(int, CharSequence, CharSequence, float)} is used
     // instead
   }
 
@@ -113,6 +124,6 @@
    */
   @Override
   public boolean needsScores() {
-    return false;
+    return true;
   }
 }
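
Since subclasses are now expected to override only collect(int, CharSequence, CharSequence, float), here is a small illustrative subclass. It is only a sketch against the constructor and collect() shown in the hunks above, assuming the int constructor is accessible to subclasses; the key bookkeeping it adds is made up for the example.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector;

// Records every matching completion key while keeping the default top-N
// behavior by delegating to the base collect(), which feeds the priority queue.
class KeyRecordingCollector extends TopSuggestDocsCollector {
  final List<String> seenKeys = new ArrayList<>();

  KeyRecordingCollector(int num) {
    super(num); // number of results to collect, validated by the base class
  }

  @Override
  public void collect(int docID, CharSequence key, CharSequence context, float score) throws IOException {
    seenKeys.add(key.toString()); // side bookkeeping for the example
    super.collect(docID, key, context, score);
  }
}
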
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/SuggestFieldTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/SuggestFieldTest.java
deleted file mode 100644
index f1df220..0000000
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/SuggestFieldTest.java
+++ /dev/null
@@ -1,791 +0,0 @@
-package org.apache.lucene.search.suggest.document;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CyclicBarrier;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.MockTokenFilter;
-import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50Codec;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.StoredDocument;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BitDocIdSet;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.FixedBitSet;
-import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.apache.lucene.search.suggest.document.TopSuggestDocs.*;
-import static org.hamcrest.core.IsEqual.equalTo;
-
-public class SuggestFieldTest extends LuceneTestCase {
-
-  public Directory dir;
-
-  @Before
-  public void before() throws Exception {
-    dir = newDirectory();
-  }
-
-  @After
-  public void after() throws Exception {
-    dir.close();
-  }
-
-  @Test
-  public void testSimple() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    Document document = new Document();
-
-    document.add(newSuggestField("suggest_field", "abc", 3l));
-    document.add(newSuggestField("suggest_field", "abd", 4l));
-    document.add(newSuggestField("suggest_field", "The Foo Fighters", 2l));
-    iw.addDocument(document);
-    document.clear();
-    document.add(newSuggestField("suggest_field", "abcdd", 5));
-    iw.addDocument(document);
-
-    if (rarely()) {
-      iw.commit();
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest("suggest_field", "ab", 3);
-    assertSuggestions(lookupDocs, new Entry("abcdd", 5), new Entry("abd", 4), new Entry("abc", 3));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testMultipleSuggestFieldsPerDoc() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "sug_field_1", "sug_field_2"));
-
-    Document document = new Document();
-    document.add(newSuggestField("sug_field_1", "apple", 4));
-    document.add(newSuggestField("sug_field_2", "april", 3));
-    iw.addDocument(document);
-    document.clear();
-    document.add(newSuggestField("sug_field_1", "aples", 3));
-    document.add(newSuggestField("sug_field_2", "apartment", 2));
-    iw.addDocument(document);
-
-    if (rarely()) {
-      iw.commit();
-    }
-
-    DirectoryReader reader = iw.getReader();
-
-    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggestDocs1 = suggestIndexSearcher.suggest("sug_field_1", "ap", 4);
-    assertSuggestions(suggestDocs1, new Entry("apple", 4), new Entry("aples", 3));
-    TopSuggestDocs suggestDocs2 = suggestIndexSearcher.suggest("sug_field_2", "ap", 4);
-    assertSuggestions(suggestDocs2, new Entry("april", 3), new Entry("apartment", 2));
-
-    // check that the doc ids are consistent
-    for (int i = 0; i < suggestDocs1.scoreDocs.length; i++) {
-      ScoreDoc suggestScoreDoc = suggestDocs1.scoreDocs[i];
-      assertThat(suggestScoreDoc.doc, equalTo(suggestDocs2.scoreDocs[i].doc));
-    }
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testDupSuggestFieldValues() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(300));
-    long[] weights = new long[num];
-    for(int i = 0; i < num; i++) {
-      Document document = new Document();
-      weights[i] = Math.abs(random().nextLong());
-      document.add(newSuggestField("suggest_field", "abc", weights[i]));
-      iw.addDocument(document);
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    Entry[] expectedEntries = new Entry[num];
-    Arrays.sort(weights);
-    for (int i = 1; i <= num; i++) {
-      expectedEntries[i - 1] = new Entry("abc", weights[num - i]);
-    }
-
-    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest("suggest_field", "abc", num);
-    assertSuggestions(lookupDocs, expectedEntries);
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testNRTDeletedDocFiltering() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    // using IndexWriter instead of RandomIndexWriter
-    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
-
-    int num = Math.min(1000, atLeast(10));
-
-    Document document = new Document();
-    int numLive = 0;
-    List<Entry> expectedEntries = new ArrayList<>();
-    for (int i = 0; i < num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, num - i));
-      if (i % 2 == 0) {
-        document.add(newStringField("str_field", "delete", Field.Store.YES));
-      } else {
-        numLive++;
-        expectedEntries.add(new Entry("abc_" + i, num - i));
-        document.add(newStringField("str_field", "no_delete", Field.Store.YES));
-      }
-      iw.addDocument(document);
-      document.clear();
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    iw.deleteDocuments(new Term("str_field", "delete"));
-
-    DirectoryReader reader = DirectoryReader.open(iw, true);
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", numLive);
-    assertSuggestions(suggest, expectedEntries.toArray(new Entry[expectedEntries.size()]));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testSuggestOnAllFilteredDocuments() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-    for (int i = 0; i < num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, i));
-      document.add(newStringField("str_fld", "deleted", Field.Store.NO));
-      iw.addDocument(document);
-      document.clear();
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    Filter filter = new QueryWrapperFilter(new TermsQuery("str_fld", new BytesRef("non_existent")));
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    // no random access required;
-    // calling suggest with filter that does not match any documents should early terminate
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", num, filter);
-    assertThat(suggest.totalHits, equalTo(0));
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testSuggestOnAllDeletedDocuments() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    // using IndexWriter instead of RandomIndexWriter
-    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-    for (int i = 0; i < num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, i));
-      document.add(newStringField("delete", "delete", Field.Store.NO));
-      iw.addDocument(document);
-      document.clear();
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    iw.deleteDocuments(new Term("delete", "delete"));
-
-    DirectoryReader reader = DirectoryReader.open(iw, true);
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", num);
-    assertThat(suggest.totalHits, equalTo(0));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testSuggestOnMostlyDeletedDocuments() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    // using IndexWriter instead of RandomIndexWriter
-    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-    for (int i = 1; i <= num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, i));
-      document.add(new IntField("weight_fld", i, Field.Store.YES));
-      iw.addDocument(document);
-      document.clear();
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    iw.deleteDocuments(NumericRangeQuery.newIntRange("weight_fld", 2, null, true, false));
-
-    DirectoryReader reader = DirectoryReader.open(iw, true);
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", 1);
-    assertSuggestions(suggest, new Entry("abc_1", 1));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testSuggestOnMostlyFilteredOutDocuments() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-    for (int i = 0; i < num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, i));
-      document.add(new IntField("filter_int_fld", i, Field.Store.NO));
-      iw.addDocument(document);
-      document.clear();
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-
-    int topScore = num/2;
-    QueryWrapperFilter filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 0, topScore, true, true));
-    Filter filter = randomAccessFilter(filterWrapper);
-    // if at most half of the top scoring documents have been filtered out
-    // the search should be admissible for a single segment
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", num, filter);
-    assertTrue(suggest.totalHits >= 1);
-    assertThat(suggest.scoreLookupDocs()[0].key.toString(), equalTo("abc_" + topScore));
-    assertThat(suggest.scoreLookupDocs()[0].score, equalTo((float) topScore));
-
-    filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 0, 0, true, true));
-    filter = randomAccessFilter(filterWrapper);
-    // if more than half of the top scoring documents have been filtered out
-    // search is not admissible, so # of suggestions requested is num instead of 1
-    suggest = indexSearcher.suggest("suggest_field", "abc_", num, filter);
-    assertSuggestions(suggest, new Entry("abc_0", 0));
-
-    filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", num - 1, num - 1, true, true));
-    filter = randomAccessFilter(filterWrapper);
-    // if only lower scoring documents are filtered out
-    // search is admissible
-    suggest = indexSearcher.suggest("suggest_field", "abc_", 1, filter);
-    assertSuggestions(suggest, new Entry("abc_" + (num - 1), num - 1));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testEarlyTermination() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-
-    // have segments of 4 documents
-    // with descending suggestion weights
-    // suggest should early terminate for
-    // segments with docs having lower suggestion weights
-    for (int i = num; i > 0; i--) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, i));
-      iw.addDocument(document);
-      document.clear();
-      if (i % 4 == 0) {
-        iw.commit();
-      }
-    }
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", 1);
-    assertSuggestions(suggest, new Entry("abc_" + num, num));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testMultipleSegments() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    int num = Math.min(1000, atLeast(10));
-    Document document = new Document();
-    List<Entry> entries = new ArrayList<>();
-
-    // ensure at least some segments have no suggest field
-    for (int i = num; i > 0; i--) {
-      if (random().nextInt(4) == 1) {
-        document.add(newSuggestField("suggest_field", "abc_" + i, i));
-        entries.add(new Entry("abc_" + i, i));
-      }
-      document.add(new IntField("weight_fld", i, Field.Store.YES));
-      iw.addDocument(document);
-      document.clear();
-      if (usually()) {
-        iw.commit();
-      }
-    }
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", (entries.size() == 0) ? 1 : entries.size());
-    assertSuggestions(suggest, entries.toArray(new Entry[entries.size()]));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testDocFiltering() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-
-    Document document = new Document();
-    document.add(new IntField("filter_int_fld", 9, Field.Store.NO));
-    document.add(newSuggestField("suggest_field", "apples", 3));
-    iw.addDocument(document);
-
-    document.clear();
-    document.add(new IntField("filter_int_fld", 10, Field.Store.NO));
-    document.add(newSuggestField("suggest_field", "applle", 4));
-    iw.addDocument(document);
-
-    document.clear();
-    document.add(new IntField("filter_int_fld", 4, Field.Store.NO));
-    document.add(newSuggestField("suggest_field", "apple", 5));
-    iw.addDocument(document);
-
-    if (rarely()) {
-      iw.commit();
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-
-    // suggest without filter
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "app", 3);
-    assertSuggestions(suggest, new Entry("apple", 5), new Entry("applle", 4), new Entry("apples", 3));
-
-    // suggest with filter
-    QueryWrapperFilter filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 5, 12, true, true));
-    Filter filter = randomAccessFilter(filterWrapper);
-    suggest = indexSearcher.suggest("suggest_field", "app", 3, filter);
-    assertSuggestions(suggest, new Entry("applle", 4), new Entry("apples", 3));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testReturnedDocID() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-
-    Document document = new Document();
-    int num = Math.min(1000, atLeast(10));
-    for (int i = 0; i < num; i++) {
-      document.add(newSuggestField("suggest_field", "abc_" + i, num));
-      document.add(new IntField("int_field", i, Field.Store.YES));
-      iw.addDocument(document);
-      document.clear();
-
-      if (random().nextBoolean()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", "abc_", num);
-    assertEquals(num, suggest.totalHits);
-    for (SuggestScoreDoc suggestScoreDoc : suggest.scoreLookupDocs()) {
-      String key = suggestScoreDoc.key.toString();
-      assertTrue(key.startsWith("abc_"));
-      String substring = key.substring(4);
-      int fieldValue = Integer.parseInt(substring);
-      StoredDocument doc = reader.document(suggestScoreDoc.doc);
-      assertEquals(doc.getField("int_field").numericValue().intValue(), fieldValue);
-    }
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testCompletionAnalyzerOptions() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
-    Map<String, Analyzer> map = new HashMap<>();
-    map.put("suggest_field_default", new CompletionAnalyzer(analyzer));
-    CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, false, true);
-    map.put("suggest_field_no_p_sep", completionAnalyzer);
-    completionAnalyzer = new CompletionAnalyzer(analyzer, true, false);
-    map.put("suggest_field_no_p_pos_inc", completionAnalyzer);
-    completionAnalyzer = new CompletionAnalyzer(analyzer, false, false);
-    map.put("suggest_field_no_p_sep_or_pos_inc", completionAnalyzer);
-    PerFieldAnalyzerWrapper analyzerWrapper = new PerFieldAnalyzerWrapper(analyzer, map);
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzerWrapper, map.keySet()));
-
-    Document document = new Document();
-    document.add(newSuggestField("suggest_field_default", "foobar", 7));
-    document.add(newSuggestField("suggest_field_default", "foo bar", 8));
-    document.add(newSuggestField("suggest_field_default", "the fo", 9));
-    document.add(newSuggestField("suggest_field_default", "the foo bar", 10));
-
-    document.add(newSuggestField("suggest_field_no_p_sep", "foobar", 7));
-    document.add(newSuggestField("suggest_field_no_p_sep", "foo bar", 8));
-    document.add(newSuggestField("suggest_field_no_p_sep", "the fo", 9));
-    document.add(newSuggestField("suggest_field_no_p_sep", "the foo bar", 10));
-
-    document.add(newSuggestField("suggest_field_no_p_pos_inc", "foobar", 7));
-    document.add(newSuggestField("suggest_field_no_p_pos_inc", "foo bar", 8));
-    document.add(newSuggestField("suggest_field_no_p_pos_inc", "the fo", 9));
-    document.add(newSuggestField("suggest_field_no_p_pos_inc", "the foo bar", 10));
-
-    document.add(newSuggestField("suggest_field_no_p_sep_or_pos_inc", "foobar", 7));
-    document.add(newSuggestField("suggest_field_no_p_sep_or_pos_inc", "foo bar", 8));
-    document.add(newSuggestField("suggest_field_no_p_sep_or_pos_inc", "the fo", 9));
-    document.add(newSuggestField("suggest_field_no_p_sep_or_pos_inc", "the foo bar", 10));
-    iw.addDocument(document);
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-
-    TopSuggestDocs suggest;
-    suggest = indexSearcher.suggest("suggest_field_default", "fo", 4);
-    assertSuggestions(suggest, new Entry("foo bar", 8), new Entry("foobar", 7));
-    suggest = indexSearcher.suggest("suggest_field_default", "foob", 4);
-    assertSuggestions(suggest, new Entry("foobar", 7));
-
-    suggest = indexSearcher.suggest("suggest_field_no_p_sep", "fo", 4); // matches all 4
-    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
-    suggest = indexSearcher.suggest("suggest_field_no_p_sep", "foob", 4); // except the fo
-    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
-
-    suggest = indexSearcher.suggest("suggest_field_no_p_pos_inc", "fo", 4); //matches all 4
-    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
-    suggest = indexSearcher.suggest("suggest_field_no_p_pos_inc", "foob", 4); // only foobar
-    assertSuggestions(suggest, new Entry("foobar", 7));
-
-    suggest = indexSearcher.suggest("suggest_field_no_p_sep_or_pos_inc", "fo", 4); // all 4
-    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
-    suggest = indexSearcher.suggest("suggest_field_no_p_sep_or_pos_inc", "foob", 4); // not the fo
-    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testScoring() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-
-    int num = Math.min(1000, atLeast(100));
-    String[] prefixes = {"abc", "bac", "cab"};
-    Map<String, Long> mappings = new HashMap<>();
-    for (int i = 0; i < num; i++) {
-      Document document = new Document();
-      String suggest = prefixes[i % 3] + TestUtil.randomSimpleString(random(), 10) + "_" +String.valueOf(i);
-      long weight = Math.abs(random().nextLong());
-      document.add(newSuggestField("suggest_field", suggest, weight));
-      mappings.put(suggest, weight);
-      iw.addDocument(document);
-
-      if (usually()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    for (String prefix : prefixes) {
-      TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", prefix, num);
-      assertTrue(suggest.totalHits > 0);
-      float topScore = -1;
-      for (SuggestScoreDoc scoreDoc : suggest.scoreLookupDocs()) {
-        if (topScore != -1) {
-          assertTrue(topScore >= scoreDoc.score);
-        }
-        topScore = scoreDoc.score;
-        assertThat((float) mappings.get(scoreDoc.key.toString()), equalTo(scoreDoc.score));
-        assertNotNull(mappings.remove(scoreDoc.key.toString()));
-      }
-    }
-
-    assertThat(mappings.size(), equalTo(0));
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testRealisticKeys() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-    LineFileDocs lineFileDocs = new LineFileDocs(random());
-    int num = Math.min(1000, atLeast(100));
-    Map<String, Long> mappings = new HashMap<>();
-    for (int i = 0; i < num; i++) {
-      Document document = lineFileDocs.nextDoc();
-      String title = document.getField("title").stringValue();
-      long weight = Math.abs(random().nextLong());
-      Long prevWeight = mappings.get(title);
-      if (prevWeight == null || prevWeight < weight) {
-        mappings.put(title, weight);
-      }
-      Document doc = new Document();
-      doc.add(newSuggestField("suggest_field", title, weight));
-      iw.addDocument(doc);
-
-      if (rarely()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-
-    for (Map.Entry<String, Long> entry : mappings.entrySet()) {
-      String title = entry.getKey();
-
-      TopSuggestDocs suggest = indexSearcher.suggest("suggest_field", title, mappings.size());
-      assertTrue(suggest.totalHits > 0);
-      boolean matched = false;
-      for (ScoreDoc scoreDoc : suggest.scoreDocs) {
-        matched = Float.compare(scoreDoc.score, (float) entry.getValue()) == 0;
-        if (matched) {
-          break;
-        }
-      }
-      assertTrue("at least one of the entries should have the score", matched);
-    }
-
-    reader.close();
-    iw.close();
-  }
-
-  @Test
-  public void testThreads() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random());
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field_1", "suggest_field_2", "suggest_field_3"));
-    int num = Math.min(1000, atLeast(100));
-    final String prefix1 = "abc1_";
-    final String prefix2 = "abc2_";
-    final String prefix3 = "abc3_";
-    final Entry[] entries1 = new Entry[num];
-    final Entry[] entries2 = new Entry[num];
-    final Entry[] entries3 = new Entry[num];
-    for (int i = 0; i < num; i++) {
-      int weight = num - (i + 1);
-      entries1[i] = new Entry(prefix1 + weight, weight);
-      entries2[i] = new Entry(prefix2 + weight, weight);
-      entries3[i] = new Entry(prefix3 + weight, weight);
-    }
-    for (int i = 0; i < num; i++) {
-      Document doc = new Document();
-      doc.add(newSuggestField("suggest_field_1", prefix1 + i, i));
-      doc.add(newSuggestField("suggest_field_2", prefix2 + i, i));
-      doc.add(newSuggestField("suggest_field_3", prefix3 + i, i));
-      iw.addDocument(doc);
-
-      if (rarely()) {
-        iw.commit();
-      }
-    }
-
-    DirectoryReader reader = iw.getReader();
-    int numThreads = TestUtil.nextInt(random(), 2, 7);
-    Thread threads[] = new Thread[numThreads];
-    final CyclicBarrier startingGun = new CyclicBarrier(numThreads+1);
-    final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>();
-    final SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader, analyzer);
-    for (int i = 0; i < threads.length; i++) {
-      threads[i] = new Thread() {
-        @Override
-        public void run() {
-          try {
-            startingGun.await();
-            TopSuggestDocs suggest = indexSearcher.suggest("suggest_field_1", prefix1, num);
-            assertSuggestions(suggest, entries1);
-            suggest = indexSearcher.suggest("suggest_field_2", prefix2, num);
-            assertSuggestions(suggest, entries2);
-            suggest = indexSearcher.suggest("suggest_field_3", prefix3, num);
-            assertSuggestions(suggest, entries3);
-          } catch (Throwable e) {
-            errors.add(e);
-          }
-        }
-      };
-      threads[i].start();
-    }
-
-    startingGun.await();
-    for (Thread t : threads) {
-      t.join();
-    }
-    assertTrue(errors.toString(), errors.isEmpty());
-
-    reader.close();
-    iw.close();
-  }
-
-  private static class RandomAccessFilter extends Filter {
-
-    private final Filter in;
-
-    private RandomAccessFilter(Filter in) {
-      this.in = in;
-    }
-
-    @Override
-    public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
-      DocIdSet docIdSet = in.getDocIdSet(context, acceptDocs);
-      DocIdSetIterator iterator = docIdSet.iterator();
-      FixedBitSet bits = new FixedBitSet(context.reader().maxDoc());
-      if (iterator != null) {
-        bits.or(iterator);
-      }
-      return new BitDocIdSet(bits);
-    }
-
-    @Override
-    public String toString(String field) {
-      return in.toString(field);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (super.equals(obj) == false) {
-        return false;
-      }
-      return in.equals(((RandomAccessFilter) obj).in);
-    }
-
-    @Override
-    public int hashCode() {
-      return 31 * super.hashCode() + in.hashCode();
-    }
-  }
-
-  private static Filter randomAccessFilter(Filter filter) {
-    return new RandomAccessFilter(filter);
-  }
-
-  private static class Entry {
-    private final String output;
-    private final float value;
-
-    private Entry(String output, float value) {
-      this.output = output;
-      this.value = value;
-    }
-  }
-
-  private void assertSuggestions(TopDocs actual, Entry... expected) {
-    SuggestScoreDoc[] suggestScoreDocs = (SuggestScoreDoc[]) actual.scoreDocs;
-    assertThat(suggestScoreDocs.length, equalTo(expected.length));
-    for (int i = 0; i < suggestScoreDocs.length; i++) {
-      SuggestScoreDoc lookupDoc = suggestScoreDocs[i];
-      assertThat(lookupDoc.key.toString(), equalTo(expected[i].output));
-      assertThat(lookupDoc.score, equalTo(expected[i].value));
-    }
-  }
-
-  private SuggestField newSuggestField(String name, String value, long weight) throws IOException {
-    return new SuggestField(name, value, weight);
-  }
-
-  private IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, String... suggestFields) {
-    return iwcWithSuggestField(analyzer, asSet(suggestFields));
-  }
-
-  private IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, Set<String> suggestFields) {
-    IndexWriterConfig iwc = newIndexWriterConfig(random(), analyzer);
-    iwc.setMergePolicy(newLogMergePolicy());
-    Codec filterCodec = new Lucene50Codec() {
-      PostingsFormat postingsFormat = new Completion50PostingsFormat();
-
-      @Override
-      public PostingsFormat getPostingsFormatForField(String field) {
-        if (suggestFields.contains(field)) {
-          return postingsFormat;
-        }
-        return super.getPostingsFormatForField(field);
-      }
-    };
-    iwc.setCodec(filterCodec);
-    return iwc;
-  }
-}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java
new file mode 100644
index 0000000..b5724f2
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java
@@ -0,0 +1,531 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField;
+
+public class TestContextQuery extends LuceneTestCase {
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testIllegalInnerQuery() throws Exception {
+    try {
+      new ContextQuery(new ContextQuery(
+          new PrefixCompletionQuery(new MockAnalyzer(random()), new Term("suggest_field", "sugg"))));
+      fail("should error out trying to nest a Context query within another Context query");
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains(ContextQuery.class.getSimpleName()));
+    }
+  }
+
+  @Test
+  public void testSimpleContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 8, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 7, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 6, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 5, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type1", 1);
+    query.addContext("type2", 2);
+    query.addContext("type3", 3);
+    query.addContext("type4", 4);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion4", "type4", 5 * 4),
+        new Entry("suggestion3", "type3", 6 * 3),
+        new Entry("suggestion2", "type2", 7 * 2),
+        new Entry("suggestion1", "type1", 8 * 1)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testContextQueryOnSuggestField() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new SuggestField("suggest_field", "abc", 3));
+    document.add(new SuggestField("suggest_field", "abd", 4));
+    document.add(new SuggestField("suggest_field", "The Foo Fighters", 2));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new SuggestField("suggest_field", "abcdd", 5));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab")));
+    try {
+      suggestIndexSearcher.suggest(query, 4);
+      fail("should error out trying to use a ContextQuery on a plain SuggestField");
+    } catch (IllegalStateException expected) {
+      assertTrue(expected.getMessage().contains("SuggestField"));
+    }
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testNonExactContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type", 1, false);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type1", 4),
+        new Entry("suggestion2", "type2", 3),
+        new Entry("suggestion3", "type3", 2),
+        new Entry("suggestion4", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testContextPrecedenceBoost() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "typetype"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type", 1);
+    query.addContext("typetype", 2);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "typetype", 4 * 2),
+        new Entry("suggestion2", "type", 3 * 1)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testEmptyContext() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion_no_ctx", 4));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion_no_ctx", null, 4),
+        new Entry("suggestion", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testEmptyContextWithBoosts() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type4", 10);
+    query.addContext("*");
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion4", "type4", 1 * 10),
+        new Entry("suggestion1", null, 4),
+        new Entry("suggestion2", null, 3),
+        new Entry("suggestion3", null, 2)
+    );
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testSameSuggestionMultipleContext() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 4, "type1", "type2", "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type1", 10);
+    query.addContext("type2", 2);
+    query.addContext("type3", 3);
+    query.addContext("type4", 4);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion", "type1", 4 * 10),
+        new Entry("suggestion", "type3", 4 * 3),
+        new Entry("suggestion", "type2", 4 * 2),
+        new Entry("suggestion", "type4", 1 * 4)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testMixedContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type1", 7);
+    query.addContext("type2", 6);
+    query.addContext("*", 5);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type1", 4 * 7),
+        new Entry("suggestion2", "type2", 3 * 6),
+        new Entry("suggestion3", "type3", 2 * 5),
+        new Entry("suggestion4", "type4", 1 * 5)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testFilteringContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type3", 3);
+    query.addContext("type4", 4);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion3", "type3", 2 * 3),
+        new Entry("suggestion4", "type4", 1 * 4)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testContextQueryRewrite() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type1", 4),
+        new Entry("suggestion2", "type2", 3),
+        new Entry("suggestion3", "type3", 2),
+        new Entry("suggestion4", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testMultiContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 8, "type1", "type3"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 7, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 6, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 5, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    query.addContext("type1", 1);
+    query.addContext("type2", 2);
+    query.addContext("type3", 3);
+    query.addContext("type4", 4);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type3", 8 * 3),
+        new Entry("suggestion4", "type4", 5 * 4),
+        new Entry("suggestion3", "type3", 6 * 3),
+        new Entry("suggestion2", "type2", 7 * 2),
+        new Entry("suggestion1", "type1", 8 * 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testAllContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type1", 4),
+        new Entry("suggestion2", "type2", 3),
+        new Entry("suggestion3", "type3", 2),
+        new Entry("suggestion4", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testRandomContextQueryScoring() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    try(RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
+      int numSuggestions = atLeast(20);
+      int numContexts = atLeast(5);
+
+      Set<Integer> seenWeights = new HashSet<>();
+      List<Entry> expectedEntries = new ArrayList<>();
+      List<CharSequence> contexts = new ArrayList<>();
+      for (int i = 1; i <= numContexts; i++) {
+        CharSequence context = TestUtil.randomSimpleString(random(), 10) + i;
+        contexts.add(context);
+        for (int j = 1; j <= numSuggestions; j++) {
+          String suggestion = "sugg_" + TestUtil.randomSimpleString(random(), 10) + j;
+          int weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
+          while (seenWeights.contains(weight)) {
+            weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
+          }
+          seenWeights.add(weight);
+          Document document = new Document();
+          document.add(new ContextSuggestField("suggest_field", suggestion, weight, context));
+          iw.addDocument(document);
+          expectedEntries.add(new Entry(suggestion, context.toString(), i * weight));
+        }
+        if (rarely()) {
+          iw.commit();
+        }
+      }
+      Entry[] expectedResults = expectedEntries.toArray(new Entry[expectedEntries.size()]);
+
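+      // order expected entries by descending score, breaking ties on the suggestion text, to mirror the suggester's result order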
+      ArrayUtil.introSort(expectedResults, new Comparator<Entry>() {
+        @Override
+        public int compare(Entry o1, Entry o2) {
+          int cmp = Float.compare(o2.value, o1.value);
+          if (cmp != 0) {
+            return cmp;
+          } else {
+            return o1.output.compareTo(o2.output);
+          }
+        }
+      });
+
+      try (DirectoryReader reader = iw.getReader()) {
+        SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+        ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
+        for (int i = 0; i < contexts.size(); i++) {
+          query.addContext(contexts.get(i), i + 1);
+        }
+        TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4);
+        assertSuggestions(suggest, Arrays.copyOfRange(expectedResults, 0, 4));
+      }
+    }
+  }
+}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java
new file mode 100644
index 0000000..90aef87
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextSuggestField.java
@@ -0,0 +1,144 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.CharsRefBuilder;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField;
+
+public class TestContextSuggestField extends LuceneTestCase {
+
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testEmptySuggestion() throws Exception {
+    try {
+      new ContextSuggestField("suggest_field", "", 1, "type1");
+      fail("no exception thrown when indexing zero length suggestion");
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("value"));
+    }
+  }
+
+  @Test
+  public void testReservedChars() throws Exception {
+    CharsRefBuilder charsRefBuilder = new CharsRefBuilder();
+    charsRefBuilder.append("sugg");
+    charsRefBuilder.setCharAt(2, (char) ContextSuggestField.CONTEXT_SEPARATOR);
+    try {
+      new ContextSuggestField("name", "sugg", 1, charsRefBuilder.toString());
+      fail("no exception thrown for context value containing CONTEXT_SEPARATOR:" + ContextSuggestField.CONTEXT_SEPARATOR);
+    } catch (IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("[0x1d]"));
+    }
+
+    try {
+      new ContextSuggestField("name", charsRefBuilder.toString(), 1, "sugg");
+      fail("no exception thrown for value containing CONTEXT_SEPARATOR:" + ContextSuggestField.CONTEXT_SEPARATOR);
+    } catch (IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("[0x1d]"));
+    }
+  }
+
+  @Test
+  public void testMixedSuggestFields() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    Document document = new Document();
+    document.add(new SuggestField("suggest_field", "suggestion1", 4));
+    document.add(new ContextSuggestField("suggest_field", "suggestion2", 3));
+
+    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
+        iwcWithSuggestField(analyzer, "suggest_field"))) {
+      iw.addDocument(document);
+      iw.commit();
+      fail("mixing suggest field types for same field name should error out");
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("mixed types"));
+    }
+  }
+
+  @Test
+  public void testWithSuggestFields() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
+        iwcWithSuggestField(analyzer, "suggest_field", "context_suggest_field"));
+    Document document = new Document();
+
+    document.add(new SuggestField("suggest_field", "suggestion1", 4));
+    document.add(new SuggestField("suggest_field", "suggestion2", 3));
+    document.add(new SuggestField("suggest_field", "suggestion3", 2));
+    document.add(new ContextSuggestField("context_suggest_field", "suggestion1", 4, "type1"));
+    document.add(new ContextSuggestField("context_suggest_field", "suggestion2", 3, "type2"));
+    document.add(new ContextSuggestField("context_suggest_field", "suggestion3", 2, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new SuggestField("suggest_field", "suggestion4", 1));
+    document.add(new ContextSuggestField("context_suggest_field", "suggestion4", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+
+    CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 10);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", 4),
+        new Entry("suggestion2", 3),
+        new Entry("suggestion3", 2),
+        new Entry("suggestion4", 1));
+
+    query = new PrefixCompletionQuery(analyzer, new Term("context_suggest_field", "sugg"));
+    suggest = suggestIndexSearcher.suggest(query, 10);
+    assertSuggestions(suggest,
+        new Entry("suggestion1", "type1", 4),
+        new Entry("suggestion2", "type2", 3),
+        new Entry("suggestion3", "type3", 2),
+        new Entry("suggestion4", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java
new file mode 100644
index 0000000..a7f2700
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestFuzzyCompletionQuery.java
@@ -0,0 +1,153 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField;
+
+public class TestFuzzyCompletionQuery extends LuceneTestCase {
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testFuzzyQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new SuggestField("suggest_field", "suggestion", 2));
+    document.add(new SuggestField("suggest_field", "suaggestion", 4));
+    document.add(new SuggestField("suggest_field", "ssuggestion", 1));
+    iw.addDocument(document);
+    document = new Document();
+    document.add(new SuggestField("suggest_field", "sugfoo", 1));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4);
+    assertSuggestions(suggest,
+        new Entry("suaggestion", 4 * 2),
+        new Entry("suggestion", 2 * 3),
+        new Entry("sugfoo", 1 * 3),
+        new Entry("ssuggestion", 1 * 1)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testFuzzyContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "sduggestion", 1, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "sudggestion", 1, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "sugdgestion", 1, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggdestion", 1, "type4"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new ContextQuery(new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge")));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("suggestion", "type4", 1 + 4),
+        new Entry("suggdestion", "type4", 1 + 4),
+        new Entry("sugdgestion", "type3", 1 + 3),
+        new Entry("sudggestion", "type2", 1 + 2),
+        new Entry("sduggestion", "type1", 1 + 1)
+    );
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testFuzzyFilteredContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "sduggestion", 1, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "sudggestion", 1, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "sugdgestion", 1, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggdestion", 1, "type4"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery fuzzyQuery = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "sugge"));
+    ContextQuery contextQuery = new ContextQuery(fuzzyQuery);
+    contextQuery.addContext("type1", 6);
+    contextQuery.addContext("type3", 2);
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(contextQuery, 5);
+    assertSuggestions(suggest,
+        new Entry("sduggestion", "type1", 1 * (1 + 6)),
+        new Entry("sugdgestion", "type3", 1 * (3 + 2))
+    );
+
+    reader.close();
+    iw.close();
+  }
+}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java
new file mode 100644
index 0000000..2dba6a0
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java
@@ -0,0 +1,300 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+public class TestPrefixCompletionQuery extends LuceneTestCase {
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testSimple() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new SuggestField("suggest_field", "abc", 3));
+    document.add(new SuggestField("suggest_field", "abd", 4));
+    document.add(new SuggestField("suggest_field", "The Foo Fighters", 2));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new SuggestField("suggest_field", "abcdd", 5));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"));
+    TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3);
+    assertSuggestions(lookupDocs, new Entry("abcdd", 5), new Entry("abd", 4), new Entry("abc", 3));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testMostlyFilteredOutDocuments() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, i));
+      document.add(new IntField("filter_int_fld", i, Field.Store.NO));
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+
+    int topScore = num / 2;
+    QueryWrapperFilter filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 0, topScore, true, true));
+    Filter filter = randomAccessFilter(filterWrapper);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
+    // if at most half of the top scoring documents have been filtered out
+    // the search should be admissible for a single segment
+    TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+    assertTrue(suggest.totalHits >= 1);
+    assertThat(suggest.scoreLookupDocs()[0].key.toString(), equalTo("abc_" + topScore));
+    assertThat(suggest.scoreLookupDocs()[0].score, equalTo((float) topScore));
+
+    filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 0, 0, true, true));
+    filter = randomAccessFilter(filterWrapper);
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
+    // if more than half of the top scoring documents have been filtered out
+    // search is not admissible, so # of suggestions requested is num instead of 1
+    suggest = indexSearcher.suggest(query, num);
+    assertSuggestions(suggest, new Entry("abc_0", 0));
+
+    filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", num - 1, num - 1, true, true));
+    filter = randomAccessFilter(filterWrapper);
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
+    // if only lower scoring documents are filtered out
+    // search is admissible
+    suggest = indexSearcher.suggest(query, 1);
+    assertSuggestions(suggest, new Entry("abc_" + (num - 1), num - 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testDocFiltering() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+
+    Document document = new Document();
+    document.add(new IntField("filter_int_fld", 9, Field.Store.NO));
+    document.add(new SuggestField("suggest_field", "apples", 3));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new IntField("filter_int_fld", 10, Field.Store.NO));
+    document.add(new SuggestField("suggest_field", "applle", 4));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new IntField("filter_int_fld", 4, Field.Store.NO));
+    document.add(new SuggestField("suggest_field", "apple", 5));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+
+    // suggest without filter
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 3);
+    assertSuggestions(suggest, new Entry("apple", 5), new Entry("applle", 4), new Entry("apples", 3));
+
+    // suggest with filter
+    QueryWrapperFilter filterWrapper = new QueryWrapperFilter(NumericRangeQuery.newIntRange("filter_int_fld", 5, 12, true, true));
+    Filter filter = randomAccessFilter(filterWrapper);
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "app"), filter);
+    suggest = indexSearcher.suggest(query, 3);
+    assertSuggestions(suggest, new Entry("applle", 4), new Entry("apples", 3));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testAnalyzerWithoutPreservePosAndSep() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
+    CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, false, false);
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, "suggest_field_no_p_sep_or_pos_inc"));
+    Document document = new Document();
+    document.add(new SuggestField("suggest_field_no_p_sep_or_pos_inc", "foobar", 7));
+    document.add(new SuggestField("suggest_field_no_p_sep_or_pos_inc", "foo bar", 8));
+    document.add(new SuggestField("suggest_field_no_p_sep_or_pos_inc", "the fo", 9));
+    document.add(new SuggestField("suggest_field_no_p_sep_or_pos_inc", "the foo bar", 10));
+    iw.addDocument(document);
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_sep_or_pos_inc", "fo"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 4); // all 4
+    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_sep_or_pos_inc", "foob"));
+    suggest = indexSearcher.suggest(query, 4); // all except "the fo"
+    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testAnalyzerWithSepAndNoPreservePos() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
+    CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, true, false);
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, "suggest_field_no_p_pos_inc"));
+    Document document = new Document();
+    document.add(new SuggestField("suggest_field_no_p_pos_inc", "foobar", 7));
+    document.add(new SuggestField("suggest_field_no_p_pos_inc", "foo bar", 8));
+    document.add(new SuggestField("suggest_field_no_p_pos_inc", "the fo", 9));
+    document.add(new SuggestField("suggest_field_no_p_pos_inc", "the foo bar", 10));
+    iw.addDocument(document);
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_pos_inc", "fo"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 4); // matches all 4
+    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_pos_inc", "foob"));
+    suggest = indexSearcher.suggest(query, 4); // only foobar
+    assertSuggestions(suggest, new Entry("foobar", 7));
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testAnalyzerWithPreservePosAndNoSep() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
+    CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, false, true);
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, "suggest_field_no_p_sep"));
+    Document document = new Document();
+    document.add(new SuggestField("suggest_field_no_p_sep", "foobar", 7));
+    document.add(new SuggestField("suggest_field_no_p_sep", "foo bar", 8));
+    document.add(new SuggestField("suggest_field_no_p_sep", "the fo", 9));
+    document.add(new SuggestField("suggest_field_no_p_sep", "the foo bar", 10));
+    iw.addDocument(document);
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_sep", "fo"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 4); // matches all 4
+    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
+    query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_no_p_sep", "foob"));
+    suggest = indexSearcher.suggest(query, 4); // except the fo
+    assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
+    reader.close();
+    iw.close();
+  }
+
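+  /**
+   * Wraps a filter so its matching documents are materialized into a {@link FixedBitSet}
+   * and exposed through a {@link BitDocIdSet}, giving callers random access to the
+   * accepted documents.
+   */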
+  private static class RandomAccessFilter extends Filter {
+    private final Filter in;
+
+    private RandomAccessFilter(Filter in) {
+      this.in = in;
+    }
+
+    @Override
+    public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
+      DocIdSet docIdSet = in.getDocIdSet(context, acceptDocs);
+      DocIdSetIterator iterator = docIdSet.iterator();
+      FixedBitSet bits = new FixedBitSet(context.reader().maxDoc());
+      if (iterator != null) {
+        bits.or(iterator);
+      }
+      return new BitDocIdSet(bits);
+    }
+
+    @Override
+    public String toString(String field) {
+      return in.toString(field);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (super.equals(obj) == false) {
+        return false;
+      }
+      return in.equals(((RandomAccessFilter) obj).in);
+    }
+
+    @Override
+    public int hashCode() {
+      return 31 * super.hashCode() + in.hashCode();
+    }
+  }
+
+  private static Filter randomAccessFilter(Filter filter) {
+    return new RandomAccessFilter(filter);
+  }
+
+}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java
new file mode 100644
index 0000000..f306912
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestRegexCompletionQuery.java
@@ -0,0 +1,151 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TestSuggestField.Entry;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.assertSuggestions;
+import static org.apache.lucene.search.suggest.document.TestSuggestField.iwcWithSuggestField;
+
+public class TestRegexCompletionQuery extends LuceneTestCase {
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testRegexQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new SuggestField("suggest_field", "suggestion", 1));
+    document.add(new SuggestField("suggest_field", "asuggestion", 2));
+    document.add(new SuggestField("suggest_field", "ssuggestion", 3));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new SuggestField("suggest_field", "wsuggestion", 4));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    RegexCompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|w|s]s?ugg"));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4);
+    assertSuggestions(suggest, new Entry("wsuggestion", 4), new Entry("ssuggestion", 3),
+        new Entry("asuggestion", 2), new Entry("suggestion", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testSimpleRegexContextQuery() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "sduggestion", 5, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "sudggestion", 4, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "sugdgestion", 3, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggdestion", 2, "type4"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|d|g]"));
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5);
+    assertSuggestions(suggest,
+        new Entry("sduggestion", "type1", 5),
+        new Entry("sudggestion", "type2", 4),
+        new Entry("sugdgestion", "type3", 3),
+        new Entry("suggdestion", "type4", 2),
+        new Entry("suggestion", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testRegexContextQueryWithBoost() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    Document document = new Document();
+
+    document.add(new ContextSuggestField("suggest_field", "sduggestion", 5, "type1"));
+    document.add(new ContextSuggestField("suggest_field", "sudggestion", 4, "type2"));
+    document.add(new ContextSuggestField("suggest_field", "sugdgestion", 3, "type3"));
+    iw.addDocument(document);
+
+    document = new Document();
+    document.add(new ContextSuggestField("suggest_field", "suggdestion", 2, "type4"));
+    document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type4"));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "[a|s][d|u|s][u|g]"));
+    ContextQuery contextQuery = new ContextQuery(query);
+    contextQuery.addContext("type1", 6);
+    contextQuery.addContext("type3", 7);
+    contextQuery.addContext("*");
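+    // "*" matches the remaining contexts with the default boost, so the type4 suggestions keep their indexed weights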
+    TopSuggestDocs suggest = suggestIndexSearcher.suggest(contextQuery, 5);
+    assertSuggestions(suggest,
+        new Entry("sduggestion", "type1", 5 * 6),
+        new Entry("sugdgestion", "type3", 3 * 7),
+        new Entry("suggdestion", "type4", 2),
+        new Entry("suggestion", "type4", 1));
+
+    reader.close();
+    iw.close();
+  }
+}
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java
new file mode 100644
index 0000000..4e91528
--- /dev/null
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestSuggestField.java
@@ -0,0 +1,643 @@
+package org.apache.lucene.search.suggest.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CyclicBarrier;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRefBuilder;
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.search.suggest.document.TopSuggestDocs.SuggestScoreDoc;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+public class TestSuggestField extends LuceneTestCase {
+
+  public Directory dir;
+
+  @Before
+  public void before() throws Exception {
+    dir = newDirectory();
+  }
+
+  @After
+  public void after() throws Exception {
+    dir.close();
+  }
+
+  @Test
+  public void testEmptySuggestion() throws Exception {
+    try {
+      new SuggestField("suggest_field", "", 3);
+      fail("no exception thrown when indexing zero length suggestion");
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("value"));
+    }
+  }
+
+  @Test
+  public void testNegativeWeight() throws Exception {
+    try {
+      new SuggestField("suggest_field", "sugg", -1);
+      fail("no exception thrown when indexing suggestion with negative weight");
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("weight"));
+    }
+  }
+
+  @Test
+  public void testReservedChars() throws Exception {
+    CharsRefBuilder charsRefBuilder = new CharsRefBuilder();
+    charsRefBuilder.append("sugg");
+    charsRefBuilder.setCharAt(2, (char) CompletionAnalyzer.SEP_LABEL);
+    try {
+      new SuggestField("name", charsRefBuilder.toString(), 1);
+      fail("no exception thrown for suggestion value containing SEP_LABEL:" + CompletionAnalyzer.SEP_LABEL);
+    } catch (IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("[0x1f]"));
+    }
+
+    charsRefBuilder.setCharAt(2, (char) CompletionAnalyzer.HOLE_CHARACTER);
+    try {
+      new SuggestField("name", charsRefBuilder.toString(), 1);
+      fail("no exception thrown for suggestion value containing HOLE_CHARACTER:" + CompletionAnalyzer.HOLE_CHARACTER);
+    } catch (IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("[0x1e]"));
+    }
+
+    charsRefBuilder.setCharAt(2, (char) NRTSuggesterBuilder.END_BYTE);
+    try {
+      new SuggestField("name", charsRefBuilder.toString(), 1);
+      fail("no exception thrown for suggestion value containing END_BYTE:" + NRTSuggesterBuilder.END_BYTE);
+    } catch (IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("[0x0]"));
+    }
+  }
+
+  @Test
+  public void testEmpty() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"));
+    TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3);
+    assertThat(lookupDocs.totalHits, equalTo(0));
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testDupSuggestFieldValues() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    final int num = Math.min(1000, atLeast(300));
+    int[] weights = new int[num];
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      weights[i] = Math.abs(random().nextInt());
+      document.add(new SuggestField("suggest_field", "abc", weights[i]));
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
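+    // every document indexed the same suggestion value "abc"; expect one hit per document, ordered by descending weight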
+    Entry[] expectedEntries = new Entry[num];
+    Arrays.sort(weights);
+    for (int i = 1; i <= num; i++) {
+      expectedEntries[i - 1] = new Entry("abc", weights[num - i]);
+    }
+
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc"));
+    TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, num);
+    assertSuggestions(lookupDocs, expectedEntries);
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testNRTDeletedDocFiltering() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    // using IndexWriter instead of RandomIndexWriter
+    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
+
+    int num = Math.min(1000, atLeast(10));
+
+    int numLive = 0;
+    List<Entry> expectedEntries = new ArrayList<>();
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, num - i));
+      if (i % 2 == 0) {
+        document.add(newStringField("str_field", "delete", Field.Store.YES));
+      } else {
+        numLive++;
+        expectedEntries.add(new Entry("abc_" + i, num - i));
+        document.add(newStringField("str_field", "no_delete", Field.Store.YES));
+      }
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    iw.deleteDocuments(new Term("str_field", "delete"));
+
+    DirectoryReader reader = DirectoryReader.open(iw, true);
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, numLive);
+    assertSuggestions(suggest, expectedEntries.toArray(new Entry[expectedEntries.size()]));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testSuggestOnAllFilteredDocuments() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, i));
+      document.add(newStringField("str_fld", "deleted", Field.Store.NO));
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    Filter filter = new QueryWrapperFilter(new TermsQuery("str_fld", new BytesRef("non_existent")));
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    // no random access required;
+    // calling suggest with filter that does not match any documents should early terminate
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
+    TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+    assertThat(suggest.totalHits, equalTo(0));
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testSuggestOnAllDeletedDocuments() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    // using IndexWriter instead of RandomIndexWriter
+    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, i));
+      document.add(newStringField("delete", "delete", Field.Store.NO));
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    iw.deleteDocuments(new Term("delete", "delete"));
+
+    DirectoryReader reader = DirectoryReader.open(iw, true);
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+    assertThat(suggest.totalHits, equalTo(0));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testSuggestOnMostlyDeletedDocuments() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    // using IndexWriter instead of RandomIndexWriter
+    IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+    for (int i = 1; i <= num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, i));
+      document.add(new IntField("weight_fld", i, Field.Store.YES));
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    iw.deleteDocuments(NumericRangeQuery.newIntRange("weight_fld", 2, null, true, false));
+
+    DirectoryReader reader = DirectoryReader.open(iw, true);
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 1);
+    assertSuggestions(suggest, new Entry("abc_1", 1));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testMultipleSuggestFieldsPerDoc() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "sug_field_1", "sug_field_2"));
+
+    Document document = new Document();
+    document.add(new SuggestField("sug_field_1", "apple", 4));
+    document.add(new SuggestField("sug_field_2", "april", 3));
+    iw.addDocument(document);
+    document = new Document();
+    document.add(new SuggestField("sug_field_1", "aples", 3));
+    document.add(new SuggestField("sug_field_2", "apartment", 2));
+    iw.addDocument(document);
+
+    if (rarely()) {
+      iw.commit();
+    }
+
+    DirectoryReader reader = iw.getReader();
+
+    SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("sug_field_1", "ap"));
+    TopSuggestDocs suggestDocs1 = suggestIndexSearcher.suggest(query, 4);
+    assertSuggestions(suggestDocs1, new Entry("apple", 4), new Entry("aples", 3));
+    query = new PrefixCompletionQuery(analyzer, new Term("sug_field_2", "ap"));
+    TopSuggestDocs suggestDocs2 = suggestIndexSearcher.suggest(query, 4);
+    assertSuggestions(suggestDocs2, new Entry("april", 3), new Entry("apartment", 2));
+
+    // check that the doc ids are consistent
+    for (int i = 0; i < suggestDocs1.scoreDocs.length; i++) {
+      ScoreDoc suggestScoreDoc = suggestDocs1.scoreDocs[i];
+      assertThat(suggestScoreDoc.doc, equalTo(suggestDocs2.scoreDocs[i].doc));
+    }
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testEarlyTermination() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+
+    // have segments of 4 documents
+    // with descending suggestion weights
+    // suggest should early terminate for
+    // segments with docs having lower suggestion weights
+    for (int i = num; i > 0; i--) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, i));
+      iw.addDocument(document);
+      if (i % 4 == 0) {
+        iw.commit();
+      }
+    }
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, 1);
+    assertSuggestions(suggest, new Entry("abc_" + num, num));
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testMultipleSegments() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    int num = Math.min(1000, atLeast(10));
+    List<Entry> entries = new ArrayList<>();
+
+    // ensure at least some segments have no suggest field
+    for (int i = num; i > 0; i--) {
+      Document document = new Document();
+      if (random().nextInt(4) == 1) {
+        document.add(new SuggestField("suggest_field", "abc_" + i, i));
+        entries.add(new Entry("abc_" + i, i));
+      }
+      document.add(new IntField("weight_fld", i, Field.Store.YES));
+      iw.addDocument(document);
+      if (usually()) {
+        iw.commit();
+      }
+    }
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, (entries.size() == 0) ? 1 : entries.size());
+    assertSuggestions(suggest, entries.toArray(new Entry[entries.size()]));
+
+    reader.close();
+    iw.close();
+  }
+
+
+  @Test
+  public void testReturnedDocID() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+
+    int num = Math.min(1000, atLeast(10));
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      document.add(new SuggestField("suggest_field", "abc_" + i, num));
+      document.add(new IntField("int_field", i, Field.Store.YES));
+      iw.addDocument(document);
+
+      if (random().nextBoolean()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
+    TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+    assertEquals(num, suggest.totalHits);
+    for (SuggestScoreDoc suggestScoreDoc : suggest.scoreLookupDocs()) {
+      String key = suggestScoreDoc.key.toString();
+      assertTrue(key.startsWith("abc_"));
+      String substring = key.substring(4);
+      int fieldValue = Integer.parseInt(substring);
+      StoredDocument doc = reader.document(suggestScoreDoc.doc);
+      assertEquals(doc.getField("int_field").numericValue().intValue(), fieldValue);
+    }
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testScoring() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+
+    int num = Math.min(1000, atLeast(100));
+    String[] prefixes = {"abc", "bac", "cab"};
+    Map<String, Integer> mappings = new HashMap<>();
+    for (int i = 0; i < num; i++) {
+      Document document = new Document();
+      String suggest = prefixes[i % 3] + TestUtil.randomSimpleString(random(), 10) + "_" + i;
+      int weight = Math.abs(random().nextInt());
+      document.add(new SuggestField("suggest_field", suggest, weight));
+      mappings.put(suggest, weight);
+      iw.addDocument(document);
+
+      if (usually()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+    for (String prefix : prefixes) {
+      PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", prefix));
+      TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+      assertTrue(suggest.totalHits > 0);
+      float topScore = -1;
+      for (SuggestScoreDoc scoreDoc : suggest.scoreLookupDocs()) {
+        if (topScore != -1) {
+          assertTrue(topScore >= scoreDoc.score);
+        }
+        topScore = scoreDoc.score;
+        assertThat((float) mappings.get(scoreDoc.key.toString()), equalTo(scoreDoc.score));
+        assertNotNull(mappings.remove(scoreDoc.key.toString()));
+      }
+    }
+
+    assertThat(mappings.size(), equalTo(0));
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testRealisticKeys() throws Exception {
+    Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
+    LineFileDocs lineFileDocs = new LineFileDocs(random());
+    int num = Math.min(1000, atLeast(100));
+    Map<String, Integer> mappings = new HashMap<>();
+    for (int i = 0; i < num; i++) {
+      Document document = lineFileDocs.nextDoc();
+      String title = document.getField("title").stringValue();
+      int weight = Math.abs(random().nextInt());
+      Integer prevWeight = mappings.get(title);
+      if (prevWeight == null || prevWeight < weight) {
+        mappings.put(title, weight);
+      }
+      Document doc = new Document();
+      doc.add(new SuggestField("suggest_field", title, weight));
+      iw.addDocument(doc);
+
+      if (rarely()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
+    SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
+
+    for (Map.Entry<String, Integer> entry : mappings.entrySet()) {
+      String title = entry.getKey();
+
+      PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", title));
+      TopSuggestDocs suggest = indexSearcher.suggest(query, mappings.size());
+      assertTrue(suggest.totalHits > 0);
+      boolean matched = false;
+      for (ScoreDoc scoreDoc : suggest.scoreDocs) {
+        matched = Float.compare(scoreDoc.score, (float) entry.getValue()) == 0;
+        if (matched) {
+          break;
+        }
+      }
+      assertTrue("at least one of the entries should have the score", matched);
+    }
+
+    reader.close();
+    iw.close();
+  }
+
+  @Test
+  public void testThreads() throws Exception {
+    final Analyzer analyzer = new MockAnalyzer(random());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field_1", "suggest_field_2", "suggest_field_3"));
+    int num = Math.min(1000, atLeast(100));
+    final String prefix1 = "abc1_";
+    final String prefix2 = "abc2_";
+    final String prefix3 = "abc3_";
+    final Entry[] entries1 = new Entry[num];
+    final Entry[] entries2 = new Entry[num];
+    final Entry[] entries3 = new Entry[num];
+    for (int i = 0; i < num; i++) {
+      int weight = num - (i + 1);
+      entries1[i] = new Entry(prefix1 + weight, weight);
+      entries2[i] = new Entry(prefix2 + weight, weight);
+      entries3[i] = new Entry(prefix3 + weight, weight);
+    }
+    for (int i = 0; i < num; i++) {
+      Document doc = new Document();
+      doc.add(new SuggestField("suggest_field_1", prefix1 + i, i));
+      doc.add(new SuggestField("suggest_field_2", prefix2 + i, i));
+      doc.add(new SuggestField("suggest_field_3", prefix3 + i, i));
+      iw.addDocument(doc);
+
+      if (rarely()) {
+        iw.commit();
+      }
+    }
+
+    DirectoryReader reader = iw.getReader();
+    int numThreads = TestUtil.nextInt(random(), 2, 7);
+    Thread[] threads = new Thread[numThreads];
+    final CyclicBarrier startingGun = new CyclicBarrier(numThreads + 1);
+    final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>();
+    final SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
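+    // each thread waits on the barrier, then runs the same three prefix queries against the shared searcher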
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            startingGun.await();
+            PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_1", prefix1));
+            TopSuggestDocs suggest = indexSearcher.suggest(query, num);
+            assertSuggestions(suggest, entries1);
+            query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_2", prefix2));
+            suggest = indexSearcher.suggest(query, num);
+            assertSuggestions(suggest, entries2);
+            query = new PrefixCompletionQuery(analyzer, new Term("suggest_field_3", prefix3));
+            suggest = indexSearcher.suggest(query, num);
+            assertSuggestions(suggest, entries3);
+          } catch (Throwable e) {
+            errors.add(e);
+          }
+        }
+      };
+      threads[i].start();
+    }
+
+    startingGun.await();
+    for (Thread t : threads) {
+      t.join();
+    }
+    assertTrue(errors.toString(), errors.isEmpty());
+
+    reader.close();
+    iw.close();
+  }
+
+  static class Entry {
+    final String output;
+    final float value;
+    final String context;
+
+    Entry(String output, float value) {
+      this(output, null, value);
+    }
+
+    Entry(String output, String context, float value) {
+      this.output = output;
+      this.value = value;
+      this.context = context;
+    }
+  }
+
+  static void assertSuggestions(TopDocs actual, Entry... expected) {
+    SuggestScoreDoc[] suggestScoreDocs = (SuggestScoreDoc[]) actual.scoreDocs;
+    assertThat(suggestScoreDocs.length, equalTo(expected.length));
+    for (int i = 0; i < suggestScoreDocs.length; i++) {
+      SuggestScoreDoc lookupDoc = suggestScoreDocs[i];
+      String msg = "Expected: " + toString(expected[i]) + " Actual: " + toString(lookupDoc);
+      assertThat(msg, lookupDoc.key.toString(), equalTo(expected[i].output));
+      assertThat(msg, lookupDoc.score, equalTo(expected[i].value));
+      assertThat(msg, lookupDoc.context, equalTo(expected[i].context));
+    }
+  }
+
+  private static String toString(Entry expected) {
+    return "key:"+ expected.output+" score:"+expected.value+" context:"+expected.context;
+  }
+
+  private static String toString(SuggestScoreDoc actual) {
+    return "key:"+ actual.key.toString()+" score:"+actual.score+" context:"+actual.context;
+  }
+
+  static IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, String... suggestFields) {
+    return iwcWithSuggestField(analyzer, asSet(suggestFields));
+  }
+
+  static IndexWriterConfig iwcWithSuggestField(Analyzer analyzer, final Set<String> suggestFields) {
+    IndexWriterConfig iwc = newIndexWriterConfig(random(), analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    Codec filterCodec = new Lucene50Codec() {
+      PostingsFormat postingsFormat = new Completion50PostingsFormat();
+
+      @Override
+      public PostingsFormat getPostingsFormatForField(String field) {
+        if (suggestFields.contains(field)) {
+          return postingsFormat;
+        }
+        return super.getPostingsFormatForField(field);
+      }
+    };
+    iwc.setCodec(filterCodec);
+    return iwc;
+  }
+}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
index 34e5adc..34d14dc 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompoundFormatTestCase.java
@@ -320,7 +320,7 @@
     si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
     Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
     try {
-      cfs.makeLock("foobar");
+      cfs.obtainLock("foobar");
       fail("didn't get expected exception");
     } catch (UnsupportedOperationException expected) {
       // expected UOE
diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java
index 068a299..875b378 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/MockFileSystemTestCase.java
@@ -25,6 +25,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 
+import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
 
@@ -57,6 +58,7 @@
   
   /** Test that URIs are not corrupted */
   public void testURI() throws IOException {
+    assumeFalse("broken on J9: see https://issues.apache.org/jira/browse/LUCENE-6517", Constants.JAVA_VENDOR.startsWith("IBM"));
     Path dir = wrap(createTempDir());
 
     Path f1 = dir.resolve("file1");
diff --git a/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java b/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java
index 1b732eb..a4cfe2d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/mockfile/WindowsFS.java
@@ -17,15 +17,19 @@
  * limitations under the License.
  */
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.file.CopyOption;
 import java.nio.file.FileSystem;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
 import java.nio.file.attribute.BasicFileAttributeView;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 /** 
  * FileSystem that (imperfectly) acts like windows. 
@@ -33,8 +37,7 @@
  * Currently this filesystem only prevents deletion of open files.
  */
 public class WindowsFS extends HandleTrackingFS {
-  private final Map<Object,Integer> openFiles = new HashMap<>();
-  
+  final Map<Object,Integer> openFiles = new HashMap<>();
   // TODO: try to make this as realistic as possible... it depends e.g. how you
   // open files, if you map them, etc, if you can delete them (Uwe knows the rules)
   
@@ -60,8 +63,10 @@
 
   @Override
   protected void onOpen(Path path, Object stream) throws IOException {
-    Object key = getKey(path);
     synchronized (openFiles) {
+      final Object key = getKey(path);
+      // we have to read the key under the lock otherwise we might leak the openFile handle
+      // if we concurrently delete or move this file.
       Integer v = openFiles.get(key);
       if (v != null) {
         v = Integer.valueOf(v.intValue()+1);
@@ -74,9 +79,10 @@
 
   @Override
   protected void onClose(Path path, Object stream) throws IOException {
-    Object key = getKey(path);
+    Object key = getKey(path); // here we can read this outside of the lock
     synchronized (openFiles) {
       Integer v = openFiles.get(key);
+      assert v != null;
       if (v != null) {
         if (v.intValue() == 1) {
           openFiles.remove(key);
@@ -111,19 +117,25 @@
 
   @Override
   public void delete(Path path) throws IOException {
-    checkDeleteAccess(path);
-    super.delete(path);
+    synchronized (openFiles) {
+      checkDeleteAccess(path);
+      super.delete(path);
+    }
   }
 
   @Override
   public void move(Path source, Path target, CopyOption... options) throws IOException {
-    checkDeleteAccess(source);
-    super.move(source, target, options);
+    synchronized (openFiles) {
+      checkDeleteAccess(source);
+      super.move(source, target, options);
+    }
   }
 
   @Override
   public boolean deleteIfExists(Path path) throws IOException {
-    checkDeleteAccess(path);
-    return super.deleteIfExists(path);
+    synchronized (openFiles) {
+      checkDeleteAccess(path);
+      return super.deleteIfExists(path);
+    }
   }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java
index 2a30a17..cbb8264 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanQuery.java
@@ -44,7 +44,7 @@
   @Override
   public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, SpanCollectorFactory factory) throws IOException {
     SpanWeight weight = in.createWeight(searcher, needsScores, factory);
-    return new AssertingSpanWeight(weight);
+    return new AssertingSpanWeight(searcher, weight);
   }
 
   @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java
index d685832..ec9c152 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/spans/AssertingSpanWeight.java
@@ -20,6 +20,9 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -38,8 +41,8 @@
    * @param in the SpanWeight to wrap
    * @throws IOException on error
    */
-  public AssertingSpanWeight(SpanWeight in) throws IOException {
-    super((SpanQuery) in.getQuery(), in.similarity, in.collectorFactory);
+  public AssertingSpanWeight(IndexSearcher searcher, SpanWeight in) throws IOException {
+    super((SpanQuery) in.getQuery(), searcher, null, in.collectorFactory);
     this.in = in;
   }
 
@@ -60,4 +63,24 @@
   public void extractTerms(Set<Term> terms) {
     in.extractTerms(terms);
   }
+
+  @Override
+  public float getValueForNormalization() throws IOException {
+    return in.getValueForNormalization();
+  }
+
+  @Override
+  public void normalize(float queryNorm, float topLevelBoost) {
+    in.normalize(queryNorm, topLevelBoost);
+  }
+
+  @Override
+  public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+    return in.scorer(context, acceptDocs);
+  }
+
+  @Override
+  public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+    return in.explain(context, doc);
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
new file mode 100644
index 0000000..edda9d5
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
@@ -0,0 +1,275 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+
+/** Base class for per-LockFactory tests. */
+public abstract class BaseLockFactoryTestCase extends LuceneTestCase {
+  
+  /** Subclass returns the Directory to be tested; if it's
+   *  an FS-based directory it should point to the specified
+   *  path, else it can ignore it. */
+  protected abstract Directory getDirectory(Path path) throws IOException;
+  
+  /** Test obtaining and releasing locks, checking validity */
+  public void testBasics() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    
+    Lock l = dir.obtainLock("commit");
+    try {
+      dir.obtainLock("commit");
+      fail("succeeded in obtaining lock twice, didn't get exception");
+    } catch (LockObtainFailedException expected) {}
+    l.close();
+    
+    // Make sure we can obtain first one again:
+    l = dir.obtainLock("commit");
+    l.close();
+    
+    dir.close();
+  }
+  
+  /** Test closing locks twice */
+  public void testDoubleClose() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    
+    Lock l = dir.obtainLock("commit");
+    l.close();
+    l.close(); // close again, should be no exception
+    
+    dir.close();
+  }
+  
+  /** Test ensureValid returns true after acquire */
+  public void testValidAfterAcquire() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+
+    Lock l = dir.obtainLock("commit");
+    l.ensureValid(); // no exception
+    l.close();
+    
+    dir.close();
+  }
+  
+  /** Test ensureValid throws exception after close */
+  public void testInvalidAfterClose() throws IOException {
+    Directory dir = getDirectory(createTempDir());
+    
+    Lock l = dir.obtainLock("commit");
+    l.close();
+
+    try {
+      l.ensureValid();
+      fail("didn't get exception");
+    } catch (AlreadyClosedException expected) {}
+    
+    dir.close();
+  }
+  
+  public void testObtainConcurrently() throws InterruptedException, IOException {
+    final Directory directory = getDirectory(createTempDir());
+    final AtomicBoolean running = new AtomicBoolean(true);
+    final AtomicInteger atomicCounter = new AtomicInteger(0);
+    final ReentrantLock assertingLock = new ReentrantLock();
+    int numThreads = 2 + random().nextInt(10);
+    final int runs = atLeast(10000);
+    CyclicBarrier barrier = new CyclicBarrier(numThreads);
+    Thread[] threads = new Thread[numThreads];
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            barrier.await();
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+          while (running.get()) {
+            try (Lock lock = directory.obtainLock("foo.lock")) {
+              assertFalse(assertingLock.isLocked());
+              if (assertingLock.tryLock()) {
+                assertingLock.unlock();
+              } else {
+                fail();
+              }
+              assert lock != null; // stupid compiler
+            } catch (IOException ex) {
+              //
+            }
+            if (atomicCounter.incrementAndGet() > runs) {
+              running.set(false);
+            }
+          }
+        }
+      };
+      threads[i].start();
+    }
+    
+    for (int i = 0; i < threads.length; i++) {
+      threads[i].join();
+    }
+    directory.close();
+  }
+  
+  // Verify: do stress test, by opening IndexReaders and
+  // IndexWriters over & over in 2 threads and making sure
+  // no unexpected exceptions are raised:
+  public void testStressLocks() throws Exception {
+    Directory dir = getDirectory(createTempDir());
+    
+    // First create a 1 doc index:
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
+    addDoc(w);
+    w.close();
+    
+    WriterThread writer = new WriterThread(100, dir);
+    SearcherThread searcher = new SearcherThread(100, dir);
+    writer.start();
+    searcher.start();
+    
+    while(writer.isAlive() || searcher.isAlive()) {
+      Thread.sleep(1000);
+    }
+    
+    assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException);
+    assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException);
+    
+    dir.close();
+  }
+  
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(newTextField("content", "aaa", Field.Store.NO));
+    writer.addDocument(doc);
+  }
+  
+  private class WriterThread extends Thread { 
+    private Directory dir;
+    private int numIteration;
+    public boolean hitException = false;
+    public WriterThread(int numIteration, Directory dir) {
+      this.numIteration = numIteration;
+      this.dir = dir;
+    }
+    @Override
+    public void run() {
+      IndexWriter writer = null;
+      for(int i=0;i<this.numIteration;i++) {
+        try {
+          writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+        } catch (LockObtainFailedException e) {
+          // lock obtain timed out
+          // NOTE: we should at some point
+          // consider this a failure?  Lock
+          // acquisition across IndexReaders &
+          // IndexWriters should be "fair" (i.e.
+          // FIFO).
+        } catch (Exception e) {
+          hitException = true;
+          System.out.println("Stress Test Index Writer: creation hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        if (writer != null) {
+          try {
+            addDoc(writer);
+          } catch (IOException e) {
+            hitException = true;
+            System.out.println("Stress Test Index Writer: addDoc hit unexpected exception: " + e.toString());
+            e.printStackTrace(System.out);
+            break;
+          }
+          try {
+            writer.close();
+          } catch (IOException e) {
+            hitException = true;
+            System.out.println("Stress Test Index Writer: close hit unexpected exception: " + e.toString());
+            e.printStackTrace(System.out);
+            break;
+          }
+          writer = null;
+        }
+      }
+    }
+  }
+  
+  private class SearcherThread extends Thread { 
+    private Directory dir;
+    private int numIteration;
+    public boolean hitException = false;
+    public SearcherThread(int numIteration, Directory dir) {
+      this.numIteration = numIteration;
+      this.dir = dir;
+    }
+    @Override
+    public void run() {
+      IndexReader reader = null;
+      IndexSearcher searcher = null;
+      Query query = new TermQuery(new Term("content", "aaa"));
+      for(int i=0;i<this.numIteration;i++) {
+        try{
+          reader = DirectoryReader.open(dir);
+          searcher = newSearcher(reader);
+        } catch (Exception e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        try {
+          searcher.search(query, 1000);
+        } catch (IOException e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+        // System.out.println(hits.length() + " total results");
+        try {
+          reader.close();
+        } catch (IOException e) {
+          hitException = true;
+          System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
+          e.printStackTrace(System.out);
+          break;
+        }
+      }
+    }
+  }
+  
+}
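For context (not part of the patch itself): a concrete lock-factory test only needs to implement getDirectory(Path); every test above then runs against the returned Directory. A minimal sketch, assuming the native FS lock factory and the inherited newFSDirectory helper from LuceneTestCase (the class name here is hypothetical):

    import java.io.IOException;
    import java.nio.file.Path;

    import org.apache.lucene.store.BaseLockFactoryTestCase;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.NativeFSLockFactory;

    public class TestMyNativeFSLockFactory extends BaseLockFactoryTestCase {
      @Override
      protected Directory getDirectory(Path path) throws IOException {
        // FS-based directories should honor the supplied path; heap-based ones may ignore it
        return newFSDirectory(path, NativeFSLockFactory.INSTANCE);
      }
    }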
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
index 431b870..8018f16 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -34,6 +34,8 @@
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.index.DirectoryReader;
@@ -74,7 +76,6 @@
   boolean assertNoDeleteOpenFile = false;
   boolean preventDoubleWrite = true;
   boolean trackDiskUsage = false;
-  boolean wrapLocking = true;
   boolean useSlowOpenClosers = LuceneTestCase.TEST_NIGHTLY;
   boolean enableVirusScanner = true;
   boolean allowRandomFileNotFoundException = true;
@@ -82,7 +83,7 @@
   private Set<String> unSyncedFiles;
   private Set<String> createdFiles;
   private Set<String> openFilesForWrite = new HashSet<>();
-  Map<String,Exception> openLocks = Collections.synchronizedMap(new HashMap<String,Exception>());
+  ConcurrentMap<String,RuntimeException> openLocks = new ConcurrentHashMap<>();
   volatile boolean crashed;
   private ThrottledIndexOutput throttledOutput;
   private Throttling throttling = LuceneTestCase.TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER;
@@ -699,19 +700,6 @@
   public void setAssertNoUnrefencedFilesOnClose(boolean v) {
     assertNoUnreferencedFilesOnClose = v;
   }
-  
-  /**
-   * Set to false if you want to return the pure {@link LockFactory} and not
-   * wrap all lock with {@code AssertingLock}.
-   * <p>
-   * Be careful if you turn this off: {@code MockDirectoryWrapper} might
-   * no longer be able to detect if you forget to close an {@link IndexWriter},
-   * and spit out horribly scary confusing exceptions instead of
-   * simply telling you that.
-   */
-  public void setAssertLocks(boolean v) {
-    this.wrapLocking = v;
-  }
 
   @Override
   public synchronized void close() throws IOException {
@@ -748,7 +736,7 @@
       }
       if (openLocks.size() > 0) {
         Exception cause = null;
-        Iterator<Exception> stacktraces = openLocks.values().iterator();
+        Iterator<RuntimeException> stacktraces = openLocks.values().iterator();
         if (stacktraces.hasNext()) {
           cause = stacktraces.next();
         }
@@ -992,47 +980,12 @@
   }
 
   @Override
-  public synchronized Lock makeLock(String name) {
+  public synchronized Lock obtainLock(String name) throws IOException {
     maybeYield();
-    if (wrapLocking) {
-      return new AssertingLock(super.makeLock(name), name);
-    } else {
-      return super.makeLock(name);
-    }
+    return super.obtainLock(name);
+    // TODO: consider mocking locks, but not all the time, can hide bugs
   }
   
-  private final class AssertingLock extends Lock {
-    private final Lock delegateLock;
-    private final String name;
-    
-    AssertingLock(Lock delegate, String name) {
-      this.delegateLock = delegate;
-      this.name = name;
-    }
-
-    @Override
-    public boolean obtain() throws IOException {
-      if (delegateLock.obtain()) {
-        assert delegateLock == NoLockFactory.SINGLETON_LOCK || !openLocks.containsKey(name);
-        openLocks.put(name, new RuntimeException("lock \"" + name + "\" was not released"));
-        return true;
-      } else {
-        return false;
-      }
-    }
-
-    @Override
-    public void close() throws IOException {
-      delegateLock.close();
-      openLocks.remove(name);
-    }
-
-    @Override
-    public boolean isLocked() throws IOException {
-      return delegateLock.isLocked();
-    }
-  }  
-  
   /** Use this when throwing fake {@code IOException},
    *  e.g. from {@link MockDirectoryWrapper.Failure}. */
   public static class FakeIOException extends IOException {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index f2280e4..51306db 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -489,7 +489,7 @@
   /**
    * Suite failure marker (any error in the test or suite scope).
    */
-  private static TestRuleMarkFailure suiteFailureMarker;
+  protected static TestRuleMarkFailure suiteFailureMarker;
   
   /**
    * Temporary files cleanup rule.
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java b/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java
index f2924c2..8b78ff0 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/QuickPatchThreadsFilter.java
@@ -26,15 +26,21 @@
   static final boolean isJ9;
   
   static {
-    isJ9 = System.getProperty("java.vm.info", "<?>").contains("IBM J9");
+    isJ9 = Constants.JAVA_VENDOR.startsWith("IBM");
   }
 
   @Override
   public boolean reject(Thread t) {
     if (isJ9) {
+      // LUCENE-6518
+      if ("ClassCache Reaper".equals(t.getName())) {
+        return true;
+      }
+
+      // LUCENE-4736
       StackTraceElement [] stack = t.getStackTrace();
       if (stack.length > 0 && stack[stack.length - 1].getClassName().equals("java.util.Timer$TimerImpl")) {
-        return true; // LUCENE-4736
+        return true;
       }
     }
     return false;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index c789bd3..a66c6be 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -271,7 +271,7 @@
     ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
     // TODO: actually use the dir's locking, unless test uses a special method?
     // some tests e.g. exception tests become much more complicated if they have to close the writer
-    try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.makeLock(dir, "bogus"))) {
+    try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.obtainLock(dir, "bogus"))) {
       checker.setCrossCheckTermVectors(crossCheckTermVectors);
       checker.setFailFast(failFast);
       checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);
diff --git a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java
index 47e4311..333ff00 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/mockfile/TestWindowsFS.java
@@ -17,15 +17,25 @@
  * limitations under the License.
  */
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.Exception;
+import java.lang.InterruptedException;
+import java.lang.NoSuchFieldException;
+import java.lang.RuntimeException;
 import java.net.URI;
 import java.nio.file.FileSystem;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.lucene.mockfile.FilterPath;
+import org.apache.lucene.mockfile.WindowsFS;
 import org.apache.lucene.util.Constants;
 
 /** Basic tests for WindowsFS */
@@ -95,4 +105,57 @@
     }
     is.close();
   }
+
+  public void testOpenDeleteConcurrently() throws IOException, Exception {
+    final Path dir = wrap(createTempDir());
+    final Path file = dir.resolve("thefile");
+    final CyclicBarrier barrier = new CyclicBarrier(2);
+    final AtomicBoolean stopped = new AtomicBoolean(false);
+    Thread t = new Thread() {
+      @Override
+      public void run() {
+        try {
+          barrier.await();
+        } catch (Exception ex) {
+          throw new RuntimeException(ex);
+        }
+        while (stopped.get() == false) {
+          try {
+            if (random().nextBoolean()) {
+              Files.delete(file);
+            } else if (random().nextBoolean()) {
+              Files.deleteIfExists(file);
+            } else {
+              Path target = file.resolveSibling("other");
+              Files.move(file, target);
+              Files.delete(target);
+            }
+          } catch (IOException ex) {
+            // continue
+          }
+        }
+      }
+    };
+    t.start();
+    barrier.await();
+    try {
+      final int iters = 10 + random().nextInt(100);
+      for (int i = 0; i < iters; i++) {
+        boolean opened = false;
+        try (OutputStream stream = Files.newOutputStream(file)) {
+          opened = true;
+          stream.write(0);
+          // just create
+        } catch (FileNotFoundException | NoSuchFileException ex) {
+          assertEquals("File handle leaked - file is closed but still registered", 0, ((WindowsFS) dir.getFileSystem().provider()).openFiles.size());
+          assertFalse("caught FNF on close", opened);
+        }
+        assertEquals("File handle leaked - file is closed but still registered", 0, ((WindowsFS) dir.getFileSystem().provider()).openFiles.size());
+        Files.deleteIfExists(file);
+      }
+    } finally {
+      stopped.set(true);
+      t.join();
+    }
+  }
 }
diff --git a/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java b/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java
index 3602e01..fd30582 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/store/TestMockDirectoryWrapper.java
@@ -47,32 +47,6 @@
     super.testThreadSafety();
   }
   
-  public void testFailIfIndexWriterNotClosed() throws IOException {
-    MockDirectoryWrapper dir = newMockDirectory();
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    try {
-      dir.close();
-      fail();
-    } catch (Exception expected) {
-      assertTrue(expected.getMessage().contains("there are still open locks"));
-    } finally {
-      IOUtils.closeWhileHandlingException(iw);
-    }
-  }
-  
-  public void testFailIfIndexWriterNotClosedChangeLockFactory() throws IOException {
-    MockDirectoryWrapper dir = newMockDirectory(random(), new SingleInstanceLockFactory());
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
-    try {
-      dir.close();
-      fail();
-    } catch (Exception expected) {
-      assertTrue(expected.getMessage().contains("there are still open locks"));
-    } finally {
-      IOUtils.closeWhileHandlingException(iw);
-    }
-  }
-  
   public void testDiskFull() throws IOException {
     // test writeBytes
     MockDirectoryWrapper dir = newMockDirectory();
diff --git a/lucene/tools/junit4/tests.policy b/lucene/tools/junit4/tests.policy
index d4e0d6c..3bd0845 100644
--- a/lucene/tools/junit4/tests.policy
+++ b/lucene/tools/junit4/tests.policy
@@ -73,6 +73,8 @@
   permission java.lang.RuntimePermission "getClassLoader";
   // needed to test unmap hack on platforms that support it
   permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
+  // needed because the benchmarks use cyberneko on J9
+  permission java.lang.RuntimePermission "accessClassInPackage.org.apache.xerces.util";
   // needed by jacoco to dump coverage
   permission java.lang.RuntimePermission "shutdownHooks";
   
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 869d961..986ab0b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -72,6 +72,16 @@
 * SolrJ's CollectionAdminRequest class is now marked as abstract. Use one of its concrete
   sub-classes instead.
 
+* Solr no longer supports forcefully unlocking an index.
+  This is no longer supported by the underlying Lucene locking
+  framework. The setting in solrconfig.xml has no effect anymore.
+  Background: If you use the native lock factory, unlocking should
+  not be needed, because the operating system clears the locks
+  automatically after process shutdown. If you are using the
+  simple lock factory (not recommended) or the HDFS lock factory,
+  you may need to manually unlock by deleting the lock file from
+  the filesystem / HDFS.
+
 Detailed Change List
 ----------------------
 
@@ -84,9 +94,26 @@
 * SOLR-7389: Expose znodeVersion property for each of the collections returned for the clusterstatus
   operation in the collections API (Marius Grama via shalin)
 
+* SOLR-7622: A DocTransformer can now request fields from the SolrIndexSearcher that are not
+  necessarily returned in the final SolrDocument by returning a list of fields from 
+  DocTransformer#getExtraRequestFields  (ryan)
+
 Bug Fixes
 ----------------------
-(no changes)
+
+* SOLR-7361: Slow-loading SolrCores should not prevent other SolrCores that have finished loading
+  from serving requests. (Mark Miller, Timothy Potter, Ramkumar Aiyengar)
+
+* SOLR-7616: Faceting on a numeric field with a unique() subfacet function on another numeric field
+  can result in incorrect results or an exception. (yonik)
+
+* SOLR-7518: New Facet Module should respect shards.tolerant and process all non-failing shards
+  instead of throwing an exception. (yonik)
+
+* SOLR-4506: Clean up old (unused) index directories in the background after initializing a new index;
+  previously, Solr left old index.yyyyMMddHHmmssSSS directories behind in the data directory after
+  failed recoveries, which unnecessarily consumed disk space. (Mark Miller, Timothy Potter)
+
 
 Optimizations
 ----------------------
@@ -97,6 +124,23 @@
 
 * SOLR-7595: Allow method chaining for all CollectionAdminRequests in Solrj. (shalin)
 
+* SOLR-7146: MiniSolrCloudCluster based tests can fail with ZooKeeperException NoNode for /live_nodes.
+  (Vamsee Yarlagadda via shalin)
+
+* SOLR-7590: Finish and improve MDC context logging support. (Mark Miller)
+
+* SOLR-7599: Remove cruft from SolrCloud tests. (shalin)
+
+* SOLR-7623: Fix regression from SOLR-7484 that made it impossible to override 
+  SolrDispatchFilter#execute() and SolrDispatchFilter#sendError().  You can now override these
+  functions in HttpSolrCall.  (ryan)
+
+* SOLR-7636: The CLUSTERSTATUS API is now executed at the CollectionsHandler. (noble)
+
+* LUCENE-6508: Remove ability to forcefully unlock an index.
+  This is no longer supported by the underlying Lucene locking
+  framework.  (Uwe Schindler, Mike McCandless, Robert Muir)
+
 ==================  5.2.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release
@@ -259,7 +303,7 @@
   which blends RPT indexes for speed with serialized geometry for accuracy.  Includes a Lucene segment based
   in-memory shape cache. (David Smiley)
 
-* SOLR-7465: New file indexing example, under example/files.  (Esther Quansah, Erik Hatcher)
+* SOLR-7465, SOLR-7610: New file indexing example, under example/files.  (Esther Quansah, Erik Hatcher)
 
 * SOLR-7468: Kerberos authentication plugin for Solr. This would allow running a Kerberized Solr.
   (Noble Paul, Ishan Chattopadhyaya, Gregory Chanan, Anshum Gupta)
@@ -360,6 +404,21 @@
 * SOLR-7585: Fix NoSuchElementException in LFUCache resulting from heavy writes
   making concurrent put() calls. (Maciej Zasada via Shawn Heisey)
 
+* SOLR-7587: Seeding bucket versions from index when the firstSearcher event fires has a race condition
+  that leads to an infinite wait on VersionInfo's ReentrantReadWriteLock because the read-lock acquired
+  during a commit cannot be upgraded to a write-lock needed to block updates; solution is to move the
+  call out of the firstSearcher event path and into the SolrCore constructor. (Timothy Potter)
+
+* SOLR-7625: Ensure that the max value for seeding version buckets is updated after recovery even if
+  the UpdateLog is not replayed. (Timothy Potter)
+
+* SOLR-7610: Fix VelocityResponseWriter's $resource.locale to accurately report locale in use.
+  (ehatcher)
+
+* SOLR-7614: Distributed pivot facet refinement was broken due to a single correlation counter
+  used across multiple requests as if it was private to each request. (yonik)
+
+
 Optimizations
 ----------------------
 
@@ -491,6 +550,9 @@
   enable auto soft-commits for the bin/solr -e cloud example using the Config API.
   (Timothy Potter)
 
+* SOLR-7183: Fix Locale blacklisting for Minikdc based tests. (Ishan Chattopadhyaya, hossman
+  via Anshum Gupta)
+
 ==================  5.1.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release
diff --git a/solr/contrib/morphlines-core/src/test-files/solr/collection1/conf/solrconfig.xml b/solr/contrib/morphlines-core/src/test-files/solr/collection1/conf/solrconfig.xml
index f1b03eb..794e3d2 100644
--- a/solr/contrib/morphlines-core/src/test-files/solr/collection1/conf/solrconfig.xml
+++ b/solr/contrib/morphlines-core/src/test-files/solr/collection1/conf/solrconfig.xml
@@ -220,19 +220,6 @@
     -->
     <!-- <lockType>native</lockType> -->
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'none' or 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-    
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
diff --git a/solr/contrib/morphlines-core/src/test-files/solr/minimr/conf/solrconfig.xml b/solr/contrib/morphlines-core/src/test-files/solr/minimr/conf/solrconfig.xml
index 9393c9c..22d97f7 100644
--- a/solr/contrib/morphlines-core/src/test-files/solr/minimr/conf/solrconfig.xml
+++ b/solr/contrib/morphlines-core/src/test-files/solr/minimr/conf/solrconfig.xml
@@ -236,19 +236,6 @@
     -->
       <lockType>${solr.lock.type:hdfs}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
diff --git a/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml b/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml
index ed7d8a2..d08d474 100644
--- a/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml
+++ b/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml
@@ -238,19 +238,6 @@
     -->
       <lockType>${solr.lock.type:hdfs}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-    
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
diff --git a/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml b/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml
index 43ce433..004bafe 100644
--- a/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml
+++ b/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml
@@ -220,19 +220,6 @@
     -->
     <!-- <lockType>native</lockType> -->
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'none' or 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
diff --git a/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml b/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml
index f84e2d6..bb35753 100644
--- a/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml
+++ b/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml
@@ -239,19 +239,6 @@
     -->
       <lockType>${solr.lock.type:hdfs}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-    
     <!-- If true, IndexReaders will be reopened (often more efficient)
          instead of closed and then opened. Default: true
       -->
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java b/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
index 6828374..05456e4 100644
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
+++ b/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
@@ -326,19 +326,18 @@
   // see: http://svn.apache.org/repos/asf/velocity/tools/branches/2.0.x/src/main/java/org/apache/velocity/tools/generic/ResourceTool.java
   private class SolrVelocityResourceTool extends ResourceTool {
 
-    private final Locale locale;
     private ClassLoader solrClassLoader;
 
     public SolrVelocityResourceTool(ClassLoader cl, String localeString) {
       this.solrClassLoader = cl;
       Locale l = toLocale(localeString);
-      this.locale = (l == null ? Locale.ROOT : l);
+      this.setLocale(l == null ? Locale.ROOT : l);
     }
 
     @Override
     protected ResourceBundle getBundle(String baseName, Object loc) {
       // resource bundles for this tool must be in velocity "package"
-      return ResourceBundle.getBundle("velocity." + baseName, locale, solrClassLoader);
+      return ResourceBundle.getBundle("velocity." + baseName, getLocale(), solrClassLoader);
     }
 
     // Why did Velocity Tools make this private?  Copied from ResourceTools.java
diff --git a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
deleted file mode 100644
index ff1cf22..0000000
--- a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
+++ /dev/null
@@ -1,461 +0,0 @@
-package org.apache.solr;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.WeakHashMap;
-import java.util.logging.ConsoleHandler;
-import java.util.logging.Formatter;
-import java.util.logging.Handler;
-import java.util.logging.Level;
-import java.util.logging.LogRecord;
-import java.util.logging.Logger;
-
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestInfo;
-import org.slf4j.LoggerFactory;
-
-public class SolrLogFormatter extends Formatter {
-
-  /** Add this interface to a thread group and the string returned by
-   * getTag() will appear in log statements of any threads under that group.
-   */
-  public static interface TG {
-    public String getTag();
-  }
-
-  long startTime = System.currentTimeMillis();
-  long lastTime = startTime;
-  Map<Method, String> methodAlias = new HashMap<>();
-  
-  public static class Method {
-    public String className;
-    public String methodName;
-
-    public Method(String className, String methodName) {
-      this.className = className;
-      this.methodName = methodName;
-    }
-    
-    @Override
-    public int hashCode() {
-      return className.hashCode() + methodName.hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (!(obj instanceof  Method)) return false;
-      Method other = (Method)obj;
-      return (className.equals(other.className) && methodName.equals(other.methodName));
-    }
-
-    @Override
-    public String toString() {
-      return className + '.' + methodName;
-    }
-  }
-
-
-  public SolrLogFormatter() {
-    super();
-    
-    methodAlias.put(new Method("org.apache.solr.update.processor.LogUpdateProcessor","finish"), "UPDATE");
-    methodAlias.put(new Method("org.apache.solr.core.SolrCore","execute"), "REQ");
-  }
-
-
-  // TODO: name this better... it's only for cloud tests where every core container has just one solr server so Port/Core are fine
-  public boolean shorterFormat = false;
-
-  /**  Removes info that is redundant for current cloud tests including core name, webapp, and common labels path= and params=
-   * [] webapp=/solr path=/select params={q=foobarbaz} hits=0 status=0 QTime=1
-   * /select {q=foobarbaz} hits=0 status=0 QTime=1
-   * NOTE: this is a work in progress and different settings may be ideal for other types of tests.
-   */
-  public void setShorterFormat() {
-    shorterFormat = true;
-    // looking at /update is enough... we don't need "UPDATE /update"
-    methodAlias.put(new Method("org.apache.solr.update.processor.LogUpdateProcessor","finish"), "");
-  }
-
-  public static class CoreInfo {
-    static int maxCoreNum;
-    String shortId;
-    String url;
-    Map<String, Object> coreProps;
-  }
-
-  Map<SolrCore, CoreInfo> coreInfoMap = new WeakHashMap<>();    // TODO: use something that survives across a core reload?
-
-  public Map<String,String> classAliases = new HashMap<>();
-
-  @Override
-  public String format(LogRecord record) {
-    try {
-      return _format(record);
-    } catch (Exception e) {
-      // logging swallows exceptions, so if we hit an exception we need to convert it to a string to see it
-      return "ERROR IN SolrLogFormatter! original message:" + record.getMessage() + "\n\tException: " + SolrException.toStr(e);
-    }
-  }
-
-  
-  public void appendThread(StringBuilder sb, LogRecord record) {
-    Thread th = Thread.currentThread();
-
-
-/******
-    sb.append(" T=");
-    sb.append(th.getName()).append(' ');
-
-    // NOTE: tried creating a thread group around jetty but we seem to lose it and request
-    // threads are in the normal "main" thread group
-    ThreadGroup tg = th.getThreadGroup();
-    while (tg != null) {
-sb.append("(group_name=").append(tg.getName()).append(")");
-
-      if (tg instanceof TG) {
-        sb.append(((TG)tg).getTag());
-        sb.append('/');
-      }
-      try {
-        tg = tg.getParent();
-      } catch (Throwable e) {
-        tg = null;
-      }
-    }
- ******/
-
-    // NOTE: LogRecord.getThreadID is *not* equal to Thread.getId()
-    sb.append(" T");
-    sb.append(th.getId());
-  }
-
-  
-  public String _format(LogRecord record) {
-    String message = record.getMessage();
-    
-    StringBuilder sb = new StringBuilder(message.length() + 80);
-    
-    long now = record.getMillis();
-    long timeFromStart = now - startTime;
-    long timeSinceLast = now - lastTime;
-    lastTime = now;
-    String shortClassName = getShortClassName(record.getSourceClassName(), record.getSourceMethodName());
-
-/***
-    sb.append(timeFromStart).append(' ').append(timeSinceLast);
-    sb.append(' ');
-    sb.append(record.getSourceClassName()).append('.').append(record.getSourceMethodName());
-    sb.append(' ');
-    sb.append(record.getLevel());
-***/
-
-    SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo();
-    SolrQueryRequest req = requestInfo == null ? null : requestInfo.getReq();
-    SolrCore core = req == null ? null : req.getCore();
-    ZkController zkController = null;
-    CoreInfo info = null;
-    
-    if (core != null) {
-      info = coreInfoMap.get(core);
-      if (info == null) {
-        info = new CoreInfo();
-        info.shortId = "C"+Integer.toString(CoreInfo.maxCoreNum++);
-        coreInfoMap.put(core, info);
-
-        if (sb.length() == 0) sb.append("ASYNC ");
-        sb.append(" NEW_CORE "+info.shortId);
-        sb.append(" name=" + core.getName());
-        sb.append(" " + core);
-      }
-
-      if (zkController == null) {
-        zkController = core.getCoreDescriptor().getCoreContainer().getZkController();
-      }
-      if (zkController != null) {
-        if (info.url == null) {
-          info.url = zkController.getBaseUrl() + "/" + core.getName();
-          sb.append(" url="+info.url + " node="+zkController.getNodeName());
-        }
-
-        Map<String, Object> coreProps = getReplicaProps(zkController, core);
-        if (info.coreProps == null || !coreProps.equals(info.coreProps)) {
-          info.coreProps = coreProps;
-          final String corePropsString = "coll:" + core.getCoreDescriptor().getCloudDescriptor().getCollectionName() + " core:" + core.getName() + " props:" + coreProps;
-          sb.append(" " + info.shortId + "_STATE=" + corePropsString);
-        }
-      }
-    }
-
-
-    if (sb.length() > 0) sb.append('\n');
-    sb.append(timeFromStart);
-
-//     sb.append("\nL").append(record.getSequenceNumber());     // log number is useful for sequencing when looking at multiple parts of a log file, but ms since start should be fine.
-   appendThread(sb, record);
-
-
-    if (info != null) {
-      sb.append(' ').append(info.shortId);                     // core
-    }
-    if (zkController != null) {
-      sb.append(" P").append(zkController.getHostPort());      // todo: should be able to get this from core container for non zk tests
-    }
-
-    if (shortClassName.length() > 0) {
-      sb.append(' ').append(shortClassName);
-    }
-
-    if (record.getLevel() != Level.INFO) {
-      sb.append(' ').append(record.getLevel());
-    }
-
-    sb.append(' ');
-    appendMultiLineString(sb, message);
-    Throwable th = record.getThrown();
-    if (th != null) {
-      sb.append(' ');
-      String err = SolrException.toStr(th);
-      String ignoredMsg = SolrException.doIgnore(th, err);
-      if (ignoredMsg != null) {
-        sb.append(ignoredMsg);
-      } else {
-        sb.append(err);
-      }
-    }
-
-    sb.append('\n');
-
-    /*** Isn't core specific... prob better logged from zkController
-    if (info != null) {
-      ClusterState clusterState = zkController.getClusterState();
-      if (info.clusterState != clusterState) {
-        // something has changed in the matrix...
-        sb.append(zkController.getBaseUrl() + " sees new ClusterState:");
-      }
-    }
-    ***/
-    
-    return sb.toString();
-  }
-
-  private Map<String,Object> getReplicaProps(ZkController zkController, SolrCore core) {
-    final String collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    Replica replica = zkController.getClusterState().getReplica(collection, core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-    if(replica!=null) {
-      return replica.getProperties();
-    }
-    return Collections.EMPTY_MAP;
-  }
-
-
-  private Method classAndMethod = new Method(null,null); // don't need to be thread safe
-  private String getShortClassName(String name, String method) {
-    classAndMethod.className = name;
-    classAndMethod.methodName = method;
-
-    String out = methodAlias.get(classAndMethod);
-    if (out != null) return out;
-
-    StringBuilder sb = new StringBuilder();
-
-    int lastDot = name.lastIndexOf('.');
-    if (lastDot < 0) return name + '.' + method;
-
-    int prevIndex = -1;
-    for (;;) {
-      char ch = name.charAt(prevIndex + 1);
-      sb.append(ch);
-      int idx = name.indexOf('.', prevIndex+1);
-      ch = name.charAt(idx+1);
-      if (idx >= lastDot || Character.isUpperCase(ch)) {
-        sb.append(name.substring(idx));
-        break;
-      }
-      prevIndex = idx;
-    }
-  
-    return sb.toString() + '.' + method;
-  }
-  
-  private void addFirstLine(StringBuilder sb, String msg) {
-//    INFO: [] webapp=/solr path=/select params={q=foobarbaz} hits=0 status=0 QTime=1
-
-    if (!shorterFormat || !msg.startsWith("[")) {
-      sb.append(msg);      
-      return;
-    }
-
-    int idx = msg.indexOf(']');
-    if (idx < 0 || !msg.startsWith(" webapp=", idx+1)) {
-      sb.append(msg);
-      return;
-    }
-    
-    idx = msg.indexOf(' ',idx+8); // space after webapp=
-    if (idx < 0) { sb.append(msg); return; }
-    idx = msg.indexOf('=',idx+1);   // = in  path=
-    if (idx < 0) { sb.append(msg); return; }
-
-    int idx2 = msg.indexOf(' ',idx+1);
-    if (idx2 < 0) { sb.append(msg); return; }
-
-
-    sb.append(msg.substring(idx+1, idx2+1));  // path
-    
-    idx = msg.indexOf("params=", idx2);
-    if (idx < 0) {
-      sb.append(msg.substring(idx2));
-    } else {
-      sb.append(msg.substring(idx+7));
-    }
-  }
-  
-  private void appendMultiLineString(StringBuilder sb, String msg) {
-    int idx = msg.indexOf('\n');
-    if (idx < 0) {
-      addFirstLine(sb, msg);
-      return;
-    }
-
-    int lastIdx = -1;
-    for (;;) {
-      if (idx < 0) {
-        if (lastIdx == -1) {
-          addFirstLine(sb, msg.substring(lastIdx+1));
-        } else {
-          sb.append(msg.substring(lastIdx+1));
-        }
-        break;
-      }
-      if (lastIdx == -1) {
-        addFirstLine(sb, msg.substring(lastIdx+1, idx));
-      } else {
-        sb.append(msg.substring(lastIdx+1, idx));
-      }
-
-      sb.append("\n\t");
-      lastIdx = idx;
-      idx = msg.indexOf('\n',lastIdx+1);
-    }
-  }
-
-  @Override
-  public String getHead(Handler h) {
-    return super.getHead(h);
-  }
-
-  @Override
-  public String getTail(Handler h) {
-    return super.getTail(h);
-  }
-
-  @Override
-  public String formatMessage(LogRecord record) {
-    return format(record);
-  }
-
-
-
-  static ThreadLocal<String> threadLocal = new ThreadLocal<>();
-  
-  public static void main(String[] args) throws Exception {
-
-      Handler[] handlers = Logger.getLogger("").getHandlers();
-      boolean foundConsoleHandler = false;
-      for (int index = 0; index < handlers.length; index++) {
-        // set console handler to SEVERE
-        if (handlers[index] instanceof ConsoleHandler) {
-          handlers[index].setLevel(Level.ALL);
-          handlers[index].setFormatter(new SolrLogFormatter());
-          foundConsoleHandler = true;
-        }
-      }
-      if (!foundConsoleHandler) {
-        // no console handler found
-        System.err.println("No consoleHandler found, adding one.");
-        ConsoleHandler consoleHandler = new ConsoleHandler();
-        consoleHandler.setLevel(Level.ALL);
-        consoleHandler.setFormatter(new SolrLogFormatter());
-        Logger.getLogger("").addHandler(consoleHandler);
-      }
-
-
-
-    final org.slf4j.Logger log = LoggerFactory.getLogger(SolrLogFormatter.class);
-    log.error("HELLO");
-    
-    ThreadGroup tg = new MyThreadGroup("YCS");
-        
-    Thread th = new Thread(tg, "NEW_THREAD") {
-
-      @Override
-      public void run() {
-        try {
-          go();
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-      }
-    };
-    
-    th.start();
-    th.join();
-  }
-  
-
-  static class MyThreadGroup extends ThreadGroup implements TG {
-    public MyThreadGroup(String name) {
-      super(name);
-    }
-    @Override
-    public String getTag() { return "HELLO"; }
-  }
-  
-  public static void go() throws Exception {
-    final org.slf4j.Logger log = LoggerFactory.getLogger(SolrLogFormatter.class);
- 
-    Thread thread1 = new Thread() {
-      @Override
-      public void run() {
-        threadLocal.set("from thread1");
-        log.error("[] webapp=/solr path=/select params={hello} wow");
-      }
-    };
-
-    Thread thread2 = new Thread() {
-      @Override
-      public void run() {
-        threadLocal.set("from thread2");
-        log.error("InThread2");
-      }
-    };
-
-    thread1.start();
-    thread2.start();
-    thread1.join();
-    thread2.join();    
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
index dce51e3..93788c6 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
@@ -30,6 +30,8 @@
   public final String context;
 
   public final boolean stopAtShutdown;
+  
+  public final Long waitForLoadingCoresToFinishMs;
 
   public final Map<ServletHolder, String> extraServlets;
 
@@ -37,11 +39,12 @@
 
   public final SSLConfig sslConfig;
 
-  private JettyConfig(int port, String context, boolean stopAtShutdown, Map<ServletHolder, String> extraServlets,
+  private JettyConfig(int port, String context, boolean stopAtShutdown, Long waitForLoadingCoresToFinishMs, Map<ServletHolder, String> extraServlets,
                       Map<Class<? extends Filter>, String> extraFilters, SSLConfig sslConfig) {
     this.port = port;
     this.context = context;
     this.stopAtShutdown = stopAtShutdown;
+    this.waitForLoadingCoresToFinishMs = waitForLoadingCoresToFinishMs;
     this.extraServlets = extraServlets;
     this.extraFilters = extraFilters;
     this.sslConfig = sslConfig;
@@ -67,6 +70,7 @@
     int port = 0;
     String context = "/solr";
     boolean stopAtShutdown = true;
+    Long waitForLoadingCoresToFinishMs = 300000L;
     Map<ServletHolder, String> extraServlets = new TreeMap<>();
     Map<Class<? extends Filter>, String> extraFilters = new TreeMap<>();
     SSLConfig sslConfig = null;
@@ -85,6 +89,11 @@
       this.stopAtShutdown = stopAtShutdown;
       return this;
     }
+    
+    public Builder waitForLoadingCoresToFinish(Long waitForLoadingCoresToFinishMs) {
+      this.waitForLoadingCoresToFinishMs = waitForLoadingCoresToFinishMs;
+      return this;
+    }
 
     public Builder withServlet(ServletHolder servlet, String servletName) {
       extraServlets.put(servlet, servletName);
@@ -114,7 +123,7 @@
     }
 
     public JettyConfig build() {
-      return new JettyConfig(port, context, stopAtShutdown, extraServlets, extraFilters, sslConfig);
+      return new JettyConfig(port, context, stopAtShutdown, waitForLoadingCoresToFinishMs, extraServlets, extraFilters, sslConfig);
     }
 
   }
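
Editor's note (not part of the patch): the hunks above add a waitForLoadingCoresToFinishMs option to JettyConfig, defaulting to 300000 ms. A minimal usage sketch of the Builder follows; the `new JettyConfig.Builder()` entry point is an assumption, while waitForLoadingCoresToFinish(Long) and build() are the methods shown above.

    // Hedged sketch: configure the embedded Jetty runner to wait up to 60s for cores to load.
    JettyConfig config = new JettyConfig.Builder()   // Builder entry point assumed, not shown in this diff
        .waitForLoadingCoresToFinish(60000L)         // null or <= 0 disables the wait (see JettySolrRunner hunk below)
        .build();
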
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index 05c5bb1..fa44fe5 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -17,6 +17,7 @@
 
 package org.apache.solr.client.solrj.embedded;
 
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.eclipse.jetty.server.Connector;
 import org.eclipse.jetty.server.HttpConfiguration;
@@ -393,6 +394,8 @@
           }
         }
       }
+      
+      if (config.waitForLoadingCoresToFinishMs != null && config.waitForLoadingCoresToFinishMs > 0L) waitForLoadingCoresToFinish(config.waitForLoadingCoresToFinishMs);
     } finally {
       if (prevContext != null)  {
         MDC.setContextMap(prevContext);
@@ -562,4 +565,14 @@
   public String getSolrHome() {
     return solrHome;
   }
+
+  private void waitForLoadingCoresToFinish(long timeoutMs) {
+    if (dispatchFilter != null) {
+      SolrDispatchFilter solrFilter = (SolrDispatchFilter) dispatchFilter.getFilter();
+      CoreContainer cores = solrFilter.getCores();
+      if (cores != null) {
+        cores.waitForLoadingCoresToFinish(timeoutMs);
+      }
+    }
+  }
 }
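
Editor's note (not part of the patch): waitForLoadingCoresToFinish(timeoutMs) above delegates to CoreContainer.waitForLoadingCoresToFinish(timeoutMs), whose implementation is not shown in this diff. As background, a self-contained sketch of the generic poll-until-done-or-timeout pattern it represents, using only the JDK; the names below are illustrative, not Solr APIs.

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    final class WaitUtil {
      // Polls `done` every 100 ms until it returns true or timeoutMs elapses.
      static boolean waitFor(BooleanSupplier done, long timeoutMs) throws InterruptedException {
        final long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (!done.getAsBoolean()) {
          if (System.nanoTime() >= deadline) return false; // timed out
          Thread.sleep(100);
        }
        return true;
      }
    }
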
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index 1176313..62f9913 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -1,5 +1,11 @@
 package org.apache.solr.cloud;
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.SolrException;
@@ -15,7 +21,7 @@
 import org.apache.solr.common.util.RetryUtil.RetryCmd;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
-import org.apache.solr.logging.MDCUtils;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.update.UpdateLog;
 import org.apache.solr.util.RefCounted;
@@ -25,14 +31,6 @@
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -117,8 +115,6 @@
     this.shardId = shardId;
     this.collection = collection;
 
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setMDC(collection, shardId, null, null);
     try {
       new ZkCmdExecutor(zkStateReader.getZkClient().getZkClientTimeout())
           .ensureExists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection,
@@ -128,8 +124,6 @@
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
     }
   }
   
@@ -203,158 +197,152 @@
    */
   @Override
   void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws KeeperException,
-      InterruptedException, IOException {
-    log.info("Running the leader process for shard " + shardId);
-    
+      InterruptedException, IOException {
     String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
     ActionThrottle lt;
     try (SolrCore core = cc.getCore(coreName)) {
-
       if (core == null) {
         cancelElection();
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "SolrCore not found:" + coreName + " in "
-                + cc.getCoreNames());
+        throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getCoreNames());
       }
-      
+      MDCLoggingContext.setCore(core);
       lt = core.getUpdateHandler().getSolrCoreState().getLeaderThrottle();
     }
-    
-    lt.minimumWaitBetweenActions();
-    lt.markAttemptingAction();
-    
-    // clear the leader in clusterstate
-    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
-        ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP,
-        collection);
-    Overseer.getInQueue(zkClient).offer(ZkStateReader.toJSON(m));
-    
-    int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
-    if (!weAreReplacement) {
-      waitForReplicasToComeUp(leaderVoteWait);
-    }
 
-    try (SolrCore core = cc.getCore(coreName)) {
-
-      if (core == null) {
-        cancelElection();
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "SolrCore not found:" + coreName + " in "
-                + cc.getCoreNames());
-      }
-      
-      // should I be leader?
-      if (weAreReplacement && !shouldIBeLeader(leaderProps, core, weAreReplacement)) {
-        rejoinLeaderElection(core);
-        return;
-      }
-      
-      log.info("I may be the new leader - try and sync");
- 
-      
-      // we are going to attempt to be the leader
-      // first cancel any current recovery
-      core.getUpdateHandler().getSolrCoreState().cancelRecovery();
-      
-      if (weAreReplacement) {
-        // wait a moment for any floating updates to finish
-        try {
-          Thread.sleep(2500);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, e);
-        }
-      }
-      
-      boolean success = false;
-      try {
-        success = syncStrategy.sync(zkController, core, leaderProps, weAreReplacement);
-      } catch (Exception e) {
-        SolrException.log(log, "Exception while trying to sync", e);
-        success = false;
-      }
-      
-      UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-
-      if (!success) {
-        boolean hasRecentUpdates = false;
-        if (ulog != null) {
-          // TODO: we could optimize this if necessary
-          UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates();
-          try {
-            hasRecentUpdates = !recentUpdates.getVersions(1).isEmpty();
-          } finally {
-            recentUpdates.close();
-          }
-        }
-
-        if (!hasRecentUpdates) {
-          // we failed sync, but we have no versions - we can't sync in that case
-          // - we were active
-          // before, so become leader anyway
-          log.info("We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway");
-          success = true;
-        }
-      }
-      
-      // solrcloud_debug
-      if (log.isDebugEnabled()) {
-        try {
-          RefCounted<SolrIndexSearcher> searchHolder = core
-              .getNewestSearcher(false);
-          SolrIndexSearcher searcher = searchHolder.get();
-          try {
-            log.debug(core.getCoreDescriptor().getCoreContainer()
-                .getZkController().getNodeName()
-                + " synched "
-                + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
-          } finally {
-            searchHolder.decref();
-          }
-        } catch (Exception e) {
-          log.error("Error in solrcloud_debug block", e);
-        }
-      }
-      if (!success) {
-        rejoinLeaderElection(core);
-        return;
-      }
-
-      log.info("I am the new leader: "
-          + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
-      core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
-    }
-
-    boolean isLeader = true;
     try {
-      super.runLeaderProcess(weAreReplacement, 0);
-    } catch (Exception e) {
-      isLeader = false;
-      SolrException.log(log, "There was a problem trying to register as the leader", e);
-  
+      lt.minimumWaitBetweenActions();
+      lt.markAttemptingAction();
+      
+      log.info("Running the leader process for shard " + shardId);
+      // clear the leader in clusterstate
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
+          ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection);
+      Overseer.getInQueue(zkClient).offer(ZkStateReader.toJSON(m));
+      
+      int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
+      if (!weAreReplacement) {
+        waitForReplicasToComeUp(leaderVoteWait);
+      }
+      
       try (SolrCore core = cc.getCore(coreName)) {
-
+        
         if (core == null) {
-          log.debug("SolrCore not found:" + coreName + " in " + cc.getCoreNames());
+          cancelElection();
+          throw new SolrException(ErrorCode.SERVER_ERROR,
+              "SolrCore not found:" + coreName + " in " + cc.getCoreNames());
+        }
+        
+        // should I be leader?
+        if (weAreReplacement && !shouldIBeLeader(leaderProps, core, weAreReplacement)) {
+          rejoinLeaderElection(core);
           return;
         }
         
-        core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
+        log.info("I may be the new leader - try and sync");
         
-        // we could not publish ourselves as leader - try and rejoin election
-        rejoinLeaderElection(core);
+        // we are going to attempt to be the leader
+        // first cancel any current recovery
+        core.getUpdateHandler().getSolrCoreState().cancelRecovery();
+        
+        if (weAreReplacement) {
+          // wait a moment for any floating updates to finish
+          try {
+            Thread.sleep(2500);
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, e);
+          }
+        }
+        
+        boolean success = false;
+        try {
+          success = syncStrategy.sync(zkController, core, leaderProps, weAreReplacement);
+        } catch (Exception e) {
+          SolrException.log(log, "Exception while trying to sync", e);
+          success = false;
+        }
+        
+        UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
+        
+        if (!success) {
+          boolean hasRecentUpdates = false;
+          if (ulog != null) {
+            // TODO: we could optimize this if necessary
+            UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates();
+            try {
+              hasRecentUpdates = !recentUpdates.getVersions(1).isEmpty();
+            } finally {
+              recentUpdates.close();
+            }
+          }
+          
+          if (!hasRecentUpdates) {
+            // we failed sync, but we have no versions - we can't sync in that case;
+            // we were active before,
+            // so become leader anyway
+            log.info(
+                "We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway");
+            success = true;
+          }
+        }
+        
+        // solrcloud_debug
+        if (log.isDebugEnabled()) {
+          try {
+            RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
+            SolrIndexSearcher searcher = searchHolder.get();
+            try {
+              log.debug(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName() + " synched "
+                  + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
+            } finally {
+              searchHolder.decref();
+            }
+          } catch (Exception e) {
+            log.error("Error in solrcloud_debug block", e);
+          }
+        }
+        if (!success) {
+          rejoinLeaderElection(core);
+          return;
+        }
+        
+        log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
+        core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
       }
-    }
-
-    if (isLeader) {
-      // check for any replicas in my shard that were set to down by the previous leader
+      
+      boolean isLeader = true;
       try {
-        startLeaderInitiatedRecoveryOnReplicas(coreName);
-      } catch (Exception exc) {
-        // don't want leader election to fail because of
-        // an error trying to tell others to recover
+        super.runLeaderProcess(weAreReplacement, 0);
+      } catch (Exception e) {
+        isLeader = false;
+        SolrException.log(log, "There was a problem trying to register as the leader", e);
+        
+        try (SolrCore core = cc.getCore(coreName)) {
+          
+          if (core == null) {
+            log.debug("SolrCore not found:" + coreName + " in " + cc.getCoreNames());
+            return;
+          }
+          
+          core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
+          
+          // we could not publish ourselves as leader - try and rejoin election
+          rejoinLeaderElection(core);
+        }
       }
-    }    
+      
+      if (isLeader) {
+        // check for any replicas in my shard that were set to down by the previous leader
+        try {
+          startLeaderInitiatedRecoveryOnReplicas(coreName);
+        } catch (Exception exc) {
+          // don't want leader election to fail because of
+          // an error trying to tell others to recover
+        }
+      }
+    } finally {
+      MDCLoggingContext.clear();
+    }
   }
   
   private void startLeaderInitiatedRecoveryOnReplicas(String coreName) throws Exception {
@@ -493,8 +481,7 @@
   }
 
   private boolean shouldIBeLeader(ZkNodeProps leaderProps, SolrCore core, boolean weAreReplacement) {
-    log.info("Checking if I (core={},coreNodeName={}) should try and be the leader.", core.getName(),
-        core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
+    log.info("Checking if I should try and be the leader.");
     
     if (isClosed) {
       log.info("Bailing on leader process because we have been closed");
@@ -534,7 +521,7 @@
 }
 
 final class OverseerElectionContext extends ElectionContext {
-  
+  private static Logger log = LoggerFactory.getLogger(OverseerElectionContext.class);
   private final SolrZkClient zkClient;
   private Overseer overseer;
   public static final String PATH = "/overseer_elect";
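
Editor's note (not part of the patch): the ElectionContext changes above replace the MDCUtils save/set/cleanup pattern with MDCLoggingContext.setCore(core) plus MDCLoggingContext.clear() in a finally block. MDCLoggingContext itself is not shown in this diff; below is a hedged sketch of the underlying SLF4J MDC try/finally pattern it wraps, with key names ("collection", "core") chosen purely for illustration.

    import org.slf4j.MDC;

    // Illustrative only: set per-request diagnostic context, run the work, always clean up.
    static void runWithLoggingContext(String collection, String coreName, Runnable work) {
      MDC.put("collection", collection);
      MDC.put("core", coreName);
      try {
        work.run();               // log lines emitted here carry the MDC values
      } finally {
        MDC.remove("collection"); // mirrors what MDCLoggingContext.clear() does for its own keys
        MDC.remove("core");
      }
    }
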
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index f01ac51..36a9add 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -17,14 +17,6 @@
  * limitations under the License.
  */
 
-import static org.apache.solr.cloud.Assign.*;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
-import static org.apache.solr.common.cloud.ZkStateReader.*;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
-import static org.apache.solr.common.params.CommonParams.*;
-import static org.apache.solr.common.util.StrUtils.formatString;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -55,11 +47,11 @@
 import org.apache.solr.cloud.Assign.Node;
 import org.apache.solr.cloud.DistributedQueue.QueueEvent;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.ReplicaAssigner.Position;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.cloud.rule.ReplicaAssigner;
+import org.apache.solr.cloud.rule.ReplicaAssigner.Position;
+import org.apache.solr.cloud.rule.Rule;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.Aliases;
@@ -87,11 +79,11 @@
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.handler.admin.ClusterStatus;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.handler.component.ShardResponse;
-import org.apache.solr.logging.MDCUtils;
 import org.apache.solr.update.SolrIndexSplitter;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.solr.util.stats.Snapshot;
@@ -102,7 +94,35 @@
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
+
+import static org.apache.solr.cloud.Assign.getNodesForNewShard;
+import static org.apache.solr.common.cloud.DocCollection.SNITCH;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.StrUtils.formatString;
 
 
 public class OverseerCollectionProcessor implements Runnable, Closeable {
@@ -367,6 +387,7 @@
       return true;
 
     // CLUSTERSTATUS is always mutually exclusive
+    // TODO: deprecated; remove this check.
     if(CLUSTERSTATUS.isEqual(message.getStr(Overseer.QUEUE_OPERATION)))
       return true;
 
@@ -598,8 +619,8 @@
         case OVERSEERSTATUS:
           getOverseerStatus(message, results);
           break;
-        case CLUSTERSTATUS:
-          getClusterStatus(zkStateReader.getClusterState(), message, results);
+        case CLUSTERSTATUS: // TODO: deprecated; the OverseerCollectionProcessor no longer needs to handle this. Remove in a later release.
+          new ClusterStatus(zkStateReader, message).getClusterStatus(results);
           break;
         case ADDREPLICAPROP:
           processReplicaAddPropertyCommand(message);
@@ -998,68 +1019,66 @@
   }
 
   @SuppressWarnings("unchecked")
-  private void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws KeeperException, InterruptedException {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP,REPLICA_PROP);
+  private void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws KeeperException, InterruptedException {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
     String collectionName = message.getStr(COLLECTION_PROP);
     String shard = message.getStr(SHARD_ID_PROP);
     String replicaName = message.getStr(REPLICA_PROP);
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setMDC(collectionName, shard, replicaName, null);
-    try {
-      DocCollection coll = clusterState.getCollection(collectionName);
-      Slice slice = coll.getSlice(shard);
-      ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-      if (slice == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid shard name : " + shard + " in collection : " + collectionName);
-      }
-      Replica replica = slice.getReplica(replicaName);
-      if (replica == null) {
-        ArrayList<String> l = new ArrayList<>();
-        for (Replica r : slice.getReplicas()) l.add(r.getName());
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid replica : " + replicaName + " in shard/collection : "
-            + shard + "/" + collectionName + " available replicas are " + StrUtils.join(l, ','));
-      }
-
-      // If users are being safe and only want to remove a shard if it is down, they can specify onlyIfDown=true
-      // on the command.
-      if (Boolean.parseBoolean(message.getStr(ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Attempted to remove replica : " + collectionName + "/" +
-            shard + "/" + replicaName +
-            " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
-      }
-
-      String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-      String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-      // assume the core exists and try to unload it
-      Map m = makeMap("qt", adminPath, CoreAdminParams.ACTION,
-          CoreAdminAction.UNLOAD.toString(), CoreAdminParams.CORE, core,
-          CoreAdminParams.DELETE_INSTANCE_DIR, "true",
-          CoreAdminParams.DELETE_DATA_DIR, "true");
-
-      ShardRequest sreq = new ShardRequest();
-      sreq.purpose = 1;
-      sreq.shards = new String[]{baseUrl};
-      sreq.actualShards = sreq.shards;
-      sreq.params = new ModifiableSolrParams(new MapSolrParams(m));
-      try {
-        shardHandler.submit(sreq, baseUrl, sreq.params);
-      } catch (Exception e) {
-        log.warn("Exception trying to unload core " + sreq, e);
-      }
-
-      collectShardResponses(replica.getState() != Replica.State.ACTIVE ? new NamedList() : results,
-          false, null, shardHandler);
-
-      if (waitForCoreNodeGone(collectionName, shard, replicaName, 5000))
-        return;//check if the core unload removed the corenode zk enry
-      deleteCoreNode(collectionName, replicaName, replica, core); // try and ensure core info is removed from clusterstate
-      if (waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return;
-
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not  remove replica : " + collectionName + "/" + shard + "/" + replicaName);
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+    
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Slice slice = coll.getSlice(shard);
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    if (slice == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Invalid shard name : " + shard + " in collection : " + collectionName);
     }
+    Replica replica = slice.getReplica(replicaName);
+    if (replica == null) {
+      ArrayList<String> l = new ArrayList<>();
+      for (Replica r : slice.getReplicas())
+        l.add(r.getName());
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid replica : " + replicaName + " in shard/collection : "
+          + shard + "/" + collectionName + " available replicas are " + StrUtils.join(l, ','));
+    }
+    
+    // If users are being safe and only want to remove a replica if it is down, they can specify onlyIfDown=true
+    // on the command.
+    if (Boolean.parseBoolean(message.getStr(ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Attempted to remove replica : " + collectionName + "/" + shard + "/" + replicaName
+              + " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
+    }
+    
+    String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+    
+    // assume the core exists and try to unload it
+    Map m = makeMap("qt", adminPath, CoreAdminParams.ACTION, CoreAdminAction.UNLOAD.toString(), CoreAdminParams.CORE,
+        core, CoreAdminParams.DELETE_INSTANCE_DIR, "true", CoreAdminParams.DELETE_DATA_DIR, "true");
+        
+    ShardRequest sreq = new ShardRequest();
+    sreq.purpose = 1;
+    sreq.shards = new String[] {baseUrl};
+    sreq.actualShards = sreq.shards;
+    sreq.params = new ModifiableSolrParams(new MapSolrParams(m));
+    try {
+      shardHandler.submit(sreq, baseUrl, sreq.params);
+    } catch (Exception e) {
+      log.warn("Exception trying to unload core " + sreq, e);
+    }
+    
+    collectShardResponses(replica.getState() != Replica.State.ACTIVE ? new NamedList() : results, false, null,
+        shardHandler);
+        
+    if (waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return; // check if the core unload removed the
+                                                                               // coreNode zk entry
+    deleteCoreNode(collectionName, replicaName, replica, core); // try and ensure core info is removed from clusterstate
+    if (waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return;
+    
+    throw new SolrException(ErrorCode.SERVER_ERROR,
+        "Could not  remove replica : " + collectionName + "/" + shard + "/" + replicaName);
+        
   }
 
   private boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
@@ -1155,42 +1174,33 @@
   private void createAlias(Aliases aliases, ZkNodeProps message) {
     String aliasName = message.getStr(NAME);
     String collections = message.getStr("collections");
-
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setCollection(aliasName);
-
-    try {
-      Map<String, Map<String, String>> newAliasesMap = new HashMap<>();
-      Map<String, String> newCollectionAliasesMap = new HashMap<>();
-      Map<String, String> prevColAliases = aliases.getCollectionAliasMap();
-      if (prevColAliases != null) {
-        newCollectionAliasesMap.putAll(prevColAliases);
-      }
-      newCollectionAliasesMap.put(aliasName, collections);
-      newAliasesMap.put("collection", newCollectionAliasesMap);
-      Aliases newAliases = new Aliases(newAliasesMap);
-      byte[] jsonBytes = null;
-      if (newAliases.collectionAliasSize() > 0) { // only sub map right now
-        jsonBytes = ZkStateReader.toJSON(newAliases.getAliasMap());
-      }
-      try {
-        zkStateReader.getZkClient().setData(ZkStateReader.ALIASES,
-            jsonBytes, true);
-
-        checkForAlias(aliasName, collections);
-        // some fudge for other nodes
-        Thread.sleep(100);
-      } catch (KeeperException e) {
-        log.error("", e);
-        throw new SolrException(ErrorCode.SERVER_ERROR, e);
-      } catch (InterruptedException e) {
-        log.warn("", e);
-        throw new SolrException(ErrorCode.SERVER_ERROR, e);
-      }
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+    
+    Map<String,Map<String,String>> newAliasesMap = new HashMap<>();
+    Map<String,String> newCollectionAliasesMap = new HashMap<>();
+    Map<String,String> prevColAliases = aliases.getCollectionAliasMap();
+    if (prevColAliases != null) {
+      newCollectionAliasesMap.putAll(prevColAliases);
     }
-
+    newCollectionAliasesMap.put(aliasName, collections);
+    newAliasesMap.put("collection", newCollectionAliasesMap);
+    Aliases newAliases = new Aliases(newAliasesMap);
+    byte[] jsonBytes = null;
+    if (newAliases.collectionAliasSize() > 0) { // only sub map right now
+      jsonBytes = ZkStateReader.toJSON(newAliases.getAliasMap());
+    }
+    try {
+      zkStateReader.getZkClient().setData(ZkStateReader.ALIASES, jsonBytes, true);
+      
+      checkForAlias(aliasName, collections);
+      // some fudge for other nodes
+      Thread.sleep(100);
+    } catch (KeeperException e) {
+      log.error("", e);
+      throw new SolrException(ErrorCode.SERVER_ERROR, e);
+    } catch (InterruptedException e) {
+      log.warn("", e);
+      throw new SolrException(ErrorCode.SERVER_ERROR, e);
+    }
   }
 
   private void checkForAlias(String name, String value) {
@@ -1233,8 +1243,6 @@
 
   private void deleteAlias(Aliases aliases, ZkNodeProps message) {
     String aliasName = message.getStr(NAME);
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setCollection(aliasName);
 
     Map<String,Map<String,String>> newAliasesMap = new HashMap<>();
     Map<String,String> newCollectionAliasesMap = new HashMap<>();
@@ -1258,282 +1266,393 @@
     } catch (InterruptedException e) {
       log.warn("", e);
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
-    }
+    } 
 
   }
 
   private boolean createShard(ClusterState clusterState, ZkNodeProps message, NamedList results)
       throws KeeperException, InterruptedException {
-    Map previousMDCContext = MDC.getCopyOfContextMap();
     String collectionName = message.getStr(COLLECTION_PROP);
     String sliceName = message.getStr(SHARD_ID_PROP);
-
-    MDCUtils.setMDC(collectionName, sliceName, null, null);
-    try {
-      log.info("Create shard invoked: {}", message);
-      if (collectionName == null || sliceName == null)
-        throw new SolrException(ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
-      int numSlices = 1;
-
-      ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-      DocCollection collection = clusterState.getCollection(collectionName);
-      int repFactor = message.getInt(REPLICATION_FACTOR, collection.getInt(REPLICATION_FACTOR, 1));
-      String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-      List<Node> sortedNodeList = getNodesForNewShard(clusterState, collectionName, sliceName, repFactor,
-          createNodeSetStr, overseer.getZkController().getCoreContainer());
-
-      Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(message));
-      // wait for a while until we see the shard
-      long waitUntil = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
-      boolean created = false;
-      while (System.nanoTime() < waitUntil) {
-        Thread.sleep(100);
-        created = zkStateReader.getClusterState().getCollection(collectionName).getSlice(sliceName) != null;
-        if (created) break;
-      }
-      if (!created)
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Could not fully create shard: " + message.getStr(NAME));
-
-
-      String configName = message.getStr(COLL_CONF);
-      for (int j = 1; j <= repFactor; j++) {
-        String nodeName = sortedNodeList.get(((j - 1)) % sortedNodeList.size()).nodeName;
-        String shardName = collectionName + "_" + sliceName + "_replica" + j;
-        log.info("Creating shard " + shardName + " as part of slice "
-            + sliceName + " of collection " + collectionName + " on "
-            + nodeName);
-
-        // Need to create new params for each request
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
-
-        params.set(CoreAdminParams.NAME, shardName);
-        params.set(COLL_CONF, configName);
-        params.set(CoreAdminParams.COLLECTION, collectionName);
-        params.set(CoreAdminParams.SHARD, sliceName);
-        params.set(ZkStateReader.NUM_SHARDS_PROP, numSlices);
-        addPropertyParams(message, params);
-
-        ShardRequest sreq = new ShardRequest();
-        params.set("qt", adminPath);
-        sreq.purpose = 1;
-        String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-        sreq.shards = new String[]{replica};
-        sreq.actualShards = sreq.shards;
-        sreq.params = params;
-
-        shardHandler.submit(sreq, replica, sreq.params);
-
-      }
-
-      processResponses(results, shardHandler);
-
-      log.info("Finished create command on all shards for collection: "
-          + collectionName);
-
-      return true;
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+    
+    log.info("Create shard invoked: {}", message);
+    if (collectionName == null || sliceName == null)
+      throw new SolrException(ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
+    int numSlices = 1;
+    
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    DocCollection collection = clusterState.getCollection(collectionName);
+    int repFactor = message.getInt(REPLICATION_FACTOR, collection.getInt(REPLICATION_FACTOR, 1));
+    String createNodeSetStr = message.getStr(CREATE_NODE_SET);
+    List<Node> sortedNodeList = getNodesForNewShard(clusterState, collectionName, sliceName, repFactor,
+        createNodeSetStr, overseer.getZkController().getCoreContainer());
+        
+    Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(message));
+    // wait for a while until we see the shard
+    long waitUntil = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
+    boolean created = false;
+    while (System.nanoTime() < waitUntil) {
+      Thread.sleep(100);
+      created = zkStateReader.getClusterState().getCollection(collectionName).getSlice(sliceName) != null;
+      if (created) break;
     }
+    if (!created)
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not fully create shard: " + message.getStr(NAME));
+      
+    String configName = message.getStr(COLL_CONF);
+    for (int j = 1; j <= repFactor; j++) {
+      String nodeName = sortedNodeList.get(((j - 1)) % sortedNodeList.size()).nodeName;
+      String shardName = collectionName + "_" + sliceName + "_replica" + j;
+      log.info("Creating shard " + shardName + " as part of slice " + sliceName + " of collection " + collectionName
+          + " on " + nodeName);
+          
+      // Need to create new params for each request
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
+      
+      params.set(CoreAdminParams.NAME, shardName);
+      params.set(COLL_CONF, configName);
+      params.set(CoreAdminParams.COLLECTION, collectionName);
+      params.set(CoreAdminParams.SHARD, sliceName);
+      params.set(ZkStateReader.NUM_SHARDS_PROP, numSlices);
+      addPropertyParams(message, params);
+      
+      ShardRequest sreq = new ShardRequest();
+      params.set("qt", adminPath);
+      sreq.purpose = 1;
+      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
+      sreq.shards = new String[] {replica};
+      sreq.actualShards = sreq.shards;
+      sreq.params = params;
+      
+      shardHandler.submit(sreq, replica, sreq.params);
+      
+    }
+    
+    processResponses(results, shardHandler);
+    
+    log.info("Finished create command on all shards for collection: " + collectionName);
+    
+    return true; 
   }
 
 
   private boolean splitShard(ClusterState clusterState, ZkNodeProps message, NamedList results) {
     String collectionName = message.getStr("collection");
     String slice = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setMDC(collectionName, slice, null, null);
-    try {
-      log.info("Split shard invoked");
-      String splitKey = message.getStr("split.key");
-      ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-      DocCollection collection = clusterState.getCollection(collectionName);
-      DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-      Slice parentSlice = null;
-
-      if (slice == null) {
-        if (router instanceof CompositeIdRouter) {
-          Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
-          if (searchSlices.isEmpty()) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
-          }
-          if (searchSlices.size() > 1) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
-          }
-          parentSlice = searchSlices.iterator().next();
-          slice = parentSlice.getName();
-          log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
-        } else {
+    
+    log.info("Split shard invoked");
+    String splitKey = message.getStr("split.key");
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    
+    DocCollection collection = clusterState.getCollection(collectionName);
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+    
+    Slice parentSlice = null;
+    
+    if (slice == null) {
+      if (router instanceof CompositeIdRouter) {
+        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
+        if (searchSlices.isEmpty()) {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
+        }
+        if (searchSlices.size() > 1) {
           throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Split by route key can only be used with CompositeIdRouter or subclass. Found router: " + router.getClass().getName());
+              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
         }
+        parentSlice = searchSlices.iterator().next();
+        slice = parentSlice.getName();
+        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
       } else {
-        parentSlice = clusterState.getSlice(collectionName, slice);
+        throw new SolrException(ErrorCode.BAD_REQUEST,
+            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
+                + router.getClass().getName());
       }
-
-      if (parentSlice == null) {
-        if (clusterState.hasCollection(collectionName)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
-        } else {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "No collection with the specified name exists: " + collectionName);
-        }
-      }
-
-      // find the leader for the shard
-      Replica parentShardLeader = null;
-      try {
-        parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice, 10000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-
-      DocRouter.Range range = parentSlice.getRange();
-      if (range == null) {
-        range = new PlainIdRouter().fullRange();
-      }
-
-      List<DocRouter.Range> subRanges = null;
-      String rangesStr = message.getStr(CoreAdminParams.RANGES);
-      if (rangesStr != null) {
-        String[] ranges = rangesStr.split(",");
-        if (ranges.length == 0 || ranges.length == 1) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
-        } else {
-          subRanges = new ArrayList<>(ranges.length);
-          for (int i = 0; i < ranges.length; i++) {
-            String r = ranges[i];
-            try {
-              subRanges.add(DocRouter.DEFAULT.fromString(r));
-            } catch (Exception e) {
-              throw new SolrException(ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
-            }
-            if (!subRanges.get(i).isSubsetOf(range)) {
-              throw new SolrException(ErrorCode.BAD_REQUEST,
-                  "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
-            }
-          }
-          List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
-          Collections.sort(temp);
-          if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
-          }
-          for (int i = 1; i < temp.size(); i++) {
-            if (temp.get(i - 1).max + 1 != temp.get(i).min) {
-              throw new SolrException(ErrorCode.BAD_REQUEST,
-                  "Specified hash ranges: " + rangesStr + " either overlap with each other or " +
-                      "do not cover the entire range of parent shard: " + range);
-            }
-          }
-        }
-      } else if (splitKey != null) {
-        if (router instanceof CompositeIdRouter) {
-          CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
-          subRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
-          if (subRanges.size() == 1) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                "The split.key: " + splitKey + " has a hash range that is exactly equal to hash range of shard: " + slice);
-          }
-          for (DocRouter.Range subRange : subRanges) {
-            if (subRange.min == subRange.max) {
-              throw new SolrException(ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
-            }
-          }
-          log.info("Partitioning parent shard " + slice + " range: " + parentSlice.getRange() + " yields: " + subRanges);
-          rangesStr = "";
-          for (int i = 0; i < subRanges.size(); i++) {
-            DocRouter.Range subRange = subRanges.get(i);
-            rangesStr += subRange.toString();
-            if (i < subRanges.size() - 1)
-              rangesStr += ',';
-          }
-        }
+    } else {
+      parentSlice = clusterState.getSlice(collectionName, slice);
+    }
+    
+    if (parentSlice == null) {
+      if (clusterState.hasCollection(collectionName)) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
       } else {
-        // todo: fixed to two partitions?
-        subRanges = router.partitionRange(2, range);
+        throw new SolrException(ErrorCode.BAD_REQUEST,
+            "No collection with the specified name exists: " + collectionName);
       }
-
-      try {
-        List<String> subSlices = new ArrayList<>(subRanges.size());
-        List<String> subShardNames = new ArrayList<>(subRanges.size());
-        String nodeName = parentShardLeader.getNodeName();
+    }
+    
+    // find the leader for the shard
+    Replica parentShardLeader = null;
+    try {
+      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice, 10000);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+    
+    DocRouter.Range range = parentSlice.getRange();
+    if (range == null) {
+      range = new PlainIdRouter().fullRange();
+    }
+    
+    List<DocRouter.Range> subRanges = null;
+    String rangesStr = message.getStr(CoreAdminParams.RANGES);
+    if (rangesStr != null) {
+      String[] ranges = rangesStr.split(",");
+      if (ranges.length == 0 || ranges.length == 1) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
+      } else {
+        subRanges = new ArrayList<>(ranges.length);
+        for (int i = 0; i < ranges.length; i++) {
+          String r = ranges[i];
+          try {
+            subRanges.add(DocRouter.DEFAULT.fromString(r));
+          } catch (Exception e) {
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
+          }
+          if (!subRanges.get(i).isSubsetOf(range)) {
+            throw new SolrException(ErrorCode.BAD_REQUEST,
+                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
+          }
+        }
+        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
+        Collections.sort(temp);
+        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
+          throw new SolrException(ErrorCode.BAD_REQUEST,
+              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
+        }
+        for (int i = 1; i < temp.size(); i++) {
+          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
+                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
+          }
+        }
+      }
+    } else if (splitKey != null) {
+      if (router instanceof CompositeIdRouter) {
+        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
+        subRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
+        if (subRanges.size() == 1) {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
+              + " has a hash range that is exactly equal to hash range of shard: " + slice);
+        }
+        for (DocRouter.Range subRange : subRanges) {
+          if (subRange.min == subRange.max) {
+            throw new SolrException(ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
+          }
+        }
+        log.info("Partitioning parent shard " + slice + " range: " + parentSlice.getRange() + " yields: " + subRanges);
+        rangesStr = "";
         for (int i = 0; i < subRanges.size(); i++) {
-          String subSlice = slice + "_" + i;
-          subSlices.add(subSlice);
-          String subShardName = collectionName + "_" + subSlice + "_replica1";
-          subShardNames.add(subShardName);
-
-          Slice oSlice = clusterState.getSlice(collectionName, subSlice);
-          if (oSlice != null) {
-            final Slice.State state = oSlice.getState();
-            if (state == Slice.State.ACTIVE) {
-              throw new SolrException(ErrorCode.BAD_REQUEST, "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
-            } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
-              // delete the shards
-              for (String sub : subSlices) {
-                log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
-                Map<String, Object> propMap = new HashMap<>();
-                propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
-                propMap.put(COLLECTION_PROP, collectionName);
-                propMap.put(SHARD_ID_PROP, sub);
-                ZkNodeProps m = new ZkNodeProps(propMap);
-                try {
-                  deleteShard(clusterState, m, new NamedList());
-                } catch (Exception e) {
-                  throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + sub, e);
-                }
+          DocRouter.Range subRange = subRanges.get(i);
+          rangesStr += subRange.toString();
+          if (i < subRanges.size() - 1) rangesStr += ',';
+        }
+      }
+    } else {
+      // todo: fixed to two partitions?
+      subRanges = router.partitionRange(2, range);
+    }
+    
+    try {
+      List<String> subSlices = new ArrayList<>(subRanges.size());
+      List<String> subShardNames = new ArrayList<>(subRanges.size());
+      String nodeName = parentShardLeader.getNodeName();
+      for (int i = 0; i < subRanges.size(); i++) {
+        String subSlice = slice + "_" + i;
+        subSlices.add(subSlice);
+        String subShardName = collectionName + "_" + subSlice + "_replica1";
+        subShardNames.add(subShardName);
+        
+        Slice oSlice = clusterState.getSlice(collectionName, subSlice);
+        if (oSlice != null) {
+          final Slice.State state = oSlice.getState();
+          if (state == Slice.State.ACTIVE) {
+            throw new SolrException(ErrorCode.BAD_REQUEST,
+                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
+          } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
+            // delete the shards
+            for (String sub : subSlices) {
+              log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
+              Map<String,Object> propMap = new HashMap<>();
+              propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+              propMap.put(COLLECTION_PROP, collectionName);
+              propMap.put(SHARD_ID_PROP, sub);
+              ZkNodeProps m = new ZkNodeProps(propMap);
+              try {
+                deleteShard(clusterState, m, new NamedList());
+              } catch (Exception e) {
+                throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + sub,
+                    e);
               }
             }
           }
         }
-
-        // do not abort splitshard if the unloading fails
-        // this can happen because the replicas created previously may be down
-        // the only side effect of this is that the sub shard may end up having more replicas than we want
-        collectShardResponses(results, false, null, shardHandler);
-
-        String asyncId = message.getStr(ASYNC);
-        HashMap<String, String> requestMap = new HashMap<String, String>();
-
-        for (int i = 0; i < subRanges.size(); i++) {
-          String subSlice = subSlices.get(i);
-          String subShardName = subShardNames.get(i);
-          DocRouter.Range subRange = subRanges.get(i);
-
-          log.info("Creating slice "
-              + subSlice + " of collection " + collectionName + " on "
-              + nodeName);
-
-          Map<String, Object> propMap = new HashMap<>();
-          propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
-          propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
-          propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-          propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
-          propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
-          propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
-          DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
-          inQueue.offer(ZkStateReader.toJSON(new ZkNodeProps(propMap)));
-
-          // wait until we are able to see the new shard in cluster state
-          waitForNewShard(collectionName, subSlice);
-
-          // refresh cluster state
-          clusterState = zkStateReader.getClusterState();
-
-          log.info("Adding replica " + subShardName + " as part of slice "
-              + subSlice + " of collection " + collectionName + " on "
-              + nodeName);
-          propMap = new HashMap<>();
+      }
+      
+      // do not abort splitshard if the unloading fails
+      // this can happen because the replicas created previously may be down
+      // the only side effect of this is that the sub shard may end up having more replicas than we want
+      collectShardResponses(results, false, null, shardHandler);
+      
+      String asyncId = message.getStr(ASYNC);
+      HashMap<String,String> requestMap = new HashMap<String,String>();
+      
+      for (int i = 0; i < subRanges.size(); i++) {
+        String subSlice = subSlices.get(i);
+        String subShardName = subShardNames.get(i);
+        DocRouter.Range subRange = subRanges.get(i);
+        
+        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
+        
+        Map<String,Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
+        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
+        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
+        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
+        DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
+        inQueue.offer(ZkStateReader.toJSON(new ZkNodeProps(propMap)));
+        
+        // wait until we are able to see the new shard in cluster state
+        waitForNewShard(collectionName, subSlice);
+        
+        // refresh cluster state
+        clusterState = zkStateReader.getClusterState();
+        
+        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
+            + " on " + nodeName);
+        propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+        propMap.put(COLLECTION_PROP, collectionName);
+        propMap.put(SHARD_ID_PROP, subSlice);
+        propMap.put("node", nodeName);
+        propMap.put(CoreAdminParams.NAME, subShardName);
+        // copy over property params:
+        for (String key : message.keySet()) {
+          if (key.startsWith(COLL_PROP_PREFIX)) {
+            propMap.put(key, message.getStr(key));
+          }
+        }
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        addReplica(clusterState, new ZkNodeProps(propMap), results);
+      }
+      
+      collectShardResponses(results, true, "SPLITSHARD failed to create subshard leaders", shardHandler);
+      
+      completeAsyncRequest(asyncId, requestMap, results);
+      
+      for (String subShardName : subShardNames) {
+        // wait for parent leader to acknowledge the sub-shard core
+        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
+        String coreNodeName = waitForCoreNodeName(collectionName, nodeName, subShardName);
+        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+        cmd.setCoreName(subShardName);
+        cmd.setNodeName(nodeName);
+        cmd.setCoreNodeName(coreNodeName);
+        cmd.setState(Replica.State.ACTIVE);
+        cmd.setCheckLive(true);
+        cmd.setOnlyIfLeader(true);
+        
+        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
+        sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
+      }
+      
+      collectShardResponses(results, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
+          shardHandler);
+          
+      completeAsyncRequest(asyncId, requestMap, results);
+      
+      log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
+          + " on: " + parentShardLeader);
+          
+      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
+          + collectionName + " on " + parentShardLeader);
+          
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminAction.SPLIT.toString());
+      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+        params.add(CoreAdminParams.TARGET_CORE, subShardName);
+      }
+      params.set(CoreAdminParams.RANGES, rangesStr);
+      
+      sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+      
+      collectShardResponses(results, true, "SPLITSHARD failed to invoke SPLIT core admin command", shardHandler);
+      completeAsyncRequest(asyncId, requestMap, results);
+      
+      log.info("Index on shard: " + nodeName + " split into two successfully");
+      
+      // apply buffered updates on sub-shards
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+        
+        log.info("Applying buffered updates on : " + subShardName);
+        
+        params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+        params.set(CoreAdminParams.NAME, subShardName);
+        
+        sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
+      }
+      
+      collectShardResponses(results, true, "SPLITSHARD failed while asking sub shard leaders to apply buffered updates",
+          shardHandler);
+          
+      completeAsyncRequest(asyncId, requestMap, results);
+      
+      log.info("Successfully applied buffered updates on : " + subShardNames);
+      
+      // Replica creation for the new Slices
+      
+      // look at the replication factor and see if it matches reality
+      // if it does not, find best nodes to create more cores
+      
+      // TODO: Have replication factor decided in some other way instead of numShards for the parent
+      
+      int repFactor = clusterState.getSlice(collectionName, slice).getReplicas().size();
+      
+      // we need to look at every node and see how many cores it serves
+      // add our new cores to existing nodes serving the least number of cores
+      // but (for now) require that each core goes on a distinct node.
+      
+      // TODO: add smarter options that look at the current number of cores per
+      // node?
+      // for now we just go random
+      Set<String> nodes = clusterState.getLiveNodes();
+      List<String> nodeList = new ArrayList<>(nodes.size());
+      nodeList.addAll(nodes);
+      
+      Collections.shuffle(nodeList, RANDOM);
+      
+      // TODO: Have maxShardsPerNode param for this operation?
+      
+      // Remove the node that hosts the parent shard for replica creation.
+      nodeList.remove(nodeName);
+      
+      // TODO: change this to handle sharding a slice into > 2 sub-shards.
+      
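+      // replica1 of each sub-slice already lives on the parent leader's node, so additional
+      // replicas start at index 2 and are spread over the shuffled list of remaining live nodes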
+      for (int i = 1; i <= subSlices.size(); i++) {
+        Collections.shuffle(nodeList, RANDOM);
+        String sliceName = subSlices.get(i - 1);
+        for (int j = 2; j <= repFactor; j++) {
+          String subShardNodeName = nodeList.get((repFactor * (i - 1) + (j - 2)) % nodeList.size());
+          String shardName = collectionName + "_" + sliceName + "_replica" + (j);
+          
+          log.info("Creating replica shard " + shardName + " as part of slice " + sliceName + " of collection "
+              + collectionName + " on " + subShardNodeName);
+              
+          HashMap<String,Object> propMap = new HashMap<>();
           propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
           propMap.put(COLLECTION_PROP, collectionName);
-          propMap.put(SHARD_ID_PROP, subSlice);
-          propMap.put("node", nodeName);
-          propMap.put(CoreAdminParams.NAME, subShardName);
+          propMap.put(SHARD_ID_PROP, sliceName);
+          propMap.put("node", subShardNodeName);
+          propMap.put(CoreAdminParams.NAME, shardName);
           // copy over property params:
           for (String key : message.keySet()) {
             if (key.startsWith(COLL_PROP_PREFIX)) {
@@ -1541,203 +1660,69 @@
             }
           }
           // add async param
-          if(asyncId != null) {
+          if (asyncId != null) {
             propMap.put(ASYNC, asyncId);
           }
           addReplica(clusterState, new ZkNodeProps(propMap), results);
-        }
-
-        collectShardResponses(results, true,
-            "SPLITSHARD failed to create subshard leaders", shardHandler);
-
-        completeAsyncRequest(asyncId, requestMap, results);
-
-        for (String subShardName : subShardNames) {
-          // wait for parent leader to acknowledge the sub-shard core
-          log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
-          String coreNodeName = waitForCoreNodeName(collectionName, nodeName, subShardName);
+          
+          String coreNodeName = waitForCoreNodeName(collectionName, subShardNodeName, shardName);
+          // wait for the new replica to be seen as recovering by the sub shard leader
+          log.info("Asking sub shard leader to wait for: " + shardName + " to be alive on: " + subShardNodeName);
           CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-          cmd.setCoreName(subShardName);
-          cmd.setNodeName(nodeName);
+          cmd.setCoreName(subShardNames.get(i - 1));
+          cmd.setNodeName(subShardNodeName);
           cmd.setCoreNodeName(coreNodeName);
-          cmd.setState(Replica.State.ACTIVE);
+          cmd.setState(Replica.State.RECOVERING);
           cmd.setCheckLive(true);
           cmd.setOnlyIfLeader(true);
-
           ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
+          
           sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
+          
         }
-
-        collectShardResponses(results, true,
-            "SPLITSHARD timed out waiting for subshard leaders to come up", shardHandler);
-
-        completeAsyncRequest(asyncId, requestMap, results);
-
-        log.info("Successfully created all sub-shards for collection "
-            + collectionName + " parent shard: " + slice + " on: " + parentShardLeader);
-
-        log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice "
-            + slice + " of collection " + collectionName + " on "
-            + parentShardLeader);
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.SPLIT.toString());
-        params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
-        for (int i = 0; i < subShardNames.size(); i++) {
-          String subShardName = subShardNames.get(i);
-          params.add(CoreAdminParams.TARGET_CORE, subShardName);
-        }
-        params.set(CoreAdminParams.RANGES, rangesStr);
-
-        sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-        collectShardResponses(results, true, "SPLITSHARD failed to invoke SPLIT core admin command",
-            shardHandler);
-        completeAsyncRequest(asyncId, requestMap, results);
-
-        log.info("Index on shard: " + nodeName + " split into two successfully");
-
-        // apply buffered updates on sub-shards
-        for (int i = 0; i < subShardNames.size(); i++) {
-          String subShardName = subShardNames.get(i);
-
-          log.info("Applying buffered updates on : " + subShardName);
-
-          params = new ModifiableSolrParams();
-          params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-          params.set(CoreAdminParams.NAME, subShardName);
-
-          sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-        }
-
-        collectShardResponses(results, true,
-            "SPLITSHARD failed while asking sub shard leaders to apply buffered updates",
-            shardHandler);
-
-        completeAsyncRequest(asyncId, requestMap, results);
-
-        log.info("Successfully applied buffered updates on : " + subShardNames);
-
-        // Replica creation for the new Slices
-
-        // look at the replication factor and see if it matches reality
-        // if it does not, find best nodes to create more cores
-
-        // TODO: Have replication factor decided in some other way instead of numShards for the parent
-
-        int repFactor = clusterState.getSlice(collectionName, slice).getReplicas().size();
-
-        // we need to look at every node and see how many cores it serves
-        // add our new cores to existing nodes serving the least number of cores
-        // but (for now) require that each core goes on a distinct node.
-
-        // TODO: add smarter options that look at the current number of cores per
-        // node?
-        // for now we just go random
-        Set<String> nodes = clusterState.getLiveNodes();
-        List<String> nodeList = new ArrayList<>(nodes.size());
-        nodeList.addAll(nodes);
-
-        Collections.shuffle(nodeList, RANDOM);
-
-        // TODO: Have maxShardsPerNode param for this operation?
-
-        // Remove the node that hosts the parent shard for replica creation.
-        nodeList.remove(nodeName);
-
-        // TODO: change this to handle sharding a slice into > 2 sub-shards.
-
-        for (int i = 1; i <= subSlices.size(); i++) {
-          Collections.shuffle(nodeList, RANDOM);
-          String sliceName = subSlices.get(i - 1);
-          for (int j = 2; j <= repFactor; j++) {
-            String subShardNodeName = nodeList.get((repFactor * (i - 1) + (j - 2)) % nodeList.size());
-            String shardName = collectionName + "_" + sliceName + "_replica" + (j);
-
-            log.info("Creating replica shard " + shardName + " as part of slice "
-                + sliceName + " of collection " + collectionName + " on "
-                + subShardNodeName);
-
-            HashMap<String, Object> propMap = new HashMap<>();
-            propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-            propMap.put(COLLECTION_PROP, collectionName);
-            propMap.put(SHARD_ID_PROP, sliceName);
-            propMap.put("node", subShardNodeName);
-            propMap.put(CoreAdminParams.NAME, shardName);
-            // copy over property params:
-            for (String key : message.keySet()) {
-              if (key.startsWith(COLL_PROP_PREFIX)) {
-                propMap.put(key, message.getStr(key));
-              }
-            }
-            // add async param
-            if (asyncId != null) {
-              propMap.put(ASYNC, asyncId);
-            }
-            addReplica(clusterState, new ZkNodeProps(propMap), results);
-
-            String coreNodeName = waitForCoreNodeName(collectionName, subShardNodeName, shardName);
-            // wait for the replicas to be seen as active on sub shard leader
-            log.info("Asking sub shard leader to wait for: " + shardName + " to be alive on: " + subShardNodeName);
-            CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-            cmd.setCoreName(subShardNames.get(i - 1));
-            cmd.setNodeName(subShardNodeName);
-            cmd.setCoreNodeName(coreNodeName);
-            cmd.setState(Replica.State.RECOVERING);
-            cmd.setCheckLive(true);
-            cmd.setOnlyIfLeader(true);
-            ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
-
-            sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
-
-          }
-        }
-
-        collectShardResponses(results, true,
-            "SPLITSHARD failed to create subshard replicas or timed out waiting for them to come up",
-            shardHandler);
-
-        completeAsyncRequest(asyncId, requestMap, results);
-
-        log.info("Successfully created all replica shards for all sub-slices " + subSlices);
-
-        commit(results, slice, parentShardLeader);
-
-        if (repFactor == 1) {
-          // switch sub shard states to 'active'
-          log.info("Replication factor is 1 so switching shard states");
-          DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
-          Map<String, Object> propMap = new HashMap<>();
-          propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-          propMap.put(slice, Slice.State.INACTIVE.toString());
-          for (String subSlice : subSlices) {
-            propMap.put(subSlice, Slice.State.ACTIVE.toString());
-          }
-          propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-          ZkNodeProps m = new ZkNodeProps(propMap);
-          inQueue.offer(ZkStateReader.toJSON(m));
-        } else {
-          log.info("Requesting shard state be set to 'recovery'");
-          DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
-          Map<String, Object> propMap = new HashMap<>();
-          propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-          for (String subSlice : subSlices) {
-            propMap.put(subSlice, Slice.State.RECOVERY.toString());
-          }
-          propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-          ZkNodeProps m = new ZkNodeProps(propMap);
-          inQueue.offer(ZkStateReader.toJSON(m));
-        }
-
-        return true;
-      } catch (SolrException e) {
-        throw e;
-      } catch (Exception e) {
-        log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
-        throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
       }
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+      
+      collectShardResponses(results, true,
+          "SPLITSHARD failed to create subshard replicas or timed out waiting for them to come up", shardHandler);
+          
+      completeAsyncRequest(asyncId, requestMap, results);
+      
+      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
+      
+      commit(results, slice, parentShardLeader);
+      
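+      // with a replication factor of 1 there are no extra replicas to wait for, so the
+      // sub-shards can be switched to active (and the parent to inactive) immediately;
+      // otherwise the sub-shards stay in recovery until their new replicas catch up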
+      if (repFactor == 1) {
+        // switch sub shard states to 'active'
+        log.info("Replication factor is 1 so switching shard states");
+        DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
+        Map<String,Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        propMap.put(slice, Slice.State.INACTIVE.toString());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.ACTIVE.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(ZkStateReader.toJSON(m));
+      } else {
+        log.info("Requesting shard state be set to 'recovery'");
+        DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
+        Map<String,Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.RECOVERY.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(ZkStateReader.toJSON(m));
+      }
+      
+      return true;
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
+      throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
     }
   }
 
@@ -1846,71 +1831,64 @@
   private void deleteShard(ClusterState clusterState, ZkNodeProps message, NamedList results) {
     String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
     String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setMDC(collection, sliceId, null, null);
-    try {
-      log.info("Delete shard invoked");
-      Slice slice = clusterState.getSlice(collection, sliceId);
-
-      if (slice == null) {
-        if (clusterState.hasCollection(collection)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "No shard with name " + sliceId + " exists for collection " + collection);
-        } else {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "No collection with the specified name exists: " + collection);
-        }
-      }
-      // For now, only allow for deletions of Inactive slices or custom hashes (range==null).
-      // TODO: Add check for range gaps on Slice deletion
-      final Slice.State state = slice.getState();
-      if (!(slice.getRange() == null || state == Slice.State.INACTIVE
-          || state == Slice.State.RECOVERY || state == Slice.State.CONSTRUCTION)) {
+    
+    log.info("Delete shard invoked");
+    Slice slice = clusterState.getSlice(collection, sliceId);
+    
+    if (slice == null) {
+      if (clusterState.hasCollection(collection)) {
         throw new SolrException(ErrorCode.BAD_REQUEST,
-            "The slice: " + slice.getName() + " is currently "
-                + state + ". Only non-active (or custom-hashed) slices can be deleted.");
+            "No shard with name " + sliceId + " exists for collection " + collection);
+      } else {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "No collection with the specified name exists: " + collection);
       }
-      ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-      try {
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.UNLOAD.toString());
-        params.set(CoreAdminParams.DELETE_INDEX, "true");
-        sliceCmd(clusterState, params, null, slice, shardHandler);
-
-        processResponses(results, shardHandler);
-
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-            DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.SHARD_ID_PROP, sliceId);
-        Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(m));
-
-        // wait for a while until we don't see the shard
-        long now = System.nanoTime();
-        long timeout = now + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
-        boolean removed = false;
-        while (System.nanoTime() < timeout) {
-          Thread.sleep(100);
-          removed = zkStateReader.getClusterState().getSlice(collection, sliceId) == null;
-          if (removed) {
-            Thread.sleep(100); // just a bit of time so it's more likely other readers see on return
-            break;
-          }
+    }
+    // For now, only allow for deletions of Inactive slices or custom hashes (range==null).
+    // TODO: Add check for range gaps on Slice deletion
+    final Slice.State state = slice.getState();
+    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
+        || state == Slice.State.CONSTRUCTION)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
+          + ". Only non-active (or custom-hashed) slices can be deleted.");
+    }
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    
+    try {
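+      // unload every core of the slice (deleting its index), then ask the overseer to
+      // remove the slice from the cluster state and wait for the change to become visible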
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminAction.UNLOAD.toString());
+      params.set(CoreAdminParams.DELETE_INDEX, "true");
+      sliceCmd(clusterState, params, null, slice, shardHandler);
+      
+      processResponses(results, shardHandler);
+      
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
+          collection, ZkStateReader.SHARD_ID_PROP, sliceId);
+      Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(m));
+      
+      // wait for a while until we don't see the shard
+      long now = System.nanoTime();
+      long timeout = now + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
+      boolean removed = false;
+      while (System.nanoTime() < timeout) {
+        Thread.sleep(100);
+        removed = zkStateReader.getClusterState().getSlice(collection, sliceId) == null;
+        if (removed) {
+          Thread.sleep(100); // just a bit of time so it's more likely other readers see on return
+          break;
         }
-        if (!removed) {
-          throw new SolrException(ErrorCode.SERVER_ERROR,
-              "Could not fully remove collection: " + collection + " shard: " + sliceId);
-        }
-
-        log.info("Successfully deleted collection: " + collection + ", shard: " + sliceId);
-
-      } catch (SolrException e) {
-        throw e;
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error executing delete operation for collection: " + collection + " shard: " + sliceId, e);
       }
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+      if (!removed) {
+        throw new SolrException(ErrorCode.SERVER_ERROR,
+            "Could not fully remove collection: " + collection + " shard: " + sliceId);
+      }
+      
+      log.info("Successfully deleted collection: " + collection + ", shard: " + sliceId);
+      
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Error executing delete operation for collection: " + collection + " shard: " + sliceId, e);
     }
   }
 
@@ -2505,110 +2483,101 @@
     }
   }
 
-  private void addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws KeeperException, InterruptedException {
+  private void addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws KeeperException, InterruptedException {
     String collection = message.getStr(COLLECTION_PROP);
     String node = message.getStr("node");
     String shard = message.getStr(SHARD_ID_PROP);
     String coreName = message.getStr(CoreAdminParams.NAME);
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setMDC(collection, shard, null, coreName);
-    try {
-      String asyncId = message.getStr("async");
-
-      DocCollection coll = clusterState.getCollection(collection);
-      if (coll == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-      }
-      if (coll.getSlice(shard) == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " shard: " + shard + " does not exist");
-      }
-      ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-      if (node == null) {
-
-        node = getNodesForNewShard(clusterState, collection, shard, 1,
-            null, overseer.getZkController().getCoreContainer()).get(0).nodeName;
-        log.info("Node not provided, Identified {} for creating new replica", node);
-      }
-
-
-      if (!clusterState.liveNodesContain(node)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
-      }
-      if (coreName == null) {
-        // assign a name to this core
-        Slice slice = coll.getSlice(shard);
-        int replicaNum = slice.getReplicas().size();
-        for (; ; ) {
-          String replicaName = collection + "_" + shard + "_replica" + replicaNum;
-          boolean exists = false;
-          for (Replica replica : slice.getReplicas()) {
-            if (replicaName.equals(replica.getStr("core"))) {
-              exists = true;
-              break;
-            }
-          }
-          if (exists) replicaNum++;
-          else break;
-        }
-        coreName = collection + "_" + shard + "_replica" + replicaNum;
-      }
-      ModifiableSolrParams params = new ModifiableSolrParams();
-
-      if (!Overseer.isLegacy(zkStateReader.getClusterProps())) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.SHARD_ID_PROP, shard,
-            ZkStateReader.CORE_NAME_PROP, coreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node));
-        Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(props));
-        params.set(CoreAdminParams.CORE_NODE_NAME, waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
-      }
-
-
-      String configName = zkStateReader.readConfigName(collection);
-      String routeKey = message.getStr(ShardParams._ROUTE_);
-      String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
-      String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
-
-      params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
-      params.set(CoreAdminParams.NAME, coreName);
-      params.set(COLL_CONF, configName);
-      params.set(CoreAdminParams.COLLECTION, collection);
-      if (shard != null) {
-        params.set(CoreAdminParams.SHARD, shard);
-      } else if (routeKey != null) {
-        Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
-        if (slices.isEmpty()) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
-        } else {
-          params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
-        }
-      } else  {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
-      }
-      if (dataDir != null) {
-        params.set(CoreAdminParams.DATA_DIR, dataDir);
-      }
-      if (instanceDir != null) {
-        params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
-      }
-      addPropertyParams(message, params);
-
-      // For tracking async calls.
-      HashMap<String, String> requestMap = new HashMap<>();
-      sendShardRequest(node, params, shardHandler, asyncId, requestMap);
-
-      collectShardResponses(results, true,
-          "ADDREPLICA failed to create replica", shardHandler);
-
-      completeAsyncRequest(asyncId, requestMap, results);
-    } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+    
+    String asyncId = message.getStr("async");
+    
+    DocCollection coll = clusterState.getCollection(collection);
+    if (coll == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
     }
+    if (coll.getSlice(shard) == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Collection: " + collection + " shard: " + shard + " does not exist");
+    }
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    
+    if (node == null) {
+      
+      node = getNodesForNewShard(clusterState, collection, shard, 1, null,
+          overseer.getZkController().getCoreContainer()).get(0).nodeName;
+      log.info("Node not provided, Identified {} for creating new replica", node);
+    }
+    
+    if (!clusterState.liveNodesContain(node)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
+    }
+    if (coreName == null) {
+      // assign a name to this core
+      Slice slice = coll.getSlice(shard);
+      int replicaNum = slice.getReplicas().size();
+      for (;;) {
+        String replicaName = collection + "_" + shard + "_replica" + replicaNum;
+        boolean exists = false;
+        for (Replica replica : slice.getReplicas()) {
+          if (replicaName.equals(replica.getStr("core"))) {
+            exists = true;
+            break;
+          }
+        }
+        if (exists) replicaNum++;
+        else break;
+      }
+      coreName = collection + "_" + shard + "_replica" + replicaNum;
+    }
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    
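+    // in non-legacy mode the replica is first registered in the cluster state as DOWN via the
+    // overseer queue, and we wait for its assigned coreNodeName before creating the core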
+    if (!Overseer.isLegacy(zkStateReader.getClusterProps())) {
+      ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(), ZkStateReader.COLLECTION_PROP,
+          collection, ZkStateReader.SHARD_ID_PROP, shard, ZkStateReader.CORE_NAME_PROP, coreName,
+          ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(), ZkStateReader.BASE_URL_PROP,
+          zkStateReader.getBaseUrlForNodeName(node));
+      Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(props));
+      params.set(CoreAdminParams.CORE_NODE_NAME,
+          waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
+    }
+    
+    String configName = zkStateReader.readConfigName(collection);
+    String routeKey = message.getStr(ShardParams._ROUTE_);
+    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
+    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
+    
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
+    params.set(CoreAdminParams.NAME, coreName);
+    params.set(COLL_CONF, configName);
+    params.set(CoreAdminParams.COLLECTION, collection);
+    if (shard != null) {
+      params.set(CoreAdminParams.SHARD, shard);
+    } else if (routeKey != null) {
+      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
+      if (slices.isEmpty()) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
+      } else {
+        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
+      }
+    } else {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
+    }
+    if (dataDir != null) {
+      params.set(CoreAdminParams.DATA_DIR, dataDir);
+    }
+    if (instanceDir != null) {
+      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
+    }
+    addPropertyParams(message, params);
+    
+    // For tracking async calls.
+    HashMap<String,String> requestMap = new HashMap<>();
+    sendShardRequest(node, params, shardHandler, asyncId, requestMap);
+    
+    collectShardResponses(results, true, "ADDREPLICA failed to create replica", shardHandler);
+    
+    completeAsyncRequest(asyncId, requestMap, results);
   }
 
   private void processResponses(NamedList results, ShardHandler shardHandler) {
@@ -2834,7 +2803,8 @@
     synchronized (runningTasks) {
       runningTasks.add(head.getId());
     }
-    if(!CLUSTERSTATUS.isEqual(message.getStr(Overseer.QUEUE_OPERATION)) && collectionName != null) {
+    // TODO: deprecated, remove this check.
+    if (!CLUSTERSTATUS.isEqual(message.getStr(Overseer.QUEUE_OPERATION)) && collectionName != null) {
       synchronized (collectionWip) {
         collectionWip.add(collectionName);
       }
@@ -2867,8 +2837,7 @@
       String asyncId = message.getStr(ASYNC);
       String collectionName = message.containsKey(COLLECTION_PROP) ?
           message.getStr(COLLECTION_PROP) : message.getStr(NAME);
-      Map previousMDCContext = MDC.getCopyOfContextMap();
-      MDCUtils.setCollection(collectionName);
+
       try {
         try {
           log.debug("Runner processing {}", head.getId());
@@ -2913,7 +2882,6 @@
         synchronized (waitLock){
           waitLock.notifyAll();
         }
-        MDCUtils.cleanupMDC(previousMDCContext);
       }
     }
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
index 73ba4c1..db56ab7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
@@ -50,6 +50,7 @@
 import org.apache.solr.core.DirectoryFactory.DirContext;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.ReplicationHandler;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrRequestHandler;
@@ -125,7 +126,7 @@
   private void recoveryFailed(final SolrCore core,
       final ZkController zkController, final String baseUrl,
       final String shardZkNodeName, final CoreDescriptor cd) throws KeeperException, InterruptedException {
-    SolrException.log(log, "Recovery failed - I give up. core=" + coreName);
+    SolrException.log(log, "Recovery failed - I give up.");
     try {
       zkController.publish(cd, Replica.State.RECOVERY_FAILED);
     } finally {
@@ -140,7 +141,7 @@
     ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
     String leaderUrl = leaderCNodeProps.getCoreUrl();
     
-    log.info("Attempting to replicate from " + leaderUrl + ". core=" + coreName);
+    log.info("Attempting to replicate from " + leaderUrl + ".");
     
     // send commit
     commitOnLeader(leaderUrl);
@@ -218,12 +219,9 @@
         SolrException.log(log, "SolrCore not found - cannot recover:" + coreName);
         return;
       }
+      MDCLoggingContext.setCore(core);
 
-      SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
-      SolrQueryResponse rsp = new SolrQueryResponse();
-      SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
-
-      log.info("Starting recovery process.  core=" + coreName + " recoveringAfterStartup=" + recoveringAfterStartup);
+      log.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
 
       try {
         doRecovery(core);
@@ -236,7 +234,7 @@
         throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
       }
     } finally {
-      SolrRequestInfo.clearRequestInfo();
+      MDCLoggingContext.clear();
     }
   }
 
@@ -248,7 +246,7 @@
     UpdateLog ulog;
     ulog = core.getUpdateHandler().getUpdateLog();
     if (ulog == null) {
-      SolrException.log(log, "No UpdateLog found - cannot recover. core=" + coreName);
+      SolrException.log(log, "No UpdateLog found - cannot recover.");
       recoveryFailed(core, zkController, baseUrl, coreZkNodeName,
           core.getCoreDescriptor());
       return;
@@ -262,7 +260,7 @@
       recentUpdates = ulog.getRecentUpdates();
       recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
     } catch (Exception e) {
-      SolrException.log(log, "Corrupt tlog - ignoring. core=" + coreName, e);
+      SolrException.log(log, "Corrupt tlog - ignoring.", e);
       recentVersions = new ArrayList<>(0);
     } finally {
       if (recentUpdates != null) {
@@ -291,7 +289,7 @@
         
         log.info("###### startupVersions=" + startingVersions);
       } catch (Exception e) {
-        SolrException.log(log, "Error getting recent versions. core=" + coreName, e);
+        SolrException.log(log, "Error getting recent versions.", e);
         recentVersions = new ArrayList<>(0);
       }
     }
@@ -306,17 +304,16 @@
           // this means we were previously doing a full index replication
           // that probably didn't complete and buffering updates in the
           // meantime.
-          log.info("Looks like a previous replication recovery did not complete - skipping peer sync. core="
-              + coreName);
+          log.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
           firstTime = false; // skip peersync
         }
       } catch (Exception e) {
-        SolrException.log(log, "Error trying to get ulog starting operation. core="
-            + coreName, e);
+        SolrException.log(log, "Error trying to get ulog starting operation.", e);
         firstTime = false; // skip peersync
       }
     }
 
+    Future<RecoveryInfo> replayFuture = null;
     while (!successfulRecovery && !isInterrupted() && !isClosed()) { // don't use interruption or it will close channels though
       try {
         CloudDescriptor cloudDesc = core.getCoreDescriptor()
@@ -337,13 +334,14 @@
         }
         if (cloudDesc.isLeader()) {
           // we are now the leader - no one else must have been suitable
-          log.warn("We have not yet recovered - but we are now the leader! core=" + coreName);
-          log.info("Finished recovery process. core=" + coreName);
+          log.warn("We have not yet recovered - but we are now the leader!");
+          log.info("Finished recovery process.");
           zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
           return;
         }
         
-        log.info("Publishing state of core "+core.getName()+" as recovering, leader is "+leaderUrl+" and I am "+ourUrl);
+        log.info("Publishing state of core " + core.getName() + " as recovering, leader is " + leaderUrl + " and I am "
+            + ourUrl);
         zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
         
         
@@ -380,7 +378,7 @@
         // first thing we just try to sync
         if (firstTime) {
           firstTime = false; // only try sync the first time through the loop
-          log.info("Attempting to PeerSync from " + leaderUrl + " core=" + coreName + " - recoveringAfterStartup="+recoveringAfterStartup);
+          log.info("Attempting to PeerSync from " + leaderUrl + " - recoveringAfterStartup="+recoveringAfterStartup);
           // System.out.println("Attempting to PeerSync from " + leaderUrl
           // + " i am:" + zkController.getNodeName());
           PeerSync peerSync = new PeerSync(core,
@@ -392,7 +390,7 @@
                 new ModifiableSolrParams());
             // force open a new searcher
             core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
-            log.info("PeerSync Recovery was successful - registering as Active. core=" + coreName);
+            log.info("PeerSync Recovery was successful - registering as Active.");
 
             // solrcloud_debug
             if (log.isDebugEnabled()) {
@@ -420,7 +418,7 @@
             return;
           }
 
-          log.info("PeerSync Recovery was not successful - trying replication. core=" + coreName);
+          log.info("PeerSync Recovery was not successful - trying replication.");
         }
 
         if (isClosed()) {
@@ -428,12 +426,12 @@
           break;
         }
         
-        log.info("Starting Replication Recovery. core=" + coreName);
+        log.info("Starting Replication Recovery.");
         
-        log.info("Begin buffering updates. core=" + coreName);
+        log.info("Begin buffering updates.");
         ulog.bufferUpdates();
         replayed = false;
-        
+
         try {
 
           replicate(zkController.getNodeName(), core, leaderprops);
@@ -442,8 +440,8 @@
             log.info("Recovery was cancelled");
             break;
           }
-          
-          replay(core);
+
+          replayFuture = replay(core);
           replayed = true;
           
           if (isClosed()) {
@@ -451,7 +449,7 @@
             break;
           }
 
-          log.info("Replication Recovery was successful - registering as Active. core=" + coreName);
+          log.info("Replication Recovery was successful - registering as Active.");
           // if there are pending recovery requests, don't advert as active
           zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
           close = true;
@@ -475,7 +473,7 @@
         }
 
       } catch (Exception e) {
-        SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
+        SolrException.log(log, "Error while trying to recover.", e);
       }
 
       if (!successfulRecovery) {
@@ -488,11 +486,11 @@
             break;
           }
           
-          log.error("Recovery failed - trying again... (" + retries + ") core=" + coreName);
+          log.error("Recovery failed - trying again... (" + retries + ")");
           
           retries++;
           if (retries >= MAX_RETRIES) {
-            SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + "). core=" + coreName);
+            SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
             try {
               recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
             } catch (Exception e) {
@@ -501,7 +499,7 @@
             break;
           }
         } catch (Exception e) {
-          SolrException.log(log, "core=" + coreName, e);
+          SolrException.log(log, "", e);
         }
 
         try {
@@ -514,13 +512,21 @@
           }
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
-          log.warn("Recovery was interrupted. core=" + coreName, e);
+          log.warn("Recovery was interrupted.", e);
           close = true;
         }
       }
 
     }
-    log.info("Finished recovery process. core=" + coreName);
+
+    // if replay was skipped (possibly due to pulling a full index from the leader),
+    // then we still need to update version bucket seeds after recovery
+    if (successfulRecovery && replayFuture == null) {
+      log.info("Updating version bucket highest from index after successful recovery.");
+      core.seedVersionBuckets();
+    }
+
+    log.info("Finished recovery process.");
 
     
   }
@@ -530,9 +536,9 @@
     Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
     if (future == null) {
      // no replay needed
-      log.info("No replay needed. core=" + coreName);
+      log.info("No replay needed.");
     } else {
-      log.info("Replaying buffered documents. core=" + coreName);
+      log.info("Replaying buffered documents.");
       // wait for replay
       RecoveryInfo report = future.get();
       if (report.failed) {
diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
index 9625024..a88f7bc 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
@@ -1,5 +1,10 @@
 package org.apache.solr.cloud;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -33,6 +38,7 @@
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.handler.component.ShardResponse;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrRequestInfo;
@@ -41,11 +47,7 @@
 import org.apache.solr.update.UpdateShardHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
+import org.slf4j.MDC;
 
 public class SyncStrategy {
   protected final Logger log = LoggerFactory.getLogger(getClass());
@@ -76,15 +78,16 @@
     return sync(zkController, core, leaderProps, false);
   }
   
-  public boolean sync(ZkController zkController, SolrCore core, ZkNodeProps leaderProps, boolean peerSyncOnlyWithActive) {
+  public boolean sync(ZkController zkController, SolrCore core, ZkNodeProps leaderProps,
+      boolean peerSyncOnlyWithActive) {
     if (SKIP_AUTO_RECOVERY) {
       return true;
     }
-    boolean success;
-    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
+    
+    MDCLoggingContext.setCore(core);
     try {
+      boolean success;
+      
       if (isClosed) {
         log.warn("Closed, skipping sync up.");
         return false;
@@ -95,12 +98,13 @@
         log.error("No UpdateLog found - cannot sync");
         return false;
       }
-
+      
       success = syncReplicas(zkController, core, leaderProps, peerSyncOnlyWithActive);
+      
+      return success;
     } finally {
-      SolrRequestInfo.clearRequestInfo();
+      MDCLoggingContext.clear();
     }
-    return success;
   }
   
   private boolean syncReplicas(ZkController zkController, SolrCore core,
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index c4de44f..79796ae 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -82,7 +82,7 @@
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.logging.MDCUtils;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.update.UpdateLog;
 import org.apache.solr.update.UpdateShardHandler;
 import org.apache.zookeeper.CreateMode;
@@ -96,7 +96,6 @@
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
 
 import com.google.common.base.Strings;
 
@@ -248,9 +247,7 @@
     this.localHostPort = cloudConfig.getSolrHostPort();
     this.hostName = normalizeHostName(cloudConfig.getHost());
     this.nodeName = generateNodeName(this.hostName, Integer.toString(this.localHostPort), localHostContext);
-
-    MDC.put(NODE_NAME_PROP, nodeName);
-
+    MDCLoggingContext.setNode(nodeName);
     this.leaderVoteWait = cloudConfig.getLeaderVoteWait();
     this.leaderConflictResolveWait = cloudConfig.getLeaderConflictResolveWait();
 
@@ -849,39 +846,38 @@
    * @return the shardId for the SolrCore
    */
   public String register(String coreName, final CoreDescriptor desc, boolean recoverReloadedCores, boolean afterExpiration) throws Exception {
-    // pre register has published our down state
-    final String baseUrl = getBaseUrl();
-
-    final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
-    final String collection = cloudDesc.getCollectionName();
-
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setCollection(collection);
-
-    final String coreZkNodeName = desc.getCloudDescriptor().getCoreNodeName();
-    assert coreZkNodeName != null : "we should have a coreNodeName by now";
-
-    String shardId = cloudDesc.getShardId();
-    MDCUtils.setShard(shardId);
-    Map<String, Object> props = new HashMap<>();
-    // we only put a subset of props into the leader node
-    props.put(ZkStateReader.BASE_URL_PROP, baseUrl);
-    props.put(ZkStateReader.CORE_NAME_PROP, coreName);
-    props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
-
-
-    if (log.isInfoEnabled()) {
-      log.info("Register replica - core:" + coreName + " address:"
-          + baseUrl + " collection:" + cloudDesc.getCollectionName() + " shard:" + shardId);
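+    // set up the MDC logging context from the core; the try-with-resources block
+    // releases the core reference again right away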
+    try (SolrCore core = cc.getCore(desc.getName())) {
+      MDCLoggingContext.setCore(core);
     }
-
-    ZkNodeProps leaderProps = new ZkNodeProps(props);
-
     try {
+      // pre register has published our down state
+      final String baseUrl = getBaseUrl();
+      
+      final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
+      final String collection = cloudDesc.getCollectionName();
+      
+      final String coreZkNodeName = desc.getCloudDescriptor().getCoreNodeName();
+      assert coreZkNodeName != null : "we should have a coreNodeName by now";
+      
+      String shardId = cloudDesc.getShardId();
+      Map<String,Object> props = new HashMap<>();
+      // we only put a subset of props into the leader node
+      props.put(ZkStateReader.BASE_URL_PROP, baseUrl);
+      props.put(ZkStateReader.CORE_NAME_PROP, coreName);
+      props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
+      
+      if (log.isInfoEnabled()) {
+        log.info("Register replica - core:" + coreName + " address:" + baseUrl + " collection:"
+            + cloudDesc.getCollectionName() + " shard:" + shardId);
+      }
+      
+      ZkNodeProps leaderProps = new ZkNodeProps(props);
+      
       try {
         // If we're a preferred leader, insert ourselves at the head of the queue
         boolean joinAtHead = false;
-        Replica replica = zkStateReader.getClusterState().getReplica(desc.getCloudDescriptor().getCollectionName(), coreZkNodeName);
+        Replica replica = zkStateReader.getClusterState().getReplica(desc.getCloudDescriptor().getCollectionName(),
+            coreZkNodeName);
         if (replica != null) {
           joinAtHead = replica.getBool(SliceMutator.PREFERRED_LEADER_PROP, false);
         }
@@ -893,25 +889,24 @@
       } catch (KeeperException | IOException e) {
         throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
       }
-
-
-      // in this case, we want to wait for the leader as long as the leader might 
+      
+      // in this case, we want to wait for the leader as long as the leader might
       // wait for a vote, at least - but also long enough that a large cluster has
       // time to get its act together
       String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);
-
+      
       String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
       log.info("We are " + ourUrl + " and leader is " + leaderUrl);
       boolean isLeader = leaderUrl.equals(ourUrl);
-
+      
       try (SolrCore core = cc.getCore(desc.getName())) {
-
+        
         // recover from local transaction log and wait for it to complete before
         // going active
         // TODO: should this be moved to another thread? To recoveryStrat?
         // TODO: should this actually be done earlier, before (or as part of)
         // leader election perhaps?
-
+        
         UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
         
         // we will call register again after zk expiration and on reload
@@ -931,18 +926,18 @@
             }
           }
         }
-        boolean didRecovery = checkRecovery(coreName, desc, recoverReloadedCores, isLeader, cloudDesc,
-            collection, coreZkNodeName, shardId, leaderProps, core, cc, afterExpiration);
+        boolean didRecovery = checkRecovery(coreName, desc, recoverReloadedCores, isLeader, cloudDesc, collection,
+            coreZkNodeName, shardId, leaderProps, core, cc, afterExpiration);
         if (!didRecovery) {
           publish(desc, Replica.State.ACTIVE);
         }
       }
-
+      
       // make sure we have an update cluster state right away
       zkStateReader.updateClusterState(true);
       return shardId;
     } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+      MDCLoggingContext.clear();
     }
   }
 
@@ -1138,28 +1133,26 @@
         if (core == null || core.isClosed()) {
           return;
         }
+        MDCLoggingContext.setCore(core);
       }
+    } else {
+      MDCLoggingContext.setCoreDescriptor(cd);
     }
-    String collection = cd.getCloudDescriptor().getCollectionName();
-
-    Map previousMDCContext = MDC.getCopyOfContextMap();
-    MDCUtils.setCollection(collection);
-
     try {
-      if (cd != null && cd.getName() != null)
-        MDCUtils.setCore(cd.getName());
-      log.info("publishing core={} state={} collection={}", cd.getName(), state.toString(), collection);
-      //System.out.println(Thread.currentThread().getStackTrace()[3]);
+      String collection = cd.getCloudDescriptor().getCollectionName();
+      
+      log.info("publishing state={}", state.toString());
+      // System.out.println(Thread.currentThread().getStackTrace()[3]);
       Integer numShards = cd.getCloudDescriptor().getNumShards();
-      if (numShards == null) { //XXX sys prop hack
+      if (numShards == null) { // XXX sys prop hack
         log.info("numShards not found on descriptor - reading it from system property");
         numShards = Integer.getInteger(ZkStateReader.NUM_SHARDS_PROP);
       }
-
+      
       assert collection != null && collection.length() > 0;
-
+      
       String shardId = cd.getCloudDescriptor().getShardId();
-      MDCUtils.setShard(shardId);
+      
       String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
       // If the leader initiated recovery, then verify that this replica has performed
       // recovery as requested before becoming active; don't even look at lirState if going down
@@ -1182,8 +1175,8 @@
           }
         }
       }
-
-      Map<String, Object> props = new HashMap<>();
+      
+      Map<String,Object> props = new HashMap<>();
       props.put(Overseer.QUEUE_OPERATION, "state");
       props.put(ZkStateReader.STATE_PROP, state.toString());
       props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
@@ -1198,7 +1191,7 @@
       if (coreNodeName != null) {
         props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
       }
-
+      
       if (ClusterStateUtil.isAutoAddReplicas(getZkStateReader(), collection)) {
         try (SolrCore core = cc.getCore(cd.getName())) {
           if (core != null && core.getDirectoryFactory().isSharedStorage()) {
@@ -1210,15 +1203,15 @@
           }
         }
       }
-
+      
       ZkNodeProps m = new ZkNodeProps(props);
-
+      
       if (updateLastState) {
         cd.getCloudDescriptor().lastPublished = state;
       }
       overseerJobQueue.offer(ZkStateReader.toJSON(m));
     } finally {
-      MDCUtils.cleanupMDC(previousMDCContext);
+      MDCLoggingContext.clear();
     }
   }
 
@@ -1909,7 +1902,7 @@
     } catch (NoNodeException nne) {
       return;
     } catch (Exception e) {
-      log.warn("could not readd the overseer designate ", e);
+      log.warn("could not read the overseer designate ", e);
     }
   }
 
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
index 7d3fdc0..6bf5c66 100644
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@@ -500,4 +500,15 @@
     }
     return livePaths;
   }
+
+  @Override
+  protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
+    Set<String> livePaths = getLivePaths();
+    if (livePaths.contains(oldDirPath)) {
+      log.warn("Cannot delete directory {} as it is still being referenced in the cache!", oldDirPath);
+      return false;
+    }
+
+    return super.deleteOldIndexDirectory(oldDirPath);
+  }
 }
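
The override above lets CachingDirectoryFactory veto removal of an index directory that is still referenced in its directory cache before delegating to the base-class deletion. A minimal sketch of another subclass using the same extension point; the class name and logger are hypothetical, only the overridden method signature comes from the patch:

  import java.io.IOException;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  // Hypothetical factory that audits deletions before handing off to the default behavior.
  public class AuditingDirectoryFactory extends StandardDirectoryFactory {
    private static final Logger log = LoggerFactory.getLogger(AuditingDirectoryFactory.class);

    @Override
    protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
      log.info("About to delete old index directory {}", oldDirPath); // audit trail before removal
      return super.deleteOldIndexDirectory(oldDirPath);               // default: recursive delete
    }
  }
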
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 46c8a89..0cf3476 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -27,7 +27,10 @@
 import java.util.Properties;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
@@ -46,6 +49,7 @@
 import org.apache.solr.handler.component.HttpShardHandlerFactory;
 import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.logging.LogWatcher;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.SolrRequestHandler;
 import org.apache.solr.security.AuthorizationPlugin;
 import org.apache.solr.security.AuthenticationPlugin;
@@ -98,6 +102,9 @@
   protected ShardHandlerFactory shardHandlerFactory;
   
   private UpdateShardHandler updateShardHandler;
+  
+  private ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
+      new DefaultSolrThreadFactory("coreContainerWorkExecutor") );
 
   protected LogWatcher logging = null;
 
@@ -121,6 +128,8 @@
 
   private PluginBag<SolrRequestHandler> containerHandlers = new PluginBag<>(SolrRequestHandler.class, null);
 
+  private boolean asyncSolrCoreLoad;
+
   public ExecutorService getCoreZkRegisterExecutorService() {
     return zkSys.getCoreZkRegisterExecutorService();
   }
@@ -182,13 +191,22 @@
   public CoreContainer(NodeConfig config, Properties properties) {
     this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()));
   }
+  
+  public CoreContainer(NodeConfig config, Properties properties, boolean asyncSolrCoreLoad) {
+    this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()), asyncSolrCoreLoad);
+  }
 
   public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator) {
+    this(config, properties, locator, false);
+  }
+  
+  public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator, boolean asyncSolrCoreLoad) {
     this.loader = config.getSolrResourceLoader();
     this.solrHome = loader.getInstanceDir();
     this.cfg = checkNotNull(config);
     this.coresLocator = locator;
     this.containerProperties = new Properties(properties);
+    this.asyncSolrCoreLoad = asyncSolrCoreLoad;
   }
 
   private void intializeAuthorizationPlugin() {
@@ -312,7 +330,6 @@
    * Load the cores defined for this CoreContainer
    */
   public void load()  {
-
     log.info("Loading cores into CoreContainer [instanceDir={}]", loader.getInstanceDir());
 
     // add the sharedLib to the shared resource loader before initializing cfg based plugins
@@ -334,7 +351,6 @@
     logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
 
     hostName = cfg.getNodeName();
-    log.info("Node Name: " + hostName);
 
     zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
 
@@ -360,57 +376,77 @@
     ExecutorService coreLoadExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(
         ( zkSys.getZkController() == null ? cfg.getCoreLoadThreadCount() : Integer.MAX_VALUE ),
         new DefaultSolrThreadFactory("coreLoadExecutor") );
-
+    final List<Future<SolrCore>> futures = new ArrayList<Future<SolrCore>>();
     try {
 
       List<CoreDescriptor> cds = coresLocator.discover(this);
       checkForDuplicateCoreNames(cds);
 
-      List<Callable<SolrCore>> creators = new ArrayList<>();
+
       for (final CoreDescriptor cd : cds) {
         if (cd.isTransient() || !cd.isLoadOnStartup()) {
           solrCores.putDynamicDescriptor(cd.getName(), cd);
+        } else if (asyncSolrCoreLoad) {
+          solrCores.markCoreAsLoading(cd);
         }
         if (cd.isLoadOnStartup()) {
-          creators.add(new Callable<SolrCore>() {
+          futures.add(coreLoadExecutor.submit(new Callable<SolrCore>() {
             @Override
             public SolrCore call() throws Exception {
-              if (zkSys.getZkController() != null) {
-                zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
+              SolrCore core;
+              try {
+                if (zkSys.getZkController() != null) {
+                  zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
+                }
+                
+                core = create(cd, false);
+              } finally {
+                if (asyncSolrCoreLoad) {
+                  solrCores.markCoreAsNotLoading(cd);
+                }
               }
-              return create(cd, false);   
+              try {
+                zkSys.registerInZk(core, true);
+              } catch (Throwable t) {
+                SolrException.log(log, "Error registering SolrCore", t);
+              }
+              return core;
             }
-          });
+          }));
         }
       }
 
-      try {
-        coreLoadExecutor.invokeAll(creators);
-      }
-      catch (InterruptedException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Interrupted while loading cores");
-      }
 
       // Start the background thread
       backgroundCloser = new CloserThread(this, solrCores, cfg);
       backgroundCloser.start();
 
     } finally {
-      ExecutorUtil.shutdownNowAndAwaitTermination(coreLoadExecutor);
+      if (asyncSolrCoreLoad && futures != null) {
+        Thread shutdownThread = new Thread() {
+          public void run() {
+            try {
+              for (Future<SolrCore> future : futures) {
+                try {
+                  future.get();
+                } catch (InterruptedException e) {
+                  Thread.currentThread().interrupt();
+                } catch (ExecutionException e) {
+                  log.error("Error waiting for SolrCore to be created", e);
+                }
+              }
+            } finally {
+              ExecutorUtil.shutdownNowAndAwaitTermination(coreLoadExecutor);
+            }
+          }
+        };
+        coreContainerWorkExecutor.submit(shutdownThread);
+      } else {
+        ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
+      }
     }
     
     if (isZooKeeperAware()) {
-      // register in zk in background threads
-      Collection<SolrCore> cores = getCores();
-      if (cores != null) {
-        for (SolrCore core : cores) {
-          try {
-            zkSys.registerInZk(core, true);
-          } catch (Throwable t) {
-            SolrException.log(log, "Error registering SolrCore", t);
-          }
-        }
-      }
       zkSys.getZkController().checkOverseerDesignate();
     }
   }
@@ -442,6 +478,8 @@
     
     isShutDown = true;
     
+    ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
+    
     if (isZooKeeperAware()) {
       cancelCoreRecoveries();
       zkSys.publishCoresAsDown(solrCores.getCores());
@@ -622,12 +660,12 @@
   public SolrCore create(CoreDescriptor dcore, boolean publishState) {
 
     if (isShutDown) {
-      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has close.");
+      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shut down.");
     }
 
     SolrCore core = null;
     try {
-
+      MDCLoggingContext.setCoreDescriptor(dcore); // core is still null here; use the descriptor for MDC context
       if (zkSys.getZkController() != null) {
         zkSys.getZkController().preRegister(dcore);
       }
@@ -649,14 +687,18 @@
       coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
       log.error("Error creating core [{}]: {}", dcore.getName(), e.getMessage(), e);
       final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
-      IOUtils.closeQuietly(core);
+      if(core != null && !core.isClosed())
+        IOUtils.closeQuietly(core);
       throw solrException;
     } catch (Throwable t) {
       SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
       log.error("Error creating core [{}]: {}", dcore.getName(), t.getMessage(), t);
       coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
-      IOUtils.closeQuietly(core);
+      if(core != null && !core.isClosed())
+        IOUtils.closeQuietly(core);
       throw t;
+    } finally {
+      MDCLoggingContext.clear();
     }
 
   }
@@ -911,6 +953,20 @@
   public JarRepository getJarRepository(){
     return jarRepository;
   }
+  
+  /**
+   * If using asyncSolrCoreLoad=true, calling this after {@link #load()} will
+   * not return until all cores have finished loading.
+   * 
+   * @param timeoutMs timeout in milliseconds; once it elapses the method simply returns
+   */
+  public void waitForLoadingCoresToFinish(long timeoutMs) {
+    solrCores.waitForLoadingCoresToFinish(timeoutMs);
+  }
+  
+  public void waitForLoadingCore(String name, long timeoutMs) {
+    solrCores.waitForLoadingCoreToFinish(name, timeoutMs);
+  }
 
   // ---------------- CoreContainer request handlers --------------
 
@@ -1000,6 +1056,10 @@
   public SolrResourceLoader getResourceLoader() {
     return loader;
   }
+  
+  public boolean isCoreLoading(String name) {
+    return solrCores.isCoreLoading(name);
+  }
 
   public AuthorizationPlugin getAuthorizationPlugin() {
     return authorizationPlugin;
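
The new asyncSolrCoreLoad flag makes load() submit each startup core to coreLoadExecutor and return without waiting; callers that need every core ready must now wait explicitly. A minimal usage sketch, assuming a NodeConfig, Properties and the core name are already in hand (those values are illustrative):

  // Sketch: start a container with asynchronous core loading, then wait for the cores.
  CoreContainer cc = new CoreContainer(nodeConfig, properties, true); // true = asyncSolrCoreLoad
  cc.load();                                   // returns while cores may still be loading
  cc.waitForLoadingCoresToFinish(30000);       // block up to 30s for all cores
  if (cc.isCoreLoading("collection1_shard1_replica1")) {
    cc.waitForLoadingCore("collection1_shard1_replica1", 10000); // or wait on a single core
  }
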
diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
index 31de693..49e1b90 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
@@ -207,7 +207,7 @@
       cloudDesc = null;
     }
 
-    SolrCore.log.info("CORE DESCRIPTOR: " + coreProperties);
+    SolrCore.log.info("Created CoreDescriptor: " + coreProperties);
   }
 
   /**
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index d889d1a..8413e12 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -19,10 +19,12 @@
 
 import java.io.Closeable;
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -45,6 +47,8 @@
   // A large estimate should currently have no other side effects.
   public static final IOContext IOCONTEXT_NO_CACHE = new IOContext(new FlushInfo(10*1000*1000, 100L*1000*1000*1000));
 
+  protected static final String INDEX_W_TIMESTAMP_REGEX = "index\\.[0-9]{17}"; // see SnapShooter.DATE_FMT
+
   // hint about what the directory contains - default is index directory
   public enum DirContext {DEFAULT, META_DATA}
 
@@ -271,4 +275,48 @@
   public Collection<SolrInfoMBean> offerMBeans() {
     return Collections.emptySet();
   }
+
+  public void cleanupOldIndexDirectories(final String dataDirPath, final String currentIndexDirPath) {
+    File dataDir = new File(dataDirPath);
+    if (!dataDir.isDirectory()) {
+      log.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDirPath);
+      return;
+    }
+
+    final File currentIndexDir = new File(currentIndexDirPath);
+    File[] oldIndexDirs = dataDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File file) {
+        String fileName = file.getName();
+        return file.isDirectory() &&
+               !file.equals(currentIndexDir) &&
+               (fileName.equals("index") || fileName.matches(INDEX_W_TIMESTAMP_REGEX));
+      }
+    });
+
+    if (oldIndexDirs == null || oldIndexDirs.length == 0)
+      return; // nothing to do (no log message needed)
+
+    log.info("Found {} old index directories to clean-up under {}", oldIndexDirs.length, dataDirPath);
+    for (File dir : oldIndexDirs) {
+
+      String dirToRmPath = dir.getAbsolutePath();
+      try {
+        if (deleteOldIndexDirectory(dirToRmPath)) {
+          log.info("Deleted old index directory: {}", dirToRmPath);
+        } else {
+          log.warn("Delete old index directory {} failed.", dirToRmPath);
+        }
+      } catch (IOException ioExc) {
+        log.error("Failed to delete old directory {} due to: {}", dir.getAbsolutePath(), ioExc.toString());
+      }
+    }
+  }
+
+  // Extension point to allow sub-classes to infuse additional code when deleting old index directories
+  protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
+    File dirToRm = new File(oldDirPath);
+    FileUtils.deleteDirectory(dirToRm);
+    return !dirToRm.isDirectory();
+  }
 }
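
cleanupOldIndexDirectories only considers entries in the data dir whose name is either the plain "index" or index. followed by a 17-digit SnapShooter timestamp, and it always skips the current index directory. A tiny sketch of that name test; the sample names are illustrative:

  // Sketch: which data-dir entries the clean-up treats as old index directories.
  String regex = "index\\.[0-9]{17}"; // DirectoryFactory.INDEX_W_TIMESTAMP_REGEX, see SnapShooter.DATE_FMT
  for (String name : new String[] {"index", "index.20150601123045678", "index.properties", "tlog"}) {
    boolean oldIndexDir = name.equals("index") || name.matches(regex);
    System.out.println(name + " -> " + oldIndexDir); // true, true, false, false
  }
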
diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
index 42f29cc..08577de 100644
--- a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
@@ -24,13 +24,16 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Locale;
+import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockFactory;
@@ -442,4 +445,77 @@
   public Collection<SolrInfoMBean> offerMBeans() {
     return Arrays.<SolrInfoMBean>asList(MetricsHolder.metrics);
   }
+
+  @Override
+  public void cleanupOldIndexDirectories(final String dataDir, final String currentIndexDir) {
+
+    // Get the FileSystem object
+    final Path dataDirPath = new Path(dataDir);
+    final Configuration conf = getConf();
+    FileSystem fileSystem = null;
+    try {
+      fileSystem = tmpFsCache.get(dataDir, new Callable<FileSystem>() {
+        @Override
+        public FileSystem call() throws IOException {
+          return FileSystem.get(dataDirPath.toUri(), conf);
+        }
+      });
+    } catch (ExecutionException e) {
+      throw new RuntimeException(e);
+    }
+
+    boolean pathExists = false;
+    try {
+      pathExists = fileSystem.exists(dataDirPath);
+    } catch (IOException e) {
+      LOG.error("Error checking if hdfs path "+dataDir+" exists", e);
+    }
+    if (!pathExists) {
+      LOG.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
+      return;
+    }
+
+    final Path currentIndexDirPath = new Path(currentIndexDir); // make sure we don't delete the current
+    final FileSystem fs = fileSystem;
+    FileStatus[] oldIndexDirs = null;
+    try {
+      oldIndexDirs = fileSystem.listStatus(dataDirPath, new PathFilter() {
+        @Override
+        public boolean accept(Path path) {
+          boolean accept = false;
+          String pathName = path.getName();
+          try {
+            accept = fs.isDirectory(path) && !path.equals(currentIndexDirPath) &&
+                (pathName.equals("index") || pathName.matches(INDEX_W_TIMESTAMP_REGEX));
+          } catch (IOException e) {
+            LOG.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
+          }
+          return accept;
+        }
+      });
+    } catch (IOException ioExc) {
+      LOG.error("Error checking for old index directories to clean-up.", ioExc);
+    }
+
+    if (oldIndexDirs == null || oldIndexDirs.length == 0)
+      return; // nothing to clean-up
+
+    Set<String> livePaths = getLivePaths();
+    for (FileStatus oldDir : oldIndexDirs) {
+      Path oldDirPath = oldDir.getPath();
+      if (livePaths.contains(oldDirPath.toString())) {
+        LOG.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
+      } else {
+        try {
+          if (fileSystem.delete(oldDirPath, true)) {
+            LOG.info("Deleted old index directory {}", oldDirPath);
+          } else {
+            LOG.warn("Failed to delete old index directory {}", oldDirPath);
+          }
+        } catch (IOException e) {
+          LOG.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
+        }
+      }
+    }
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
index 16d94ea..21d60ad 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -255,7 +255,6 @@
       conf = new CacheConfig(FastLRUCache.class, args, null);
     }
     fieldValueCacheConfig = conf;
-    unlockOnStartup = getBool(indexConfigPrefix + "/unlockOnStartup", false);
     useColdSearcher = getBool("query/useColdSearcher", false);
     dataDir = get("dataDir", null);
     if (dataDir != null && dataDir.length() == 0) dataDir = null;
@@ -485,7 +484,6 @@
   private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>();
 
   public final int maxWarmingSearchers;
-  public final boolean unlockOnStartup;
   public final boolean useColdSearcher;
   public final Version luceneMatchVersion;
   protected String dataDir;
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index a3e4849..8f0cb70 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -90,7 +90,7 @@
 import org.apache.solr.handler.admin.ShowFileRequestHandler;
 import org.apache.solr.handler.component.HighlightComponent;
 import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.logging.MDCUtils;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrRequestHandler;
 import org.apache.solr.response.BinaryResponseWriter;
@@ -500,53 +500,45 @@
 
   void initIndex(boolean reload) throws IOException {
 
-      String indexDir = getNewIndexDir();
-      boolean indexExists = getDirectoryFactory().exists(indexDir);
-      boolean firstTime;
-      synchronized (SolrCore.class) {
-        firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
-      }
-      boolean removeLocks = solrConfig.unlockOnStartup;
+    String indexDir = getNewIndexDir();
+    boolean indexExists = getDirectoryFactory().exists(indexDir);
+    boolean firstTime;
+    synchronized (SolrCore.class) {
+      firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
+    }
 
-      initIndexReaderFactory();
+    initIndexReaderFactory();
 
-      if (indexExists && firstTime && !reload) {
+    if (indexExists && firstTime && !reload) {
 
-        Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT,
-            getSolrConfig().indexConfig.lockType);
-        try {
-          if (IndexWriter.isLocked(dir)) {
-            if (removeLocks) {
-              log.warn(
-                  logid
-                      + "WARNING: Solr index directory '{}' is locked.  Unlocking...",
-                  indexDir);
-              dir.makeLock(IndexWriter.WRITE_LOCK_NAME).close();
-            } else {
-              log.error(logid
-                  + "Solr index directory '{}' is locked.  Throwing exception",
-                  indexDir);
-              throw new LockObtainFailedException(
-                  "Index locked for write for core " + name);
-            }
-
-          }
-        } finally {
-          directoryFactory.release(dir);
+      Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT,
+          getSolrConfig().indexConfig.lockType);
+      try {
+        if (IndexWriter.isLocked(dir)) {
+          log.error(logid
+              + "Solr index directory '{}' is locked.  Throwing exception.",
+              indexDir);
+          throw new LockObtainFailedException(
+              "Index locked for write for core '" + name +
+              "'. Solr no longer supports forceful unlocking via 'unlockOnStartup'. Please verify locks manually!");
         }
+      } finally {
+        directoryFactory.release(dir);
       }
+    }
 
-      // Create the index if it doesn't exist.
-      if(!indexExists) {
-        log.warn(logid+"Solr index directory '" + new File(indexDir) + "' doesn't exist."
-                + " Creating new index...");
+    // Create the index if it doesn't exist.
+    if(!indexExists) {
+      log.warn(logid+"Solr index directory '" + new File(indexDir) + "' doesn't exist."
+              + " Creating new index...");
 
-        SolrIndexWriter writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true,
-                                                        getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
-        writer.close();
-      }
+      SolrIndexWriter writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true,
+                                                      getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
+      writer.close();
+    }
 
 
+    cleanupOldIndexDirectories();
   }
 
 
@@ -711,7 +703,7 @@
     
     this.coreDescriptor = coreDescriptor;
     setName(name);
-    MDCUtils.setCore(name); // show the core name in the error logs
+    MDCLoggingContext.setCore(this);
     
     resourceLoader = config.getResourceLoader();
     this.solrConfig = config;
@@ -841,6 +833,9 @@
       }
     }
 
+    // seed version buckets with max from index during core initialization ... requires a searcher!
+    seedVersionBuckets();
+
     bufferUpdatesIfConstructing(coreDescriptor);
     
     // For debugging   
@@ -849,16 +844,20 @@
 
     this.ruleExpiryLock = new ReentrantLock();
     registerConfListener();
+  }
 
-    // seed version buckets with max from index during core initialization
-    if (this.updateHandler != null && this.updateHandler.getUpdateLog() != null) {
+  public void seedVersionBuckets() {
+    UpdateHandler uh = getUpdateHandler();
+    if (uh != null && uh.getUpdateLog() != null) {
       RefCounted<SolrIndexSearcher> newestSearcher = getRealtimeSearcher();
       if (newestSearcher != null) {
         try {
-          this.updateHandler.getUpdateLog().onFirstSearcher(newestSearcher.get());
+          uh.getUpdateLog().seedBucketsWithHighestVersion(newestSearcher.get());
         } finally {
           newestSearcher.decref();
         }
+      } else {
+        log.warn("No searcher available! Cannot seed version buckets with max from index.");
       }
     }
   }
@@ -1657,7 +1656,6 @@
         newestSearcher.decref();
       }
     }
-
   }
 
   /**
@@ -2624,7 +2622,28 @@
     return false;
   }
 
-
+  public void cleanupOldIndexDirectories() {
+    final DirectoryFactory myDirFactory = getDirectoryFactory();
+    final String myDataDir = getDataDir();
+    final String myIndexDir = getIndexDir();
+    final String coreName = getName();
+    if (myDirFactory != null && myDataDir != null && myIndexDir != null) {
+      Thread cleanupThread = new Thread() {
+        @Override
+        public void run() {
+          log.info("Looking for old index directories to cleanup for core {} in {}", coreName, myDataDir);
+          try {
+            myDirFactory.cleanupOldIndexDirectories(myDataDir, myIndexDir);
+          } catch (Exception exc) {
+            log.error("Failed to cleanup old index directories for core "+coreName, exc);
+          }
+        }
+      };
+      cleanupThread.setName("OldIndexDirectoryCleanupThreadForCore-"+coreName);
+      cleanupThread.setDaemon(true);
+      cleanupThread.start();
+    }
+  }
 }
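
initIndex() now finishes by calling cleanupOldIndexDirectories(), and that method spawns a named daemon thread so core start-up is never blocked on deleting stale index.<timestamp> directories. A minimal sketch of invoking the same hook from calling code, assuming a CoreContainer is available; "mycore" is an illustrative core name:

  // Sketch: fire-and-forget clean-up of stale index directories for one core.
  try (SolrCore core = coreContainer.getCore("mycore")) {
    if (core != null) {
      core.cleanupOldIndexDirectories(); // returns immediately; work happens on a daemon thread
    }
  }
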
 
 
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCores.java b/solr/core/src/java/org/apache/solr/core/SolrCores.java
index 3a15bee..3d78b2e 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCores.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCores.java
@@ -19,17 +19,21 @@
 
 import com.google.common.collect.Lists;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
 
 
 class SolrCores {
@@ -45,8 +49,10 @@
   private final Map<String, SolrCore> createdCores = new LinkedHashMap<>();
 
   private final CoreContainer container;
+  
+  private Set<String> currentlyLoadingCores = Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());
 
-  private static final Logger logger = LoggerFactory.getLogger(SolrCores.class);
+  private static final Logger log = LoggerFactory.getLogger(SolrCores.class);
 
   // This map will hold objects that are being currently operated on. The core (value) may be null in the case of
   // initial load. The rule is, never to any operation on a core that is currently being operated upon.
@@ -71,7 +77,7 @@
           if (size() > cacheSize) {
             synchronized (modifyLock) {
               SolrCore coreToClose = eldest.getValue();
-              logger.info("Closing transient core [{}]", coreToClose.getName());
+              log.info("Closing transient core [{}]", coreToClose.getName());
               pendingCloses.add(coreToClose); // Essentially just queue this core up for closing.
               modifyLock.notifyAll(); // Wakes up closer thread too
             }
@@ -113,6 +119,7 @@
       }
 
       for (SolrCore core : coreList) {
+        MDCLoggingContext.setCore(core);
         try {
           core.close();
         } catch (Throwable e) {
@@ -120,6 +127,8 @@
           if (e instanceof Error) {
             throw (Error) e;
           }
+        } finally {
+          MDCLoggingContext.clear();
         }
       }
     } while (coreList.size() > 0);
@@ -391,6 +400,7 @@
 
   /**
    * Return the CoreDescriptor corresponding to a given core name.
+   * Blocks if the SolrCore is still loading until it is ready.
    * @param coreName the name of the core
    * @return the CoreDescriptor
    */
@@ -421,4 +431,63 @@
     }
     return cds;
   }
+
+  // cores marked as loading will block on getCore
+  public void markCoreAsLoading(CoreDescriptor cd) {
+    synchronized (modifyLock) {
+      currentlyLoadingCores.add(cd.getName());
+    }
+  }
+
+  // cores marked as loading will block on getCore
+  public void markCoreAsNotLoading(CoreDescriptor cd) {
+    synchronized (modifyLock) {
+      currentlyLoadingCores.remove(cd.getName());
+    }
+  }
+
+  // returns when no cores are marked as loading
+  public void waitForLoadingCoresToFinish(long timeoutMs) {
+    long time = System.nanoTime();
+    long timeout = time + TimeUnit.NANOSECONDS.convert(timeoutMs, TimeUnit.MILLISECONDS);
+    synchronized (modifyLock) {
+      while (!currentlyLoadingCores.isEmpty()) {
+        try {
+          modifyLock.wait(500);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+        if (System.nanoTime() >= timeout) {
+          log.warn("Timed out waiting for SolrCores to finish loading.");
+          break;
+        }
+      }
+    }
+  }
+  
+  // returns when the named core has finished loading, or when the timeout elapses
+  public void waitForLoadingCoreToFinish(String core, long timeoutMs) {
+    long time = System.nanoTime();
+    long timeout = time + TimeUnit.NANOSECONDS.convert(timeoutMs, TimeUnit.MILLISECONDS);
+    synchronized (modifyLock) {
+      while (isCoreLoading(core)) {
+        try {
+          modifyLock.wait(500);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+        if (System.nanoTime() >= timeout) {
+          log.warn("Timed out waiting for SolrCore {} to finish loading.", core);
+          break;
+        }
+      }
+    }
+  }
+
+  public boolean isCoreLoading(String name) {
+    return currentlyLoadingCores.contains(name);
+  }
 }
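
Both wait helpers use the same bounded-wait idiom: compute a System.nanoTime() deadline, then wait on modifyLock in 500 ms slices until the condition clears or the deadline passes. Note that markCoreAsNotLoading does not notify the monitor, so waiters wake up via the 500 ms timeout rather than a signal. A generic sketch of the idiom; lock, timeoutMs and the condition are placeholders mirroring the code above:

  // Sketch of the bounded monitor-wait idiom used by waitForLoadingCoresToFinish.
  long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutMs, TimeUnit.MILLISECONDS);
  synchronized (lock) {
    while (stillLoading()) {                  // e.g. !currentlyLoadingCores.isEmpty()
      try {
        lock.wait(500);                       // re-check at least twice per second
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();   // preserve the interrupt status
      }
      if (System.nanoTime() >= deadline) {
        break;                                // give up once timeoutMs has elapsed
      }
    }
  }
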
diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
index ae6e200..99daddb 100644
--- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
@@ -37,7 +37,7 @@
 import org.apache.solr.common.cloud.ZkConfigManager;
 import org.apache.solr.common.cloud.ZooKeeperException;
 import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.logging.MDCUtils;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -181,6 +181,8 @@
     Thread thread = new Thread() {
       @Override
       public void run() {
+        MDCLoggingContext.setCore(core);
+        try {
           try {
             zkController.register(core.getName(), core.getCoreDescriptor());
           } catch (InterruptedException e) {
@@ -198,20 +200,23 @@
             }
             SolrException.log(log, "", e);
           }
+        } finally {
+          MDCLoggingContext.clear(); // use the context-aware clear so the MDC call depth stays balanced
         }
-
+      }
+      
     };
     
     if (zkController != null) {
-      MDCUtils.setCore(core.getName());
-      try {
-        if (background) {
-          coreZkRegister.execute(thread);
-        } else {
+      if (background) {
+        coreZkRegister.execute(thread);
+      } else {
+        MDCLoggingContext.setCore(core);
+        try {
           thread.run();
+        } finally {
+          MDCLoggingContext.clear();
         }
-      } finally {
-        MDC.remove(CORE_NAME_PROP);
       }
     }
   }
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index ea603da..a0a812d 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -80,6 +80,7 @@
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.search.SolrIndexSearcher;
+import org.apache.solr.update.CdcrUpdateLog;
 import org.apache.solr.update.CommitUpdateCommand;
 import org.apache.solr.update.UpdateLog;
 import org.apache.solr.util.DefaultSolrThreadFactory;
@@ -808,7 +809,9 @@
       // this is called before copying the files to the original conf dir
       // so that if there is an exception avoid corrupting the original files.
       terminateAndWaitFsyncService();
+      ((CdcrUpdateLog) ulog).reset(); // reset the update log before copying the new tlog directory
       copyTmpTlogFiles2Tlog(tmpTlogDir, timestamp);
+      ulog.init(solrCore.getUpdateHandler(), solrCore); // re-initialise the update log with the new directory
     } finally {
       delTree(tmpTlogDir);
     }
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java b/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java
new file mode 100644
index 0000000..fd9df8a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java
@@ -0,0 +1,237 @@
+package org.apache.solr.handler.admin;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.zookeeper.KeeperException;
+
+public class ClusterStatus {
+  private final ZkStateReader zkStateReader;
+  private final String collection;
+  private ZkNodeProps message;
+
+  public ClusterStatus(ZkStateReader zkStateReader, ZkNodeProps props) {
+    this.zkStateReader = zkStateReader;
+    this.message = props;
+    collection = props.getStr(ZkStateReader.COLLECTION_PROP);
+
+  }
+
+  @SuppressWarnings("unchecked")
+  public  void getClusterStatus(NamedList results)
+      throws KeeperException, InterruptedException {
+    zkStateReader.updateClusterState(true);
+
+
+    // read aliases
+    Aliases aliases = zkStateReader.getAliases();
+    Map<String, List<String>> collectionVsAliases = new HashMap<>();
+    Map<String, String> aliasVsCollections = aliases.getCollectionAliasMap();
+    if (aliasVsCollections != null) {
+      for (Map.Entry<String, String> entry : aliasVsCollections.entrySet()) {
+        List<String> colls = StrUtils.splitSmart(entry.getValue(), ',');
+        String alias = entry.getKey();
+        for (String coll : colls) {
+          if (collection == null || collection.equals(coll))  {
+            List<String> list = collectionVsAliases.get(coll);
+            if (list == null) {
+              list = new ArrayList<>();
+              collectionVsAliases.put(coll, list);
+            }
+            list.add(alias);
+          }
+        }
+      }
+    }
+
+    Map roles = null;
+    if (zkStateReader.getZkClient().exists(ZkStateReader.ROLES, true)) {
+      roles = (Map) ZkStateReader.fromJSON(zkStateReader.getZkClient().getData(ZkStateReader.ROLES, null, null, true));
+    }
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    // convert cluster state into a map of writable types
+    byte[] bytes = ZkStateReader.toJSON(clusterState);
+    Map<String, Object> stateMap = (Map<String,Object>) ZkStateReader.fromJSON(bytes);
+
+    Set<String> collections = new HashSet<>();
+    String routeKey = message.getStr(ShardParams._ROUTE_);
+    String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
+    if (collection == null) {
+      collections = new HashSet<>(clusterState.getCollections());
+    } else  {
+      collections = Collections.singleton(collection);
+    }
+
+    NamedList<Object> collectionProps = new SimpleOrderedMap<Object>();
+
+    for (String name : collections) {
+      Map<String, Object> collectionStatus = null;
+      DocCollection clusterStateCollection = clusterState.getCollection(name);
+
+      Set<String> requestedShards = new HashSet<>();
+      if (routeKey != null) {
+        DocRouter router = clusterStateCollection.getRouter();
+        Collection<Slice> slices = router.getSearchSlices(routeKey, null, clusterStateCollection);
+        for (Slice slice : slices) {
+          requestedShards.add(slice.getName());
+        }
+      }
+      if (shard != null) {
+        requestedShards.add(shard);
+      }
+
+      if (clusterStateCollection.getStateFormat() > 1) {
+        bytes = ZkStateReader.toJSON(clusterStateCollection);
+        Map<String, Object> docCollection = (Map<String, Object>) ZkStateReader.fromJSON(bytes);
+        collectionStatus = getCollectionStatus(docCollection, name, requestedShards);
+      } else {
+        collectionStatus = getCollectionStatus((Map<String, Object>) stateMap.get(name), name, requestedShards);
+      }
+
+      collectionStatus.put("znodeVersion", clusterStateCollection.getZNodeVersion());
+      if (collectionVsAliases.containsKey(name) && !collectionVsAliases.get(name).isEmpty()) {
+        collectionStatus.put("aliases", collectionVsAliases.get(name));
+      }
+      String configName = zkStateReader.readConfigName(name);
+      collectionStatus.put("configName", configName);
+      collectionProps.add(name, collectionStatus);
+    }
+
+    List<String> liveNodes = zkStateReader.getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
+
+    // now we need to walk the collectionProps tree to cross-check replica state with live nodes
+    crossCheckReplicaStateWithLiveNodes(liveNodes, collectionProps);
+
+    NamedList<Object> clusterStatus = new SimpleOrderedMap<>();
+    clusterStatus.add("collections", collectionProps);
+
+    // read cluster properties
+    Map clusterProps = zkStateReader.getClusterProps();
+    if (clusterProps != null && !clusterProps.isEmpty())  {
+      clusterStatus.add("properties", clusterProps);
+    }
+
+    // add the alias map too
+    if (aliasVsCollections != null && !aliasVsCollections.isEmpty())  {
+      clusterStatus.add("aliases", aliasVsCollections);
+    }
+
+    // add the roles map
+    if (roles != null)  {
+      clusterStatus.add("roles", roles);
+    }
+
+    // add live_nodes
+    clusterStatus.add("live_nodes", liveNodes);
+
+    results.add("cluster", clusterStatus);
+  }
+  /**
+   * Get collection status from cluster state.
+   * Can return collection status by given shard name.
+   *
+   *
+   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
+   * @param name  collection name
+   * @param requestedShards a set of shards to be returned in the status.
+   *                        An empty or null value indicates <b>all</b> shards.
+   * @return map of collection properties
+   */
+  @SuppressWarnings("unchecked")
+  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
+    if (collection == null)  {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
+    }
+    if (requestedShards == null || requestedShards.isEmpty()) {
+      return collection;
+    } else {
+      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
+      Map<String, Object>  selected = new HashMap<>();
+      for (String selectedShard : requestedShards) {
+        if (!shards.containsKey(selectedShard)) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
+        }
+        selected.put(selectedShard, shards.get(selectedShard));
+        collection.put("shards", selected);
+      }
+      return collection;
+    }
+  }
+
+
+
+  /**
+   * Walks the tree of collection status to verify that any replica not reporting a "down" status is
+   * on a live node; any replica reporting its status as "active" whose node is not live is
+   * marked as "down". Used by CLUSTERSTATUS.
+   * @param liveNodes List of currently live node names.
+   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
+   */
+
+  @SuppressWarnings("unchecked")
+  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
+    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
+    while (colls.hasNext()) {
+      Map.Entry<String,Object> next = colls.next();
+      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
+      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
+      for (Object nextShard : shards.values()) {
+        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
+        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
+        for (Object nextReplica : replicas.values()) {
+          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
+          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
+            // not down, so verify the node is live
+            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
+            if (!liveNodes.contains(node_name)) {
+              // node is not live, so this replica is actually down
+              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+}
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 07d5bbc..a392c43 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -20,6 +20,10 @@
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -44,9 +48,13 @@
 import org.apache.solr.cloud.rule.Rule;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.Aliases;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCmdExecutor;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
@@ -55,9 +63,11 @@
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.BlobHandler;
 import org.apache.solr.handler.RequestHandlerBase;
@@ -597,10 +607,13 @@
       @Override
       Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
           throws KeeperException, InterruptedException {
-        return req.getParams().getAll(null,
+        Map<String, Object> all = req.getParams().getAll(null,
             COLLECTION_PROP,
             SHARD_ID_PROP,
             _ROUTE_);
+        new ClusterStatus(handler.coreContainer.getZkController().getZkStateReader(),
+            new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
+        return null;
       }
     },
     ADDREPLICAPROP_OP(ADDREPLICAPROP) {
@@ -759,4 +772,4 @@
       MAX_SHARDS_PER_NODE,
       AUTO_ADD_REPLICAS);
 
-}
+ }
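
CLUSTERSTATUS now answers directly from the local ZkStateReader via the new ClusterStatus helper instead of returning the request parameters for the Overseer to process. A hedged sketch of the underlying call, mirroring the handler wiring above; the request, response and ZkStateReader objects are assumed to be in scope, and an equivalent HTTP request would be /admin/collections?action=CLUSTERSTATUS&collection=collection1&shard=shard1 with illustrative values:

  // Sketch: build CLUSTERSTATUS output straight from ZooKeeper state.
  Map<String, Object> all = req.getParams().getAll(null,
      ZkStateReader.COLLECTION_PROP, ZkStateReader.SHARD_ID_PROP, ShardParams._ROUTE_);
  new ClusterStatus(zkStateReader, new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
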
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
index 3244331..fded4ac 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
@@ -65,12 +65,7 @@
   private static final String PIVOT_KEY = "facet_pivot";
   private static final String PIVOT_REFINE_PREFIX = "{!"+PivotFacet.REFINE_PARAM+"=";
 
-  /**
-   * Incremented counter used to track the values being refined in a given request.
-   * This counter is used in conjunction with {@link PivotFacet#REFINE_PARAM} to identify
-   * which refinement values are associated with which pivots.
-   */
-  int pivotRefinementCounter = 0;
+
 
   @Override
   public void prepare(ResponseBuilder rb) throws IOException {
@@ -271,14 +266,14 @@
 
       if ( ! queuedRefinementsForShard.isEmpty() ) {
         
-        String fieldsKey = PivotFacet.REFINE_PARAM + pivotRefinementCounter;
+        String fieldsKey = PivotFacet.REFINE_PARAM + fi.pivotRefinementCounter;
         String command;
         
         if (pivotFacet.localParams != null) {
-          command = PIVOT_REFINE_PREFIX + pivotRefinementCounter + " "
+          command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + " "
             + pivotFacet.facetStr.substring(2);
         } else {
-          command = PIVOT_REFINE_PREFIX + pivotRefinementCounter + "}"
+          command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + "}"
             + pivotFacet.getKey();
         }
         
@@ -290,7 +285,7 @@
           
         }
       }
-      pivotRefinementCounter++;
+      fi.pivotRefinementCounter++;
     }
     
     rb.addRequest(this, shardsRefineRequestPivot);
@@ -981,13 +976,12 @@
   
   @Override
   public void finishStage(ResponseBuilder rb) {
-    pivotRefinementCounter = 0;
     if (!rb.doFacets || rb.stage != ResponseBuilder.STAGE_GET_FIELDS) return;
     // wait until STAGE_GET_FIELDS
     // so that "result" is already stored in the response (for aesthetics)
     
     FacetInfo fi = rb._facetInfo;
-    
+
     NamedList<Object> facet_counts = new SimpleOrderedMap<>();
     
     NamedList<Number> facet_queries = new SimpleOrderedMap<>();
@@ -1111,6 +1105,12 @@
    * <b>This API is experimental and subject to change</b>
    */
   public static class FacetInfo {
+    /**
+     * Incremented counter used to track the values being refined in a given request.
+     * This counter is used in conjunction with {@link PivotFacet#REFINE_PARAM} to identify
+     * which refinement values are associated with which pivots.
+     */
+    int pivotRefinementCounter = 0;
 
     public LinkedHashMap<String,QueryFacet> queryFacets;
     public LinkedHashMap<String,DistribFieldFacet> facets;
diff --git a/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java
new file mode 100644
index 0000000..488adb4
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java
@@ -0,0 +1,163 @@
+package org.apache.solr.logging;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+
+import java.util.function.Supplier;
+
+import org.apache.solr.cloud.CloudDescriptor;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.CoreDescriptor;
+import org.apache.solr.core.SolrCore;
+import org.slf4j.MDC;
+
+/**
+ * Sets per-thread context info for logging. Nested calls will use the top-level parent for all context. The first
+ * caller always owns the context until it calls {@link #clear()}. Always call {@link #setCore(SolrCore)} or
+ * {@link #setCoreDescriptor(CoreDescriptor)} and then {@link #clear()} in a finally block.
+ */
+public class MDCLoggingContext {
+  // When a thread sets context and finds that the context is already set, we should noop and ignore the finally clear
+  private static ThreadLocal<Integer> CALL_DEPTH = ThreadLocal.withInitial(new Supplier<Integer>() {
+    @Override
+    public Integer get() {
+      return 0;
+    }
+  });
+  
+  private static void setCollection(String collection) {
+    if (collection != null) {
+      MDC.put(COLLECTION_PROP, "c:" + collection);
+    } else {
+      MDC.remove(COLLECTION_PROP);
+    }
+  }
+  
+  private static void setShard(String shard) {
+    if (shard != null) {
+      MDC.put(SHARD_ID_PROP, "s:" + shard);
+    } else {
+      MDC.remove(SHARD_ID_PROP);
+    }
+  }
+  
+  private static void setReplica(String replica) {
+    if (replica != null) {
+      MDC.put(REPLICA_PROP, "r:" + replica);
+    } else {
+      MDC.remove(REPLICA_PROP);
+    }
+  }
+  
+  private static void setCoreName(String core) {
+    if (core != null) {
+      MDC.put(CORE_NAME_PROP, "x:" + core);
+    } else {
+      MDC.remove(CORE_NAME_PROP);
+    }
+  }
+  
+  public static void setNode(CoreContainer cc) {
+    if (cc != null) {
+      ZkController zk = cc.getZkController();
+      if (zk != null) {
+        setNode(zk.getNodeName());
+      }
+    }
+  }
+  
+  // we allow the host to be set like this because it is the same for any thread
+  // in the thread pool - we can't do this with the per core properties!
+  public static void setNode(String node) {
+    int used = CALL_DEPTH.get();
+    if (used == 0) {
+      setNodeName(node);
+    }
+  }
+  
+  private static void setNodeName(String node) {
+    if (node != null) {
+      MDC.put(NODE_NAME_PROP, "n:" + node);
+    } else {
+      MDC.remove(NODE_NAME_PROP);
+    }
+  }
+  
+  public static void setCore(SolrCore core) {
+    if (core != null) {
+      CoreDescriptor cd = core.getCoreDescriptor();
+      setCoreDescriptor(cd);
+    }
+  }
+  
+  public static void setCoreDescriptor(CoreDescriptor cd) {
+    if (cd != null) {
+      int callDepth = CALL_DEPTH.get();
+      CALL_DEPTH.set(callDepth + 1);
+      if (callDepth > 0) {
+        return;
+      }
+      
+      setCoreName(cd.getName());
+      CoreContainer cc = cd.getCoreContainer();
+      if (cc != null) {
+        ZkController zkController = cc.getZkController();
+        if (zkController != null) {
+          setNodeName(zkController.getNodeName());
+        }
+      }
+      
+      CloudDescriptor ccd = cd.getCloudDescriptor();
+      if (ccd != null) {
+        setCollection(ccd.getCollectionName());
+        setShard(ccd.getShardId());
+        setReplica(ccd.getCoreNodeName());
+      }
+    }
+  }
+  
+  public static void clear() {
+    int used = CALL_DEPTH.get();
+    CALL_DEPTH.set(used - 1);
+    if (used == 0) {
+      MDC.remove(COLLECTION_PROP);
+      MDC.remove(CORE_NAME_PROP);
+      MDC.remove(REPLICA_PROP);
+      MDC.remove(SHARD_ID_PROP);
+    }
+  }
+  
+  private static void removeAll() {
+    MDC.remove(COLLECTION_PROP);
+    MDC.remove(CORE_NAME_PROP);
+    MDC.remove(REPLICA_PROP);
+    MDC.remove(SHARD_ID_PROP);
+    MDC.remove(NODE_NAME_PROP);
+  }
+  
+  public static void reset() {
+    CALL_DEPTH.set(0);
+    removeAll();
+  }
+}
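
The class javadoc asks callers to pair every setCore/setCoreDescriptor with clear() in a finally block; the CALL_DEPTH thread-local makes nested calls no-ops, so the outermost caller owns the context. A minimal usage sketch; the logging inside the try is illustrative:

  // Sketch: recommended per-core MDC pattern; inner calls on the same thread are no-ops.
  MDCLoggingContext.setCore(core); // populates collection, shard, replica and core entries
  try {
    log.info("doing per-core work"); // log lines now carry the core's MDC context
  } finally {
    MDCLoggingContext.clear();       // balances the CALL_DEPTH counter set above
  }
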
diff --git a/solr/core/src/java/org/apache/solr/logging/MDCUtils.java b/solr/core/src/java/org/apache/solr/logging/MDCUtils.java
deleted file mode 100644
index 61a0aef..0000000
--- a/solr/core/src/java/org/apache/solr/logging/MDCUtils.java
+++ /dev/null
@@ -1,69 +0,0 @@
-package org.apache.solr.logging;
-
-import java.util.Map;
-
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.MDC;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class MDCUtils {
-  public static void cleanupMDC(Map previousMDCContext) {
-    if (previousMDCContext != null)
-      MDC.setContextMap(previousMDCContext);
-  }
-
-  public static void setMDC (String collection, String shard, String replica, String core) {
-    setCollection(collection);
-    setShard(shard);
-    setReplica(replica);
-    setCore(core);
-  }
-
-  public static void setCollection(String collection) {
-    if (collection != null)
-      MDC.put(COLLECTION_PROP, collection);
-  }
-
-  public static void setShard(String shard) {
-    if (shard != null)
-      MDC.put(SHARD_ID_PROP, shard);
-  }
-
-  public static void setReplica(String replica) {
-    if (replica != null)
-      MDC.put(REPLICA_PROP, replica);
-  }
-
-  public static void setCore(String core) {
-    if (core != null)
-      MDC.put(CORE_NAME_PROP, core);
-  }
-
-  public static void clearMDC() {
-    MDC.remove(COLLECTION_PROP);
-    MDC.remove(CORE_NAME_PROP);
-    MDC.remove(REPLICA_PROP);
-    MDC.remove(SHARD_ID_PROP);
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index b6bad19..4958f3a 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -18,7 +18,6 @@
 package org.apache.solr.request;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.log4j.Logger;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -80,6 +79,8 @@
 import org.apache.solr.util.BoundedTreeSet;
 import org.apache.solr.util.DateMathParser;
 import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -108,7 +109,7 @@
  */
 public class SimpleFacets {
   
-  private final static Logger log = Logger.getLogger(SimpleFacets.class);
+  private final static Logger log = LoggerFactory.getLogger(SimpleFacets.class);
 
   /** The main set of documents all facet counts should be relative to */
   protected DocSet docsOrig;
diff --git a/solr/core/src/java/org/apache/solr/response/ResponseWriterUtil.java b/solr/core/src/java/org/apache/solr/response/ResponseWriterUtil.java
index 69d3021..d469b2c 100644
--- a/solr/core/src/java/org/apache/solr/response/ResponseWriterUtil.java
+++ b/solr/core/src/java/org/apache/solr/response/ResponseWriterUtil.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.solr.common.SolrDocument;
@@ -54,4 +55,15 @@
     }
     return out;
   }
+
+  public static String getAsString(String field, SolrDocument doc) {
+    Object v = doc.getFirstValue(field);
+    if (v != null) {
+      if (v instanceof StoredField) {
+        return ((StoredField) v).stringValue();
+      }
+      return v.toString();
+    }
+    return null;
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/response/transform/DocTransformer.java b/solr/core/src/java/org/apache/solr/response/transform/DocTransformer.java
index 183042d..a28c325 100644
--- a/solr/core/src/java/org/apache/solr/response/transform/DocTransformer.java
+++ b/solr/core/src/java/org/apache/solr/response/transform/DocTransformer.java
@@ -20,7 +20,9 @@
 import java.io.IOException;
 
 import org.apache.solr.common.SolrDocument;
-import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.QueryResponseWriter;
+import org.apache.solr.response.ResponseWriterUtil;
+import org.apache.solr.search.SolrIndexSearcher;
 
 /**
  * A DocTransformer can add, remove or alter a Document before it is written out to the Response.  For instance, there are implementations
@@ -57,6 +59,19 @@
    */
   public abstract void transform(SolrDocument doc, int docid) throws IOException;
 
+  /**
+   * When a transformer needs access to fields that are not automatically derived from the
+   * input field names, this option lets us explicitly request the field names that we hope
+   * will be in the SolrDocument.  These fields will be requested from the
+   * {@link SolrIndexSearcher} but may or may not be returned in the final
+   * {@link QueryResponseWriter}.
+   * 
+   * @return a list of extra Lucene fields
+   */
+  public String[] getExtraRequestFields() {
+    return null;
+  }
+  
   @Override
   public String toString() {
     return getName();
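
To make the new hook concrete, here is a minimal sketch of a transformer that declares an extra stored field and reads it back with the ResponseWriterUtil.getAsString() helper added above. The transformer name and field names are illustrative only; the actual test fixture in this patch is TestCustomDocTransformer together with solrconfig-doctransformers.xml further down.

    import java.io.IOException;

    import org.apache.solr.common.SolrDocument;
    import org.apache.solr.response.ResponseWriterUtil;
    import org.apache.solr.response.transform.DocTransformer;

    // Hypothetical transformer: it asks the searcher for an extra stored field ("other_s")
    // that fl does not name, and copies its value into a derived output field.
    class ConcatFieldTransformer extends DocTransformer {
      @Override
      public String getName() {
        return "concat";
      }

      @Override
      public String[] getExtraRequestFields() {
        // fetched from the SolrIndexSearcher even when fl does not include them
        return new String[] { "other_s" };
      }

      @Override
      public void transform(SolrDocument doc, int docid) throws IOException {
        String other = ResponseWriterUtil.getAsString("other_s", doc);
        doc.setField("concat_t", doc.getFieldValue("id") + "#" + other);
      }
    }
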
diff --git a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
index 61271d9..0307910 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java
@@ -264,6 +264,14 @@
             MapSolrParams augmenterParams = new MapSolrParams( augmenterArgs );
             DocTransformer t = factory.create(disp, augmenterParams, req);
             if(t!=null) {
+              if(!_wantsAllFields) {
+                String[] extra = t.getExtraRequestFields();
+                if(extra!=null) {
+                  for(String f : extra) {
+                    fields.add(f); // also request this field from IndexSearcher
+                  }
+                }
+              }
               augmenters.addTransformer( t );
             }
           }
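
The request side of the same feature, sketched with SolrJ; the "[concat]" transformer and the other_s field are the hypothetical ones from the DocTransformer example above.

    import org.apache.solr.client.solrj.SolrQuery;

    class ExtraFieldsQuerySketch {
      static SolrQuery build() {
        // "[concat]" invokes the doc transformer registered under that name; because it
        // declares "other_s" in getExtraRequestFields(), SolrReturnFields now asks the
        // searcher for other_s as well, even though fl never names it.
        return new SolrQuery("*:*").setFields("id", "[concat]");
      }
    }
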
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
index efeb654..cf81b64 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
@@ -169,6 +169,7 @@
     for (ShardResponse shardRsp : sreq.responses) {
       SolrResponse rsp = shardRsp.getSolrResponse();
       NamedList<Object> top = rsp.getResponse();
+      if (top == null) continue; // shards.tolerant=true will cause this to happen on exceptions/errors
       Object facet = top.get("facets");
       if (facet == null) continue;
       if (facetState.merger == null) {
diff --git a/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java b/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
index 200f313..2ea7523 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
@@ -208,7 +208,7 @@
 
     @Override
     public void resize(Resizer resizer) {
-      resizer.resize(sets, null);
+      sets = resizer.resize(sets, null);
     }
 
     @Override
diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index d4574d9..1f46ab8 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -1,5 +1,37 @@
 package org.apache.solr.servlet;
 
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.RELOAD;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.ADMIN;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.FORWARD;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.PASSTHROUGH;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.PROCESS;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.REMOTEQUERY;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETRY;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETURN;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.Principal;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -19,22 +51,6 @@
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.UnsupportedEncodingException;
-import java.security.Principal;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
@@ -54,8 +70,8 @@
 import org.apache.http.entity.InputStreamEntity;
 import org.apache.http.util.EntityUtils;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.Aliases;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
@@ -73,7 +89,7 @@
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.ContentStreamHandlerBase;
-import org.apache.solr.logging.MDCUtils;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequestBase;
 import org.apache.solr.request.SolrRequestHandler;
@@ -85,66 +101,50 @@
 import org.apache.solr.security.AuthorizationContext.CollectionRequest;
 import org.apache.solr.security.AuthorizationContext.RequestType;
 import org.apache.solr.security.AuthorizationResponse;
+import org.apache.solr.servlet.SolrDispatchFilter.Action;
 import org.apache.solr.servlet.cache.HttpCacheHeaderUtil;
 import org.apache.solr.servlet.cache.Method;
 import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
 import org.apache.solr.util.RTimer;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.RELOAD;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.ADMIN;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.FORWARD;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.PASSTHROUGH;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.PROCESS;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.REMOTEQUERY;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETRY;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETURN; 
+import org.slf4j.LoggerFactory; 
 
 /**
  * This class represents a call made to Solr
  **/
 public class HttpSolrCall {
-  private static Logger log = LoggerFactory.getLogger(HttpSolrCall.class);
+  protected static Logger log = LoggerFactory.getLogger(HttpSolrCall.class);
 
-  private final SolrDispatchFilter solrDispatchFilter;
-  private final CoreContainer cores;
-  private final HttpServletRequest req;
-  private final HttpServletResponse response;
-  private final boolean retry;
-  private SolrCore core = null;
-  private SolrQueryRequest solrReq = null;
-  private SolrRequestHandler handler = null;
-  private final SolrParams queryParams;
-  private String path;
-  private Action action;
-  private String coreUrl;
-  private SolrConfig config;
-  private Map<String, Integer> invalidStates;
+  protected final SolrDispatchFilter solrDispatchFilter;
+  protected final CoreContainer cores;
+  protected final HttpServletRequest req;
+  protected final HttpServletResponse response;
+  protected final boolean retry;
+  protected SolrCore core = null;
+  protected SolrQueryRequest solrReq = null;
+  protected SolrRequestHandler handler = null;
+  protected final SolrParams queryParams;
+  protected String path;
+  protected Action action;
+  protected String coreUrl;
+  protected SolrConfig config;
+  protected Map<String, Integer> invalidStates;
 
   public RequestType getRequestType() {
     return requestType;
   }
 
-  private RequestType requestType;
+  protected RequestType requestType;
 
 
   public List<String> getCollectionsList() {
     return collectionsList;
   }
 
-  private List<String> collectionsList;
+  protected List<String> collectionsList;
 
-  HttpSolrCall(SolrDispatchFilter solrDispatchFilter, CoreContainer cores,
+  public HttpSolrCall(SolrDispatchFilter solrDispatchFilter, CoreContainer cores,
                HttpServletRequest request, HttpServletResponse response, boolean retry) {
     this.solrDispatchFilter = solrDispatchFilter;
     this.cores = cores;
@@ -233,6 +233,14 @@
         core = cores.getCore(corename);
         if (core != null) {
           path = path.substring(idx);
+        } else if (cores.isCoreLoading(corename)) { // isCoreLoading involves extra memory barriers, so don't check it before first trying to get the core
+          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is loading");
+        } else {
+          // the core may have just finished loading
+          core = cores.getCore(corename);
+          if (core != null) {
+            path = path.substring(idx);
+          } 
         }
       }
       if (core == null) {
@@ -242,15 +250,12 @@
       }
     }
 
-    if (core != null) addMDCValues();
-
     if (core == null && cores.isZooKeeperAware()) {
       // we couldn't find the core - lets make sure a collection was not specified instead
       core = getCoreByCollection(corename);
       if (core != null) {
         // we found a core, update the path
         path = path.substring(idx);
-        addMDCValues();
         if (collectionsList == null)
           collectionsList = new ArrayList<>();
         collectionsList.add(corename);
@@ -263,12 +268,12 @@
       // try the default core
       if (core == null) {
         core = cores.getCore("");
-        if (core != null) addMDCValues();
       }
     }
 
     // With a valid core...
     if (core != null) {
+      MDCLoggingContext.setCore(core);
       config = core.getSolrConfig();
       // get or create/cache the parser for the core
       SolrRequestParsers parser = config.getRequestParsers();
@@ -378,17 +383,14 @@
    * This method processes the request.
    */
   public Action call() throws IOException {
-    MDCUtils.clearMDC();
+    MDCLoggingContext.reset();
+    MDCLoggingContext.setNode(cores);
 
     if (cores == null) {
       sendError(503, "Server is shutting down or failed to initialize");
       return RETURN;
     }
 
-    if (cores.isZooKeeperAware()) {
-      MDC.put(NODE_NAME_PROP, cores.getZkController().getNodeName());
-    }
-
     if (solrDispatchFilter.abortErrorMessage != null) {
       sendError(500, solrDispatchFilter.abortErrorMessage);
       return RETURN;
@@ -461,6 +463,8 @@
         t = t.getCause();
       }
       return RETURN;
+    } finally {
+      MDCLoggingContext.clear();
     }
 
   }
@@ -614,7 +618,7 @@
     }
   }
 
-  void sendError(int code, String message) throws IOException {
+  protected void sendError(int code, String message) throws IOException {
     try {
       response.sendError(code, message);
     } catch (EOFException e) {
@@ -622,16 +626,6 @@
     }
   }
 
-  private void addMDCValues() {
-    MDCUtils.setCore(core.getName());
-    if (cores.isZooKeeperAware()) {
-      CloudDescriptor cloud = core.getCoreDescriptor().getCloudDescriptor();
-      MDCUtils.setCollection(cloud.getCollectionName());
-      MDCUtils.setShard(cloud.getShardId());
-      MDCUtils.setReplica(cloud.getCoreNodeName());
-    }
-  }
-
   protected void execute(SolrQueryResponse rsp) {
     // a custom filter could add more stuff to the request before passing it on.
     // for example: sreq.getContext().put( "HttpServletRequest", req );
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index db7963d..090ebef 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -136,7 +136,7 @@
    */
   protected CoreContainer createCoreContainer(String solrHome, Properties extraProperties) {
     NodeConfig nodeConfig = loadNodeConfig(solrHome, extraProperties);
-    cores = new CoreContainer(nodeConfig, extraProperties);
+    cores = new CoreContainer(nodeConfig, extraProperties, true);
     cores.load();
     return cores;
   }
@@ -222,7 +222,7 @@
       }
     }
     
-    HttpSolrCall call = new HttpSolrCall(this, cores, (HttpServletRequest) request, (HttpServletResponse) response, retry);
+    HttpSolrCall call = getHttpSolrCall((HttpServletRequest) request, (HttpServletResponse) response, retry);
     try {
       Action result = call.call();
       switch (result) {
@@ -240,6 +240,14 @@
       call.destroy();
     }
   }
+  
+  /**
+   * Allow a subclass to provide its own HttpSolrCall.  In particular, subclasses may
+   * want to add attributes to the request and send errors differently.
+   */
+  protected HttpSolrCall getHttpSolrCall(HttpServletRequest request, HttpServletResponse response, boolean retry) {
+    return new HttpSolrCall(this, cores, request, response, retry);
+  }
 
   private boolean authenticateRequest(ServletRequest request, ServletResponse response, final AtomicReference<ServletRequest> wrappedRequest) throws IOException {
     final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
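
With the HttpSolrCall constructor and sendError() opened up above, a filter subclass can now hand out its own call object through getHttpSolrCall(). A rough sketch, assuming SolrDispatchFilter's CoreContainer field (cores) is visible to subclasses; the class and its behaviour are illustrative only:

    import java.io.IOException;

    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    import org.apache.solr.servlet.HttpSolrCall;
    import org.apache.solr.servlet.SolrDispatchFilter;

    // Hypothetical filter that customizes error handling for every request.
    public class ExampleDispatchFilter extends SolrDispatchFilter {

      @Override
      protected HttpSolrCall getHttpSolrCall(HttpServletRequest request, HttpServletResponse response, boolean retry) {
        return new HttpSolrCall(this, cores, request, response, retry) {
          @Override
          protected void sendError(int code, String message) throws IOException {
            // e.g. rewrite or log the error before delegating to the default behaviour
            super.sendError(code, message);
          }
        };
      }
    }
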
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
index 62b5548..940c78c 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
@@ -28,6 +28,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.LockReleaseFailedException;
 import org.apache.solr.common.util.IOUtils;
 import org.slf4j.Logger;
@@ -41,98 +42,83 @@
   private HdfsLockFactory() {}
   
   @Override
-  public Lock makeLock(Directory dir, String lockName) {
+  public Lock obtainLock(Directory dir, String lockName) throws IOException {
     if (!(dir instanceof HdfsDirectory)) {
       throw new UnsupportedOperationException("HdfsLockFactory can only be used with HdfsDirectory subclasses, got: " + dir);
     }
     final HdfsDirectory hdfsDir = (HdfsDirectory) dir;
-    return new HdfsLock(hdfsDir.getHdfsDirPath(), lockName, hdfsDir.getConfiguration());
+    final Configuration conf = hdfsDir.getConfiguration();
+    final Path lockPath = hdfsDir.getHdfsDirPath();
+    final Path lockFile = new Path(lockPath, lockName);
+    
+    FSDataOutputStream file = null;
+    final FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
+    while (true) {
+      try {
+        if (!fs.exists(lockPath)) {
+          boolean success = fs.mkdirs(lockPath);
+          if (!success) {
+            throw new RuntimeException("Could not create directory: " + lockPath);
+          }
+        } else {
+          // just to check for safe mode
+          fs.mkdirs(lockPath);
+        }
+        
+        file = fs.create(lockFile, false);
+        break;
+      } catch (FileAlreadyExistsException e) {
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
+      } catch (RemoteException e) {
+        if (e.getClassName().equals(
+            "org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
+          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
+          try {
+            Thread.sleep(5000);
+          } catch (InterruptedException e1) {
+            Thread.interrupted();
+          }
+          continue;
+        }
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
+      } catch (IOException e) {
+        throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
+      } finally {
+        IOUtils.closeQuietly(file);
+      }
+    }
+
+    return new HdfsLock(fs, lockFile);
   }
   
-  static class HdfsLock extends Lock {
+  private static final class HdfsLock extends Lock {
     
-    private final Path lockPath;
-    private final String lockName;
-    private final Configuration conf;
+    private final FileSystem fs;
+    private final Path lockFile;
+    private volatile boolean closed;
     
-    public HdfsLock(Path lockPath, String lockName, Configuration conf) {
-      this.lockPath = lockPath;
-      this.lockName = lockName;
-      this.conf = conf;
-    }
-    
-    @Override
-    public boolean obtain() throws IOException {
-      FSDataOutputStream file = null;
-      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
-      try {
-        while (true) {
-          try {
-            if (!fs.exists(lockPath)) {
-              boolean success = fs.mkdirs(lockPath);
-              if (!success) {
-                throw new RuntimeException("Could not create directory: " + lockPath);
-              }
-            } else {
-              // just to check for safe mode
-              fs.mkdirs(lockPath);
-            }
-
-            
-            file = fs.create(new Path(lockPath, lockName), false);
-            break;
-          } catch (FileAlreadyExistsException e) {
-            return false;
-          } catch (RemoteException e) {
-            if (e.getClassName().equals(
-                "org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
-              log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
-              try {
-                Thread.sleep(5000);
-              } catch (InterruptedException e1) {
-                Thread.interrupted();
-              }
-              continue;
-            }
-            log.error("Error creating lock file", e);
-            return false;
-          } catch (IOException e) {
-            log.error("Error creating lock file", e);
-            return false;
-          } finally {
-            IOUtils.closeQuietly(file);
-          }
-        }
-      } finally {
-        IOUtils.closeQuietly(fs);
-      }
-      return true;
+    HdfsLock(FileSystem fs, Path lockFile) {
+      this.fs = fs;
+      this.lockFile = lockFile;
     }
     
     @Override
     public void close() throws IOException {
-      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
+      if (closed) {
+        return;
+      }
       try {
-        if (fs.exists(new Path(lockPath, lockName))
-            && !fs.delete(new Path(lockPath, lockName), false)) throw new LockReleaseFailedException(
-            "failed to delete " + new Path(lockPath, lockName));
+        if (fs.exists(lockFile) && !fs.delete(lockFile, false)) {
+          throw new LockReleaseFailedException("failed to delete: " + lockFile);
+        }
       } finally {
         IOUtils.closeQuietly(fs);
       }
     }
-    
+
     @Override
-    public boolean isLocked() throws IOException {
-      boolean isLocked = false;
-      FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
-      try {
-        isLocked = fs.exists(new Path(lockPath, lockName));
-      } finally {
-        IOUtils.closeQuietly(fs);
-      }
-      return isLocked;
+    public void ensureValid() throws IOException {
+      // no idea how to implement this on HDFS
     }
-    
   }
-  
 }
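
Under the Lucene 5 lock API this rewrite targets, callers no longer poll obtain()/isLocked(); they either get a live Lock back or an exception, and release it with close(). A minimal caller sketch, assuming the Directory.obtainLock(String) entry point from the same lock-factory rework (the directory and lock name are illustrative):

    import java.io.IOException;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;

    class HdfsLockUsageSketch {
      static void withWriteLock(Directory dir) throws IOException {
        // obtainLock() now throws LockObtainFailedException (an IOException) if the HDFS
        // lock file already exists, instead of the old obtain() returning false.
        try (Lock lock = dir.obtainLock("write.lock")) {
          lock.ensureValid(); // a no-op for HdfsLock, but part of the Lock contract
          // ... do work that requires the index write lock ...
        }
      }
    }
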
diff --git a/solr/core/src/java/org/apache/solr/update/CdcrUpdateLog.java b/solr/core/src/java/org/apache/solr/update/CdcrUpdateLog.java
index 32aa6f6..6046eb6 100644
--- a/solr/core/src/java/org/apache/solr/update/CdcrUpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/CdcrUpdateLog.java
@@ -238,6 +238,30 @@
     }
   }
 
+  /**
+   * Expert: Reset the update log before initialisation. This is needed by the IndexFetcher during
+   * a Recovery operation in order to re-initialise the UpdateLog with a new set of tlog files.
+   */
+  public void reset() {
+    synchronized (this) {
+      // Close readers
+      for (CdcrLogReader reader : new ArrayList<>(logPointers.keySet())) {
+        reader.close();
+      }
+
+      // Close and clear logs
+      for (TransactionLog log : logs) {
+        log.deleteOnClose = false;
+        log.decref();
+        log.forceClose();
+      }
+      logs.clear();
+
+      // reset lastDataDir for #init()
+      lastDataDir = null;
+    }
+  }
+
   @Override
   public void close(boolean committed, boolean deleteOnClose) {
     for (CdcrLogReader reader : new ArrayList<>(logPointers.keySet())) {
diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
index d38fc6b..60fe8e9 100644
--- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@@ -30,6 +30,7 @@
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.util.RefCounted;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -282,52 +283,57 @@
 
   @Override
   public void doRecovery(CoreContainer cc, CoreDescriptor cd) {
-    if (SKIP_AUTO_RECOVERY) {
-      log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
-      return;
-    }
-    
-    // check before we grab the lock
-    if (cc.isShutDown()) {
-      log.warn("Skipping recovery because Solr is close");
-      return;
-    }
-    
-    synchronized (recoveryLock) {
-      // to be air tight we must also check after lock
-      if (cc.isShutDown()) {
-        log.warn("Skipping recovery because Solr is close");
+    MDCLoggingContext.setCoreDescriptor(cd);
+    try {
+      if (SKIP_AUTO_RECOVERY) {
+        log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
         return;
       }
-      log.info("Running recovery - first canceling any ongoing recovery");
-      cancelRecovery();
       
-      while (recoveryRunning) {
-        try {
-          recoveryLock.wait(1000);
-        } catch (InterruptedException e) {
-
-        }
-        // check again for those that were waiting
+      // check before we grab the lock
+      if (cc.isShutDown()) {
+        log.warn("Skipping recovery because Solr is shutdown");
+        return;
+      }
+      
+      synchronized (recoveryLock) {
+        // to be air tight we must also check after lock
         if (cc.isShutDown()) {
-          log.warn("Skipping recovery because Solr is close");
+          log.warn("Skipping recovery because Solr is shutdown");
           return;
         }
-        if (closed) return;
+        log.info("Running recovery - first canceling any ongoing recovery");
+        cancelRecovery();
+        
+        while (recoveryRunning) {
+          try {
+            recoveryLock.wait(1000);
+          } catch (InterruptedException e) {
+          
+          }
+          // check again for those that were waiting
+          if (cc.isShutDown()) {
+            log.warn("Skipping recovery because Solr is shutdown");
+            return;
+          }
+          if (closed) return;
+        }
+        
+        // if true, we are recovering after startup and shouldn't have (or be receiving) additional updates (except for
+        // local tlog recovery)
+        boolean recoveringAfterStartup = recoveryStrat == null;
+        
+        recoveryThrottle.minimumWaitBetweenActions();
+        recoveryThrottle.markAttemptingAction();
+        
+        recoveryStrat = new RecoveryStrategy(cc, cd, this);
+        recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
+        recoveryStrat.start();
+        recoveryRunning = true;
       }
-
-      // if true, we are recovering after startup and shouldn't have (or be receiving) additional updates (except for local tlog recovery)
-      boolean recoveringAfterStartup = recoveryStrat == null;
-
-      recoveryThrottle.minimumWaitBetweenActions();
-      recoveryThrottle.markAttemptingAction();
-      
-      recoveryStrat = new RecoveryStrategy(cc, cd, this);
-      recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
-      recoveryStrat.start();
-      recoveryRunning = true;
+    } finally {
+      MDCLoggingContext.clear();
     }
-    
   }
   
   @Override
diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java
index 1b3bf96..88ce821 100644
--- a/solr/core/src/java/org/apache/solr/update/PeerSync.java
+++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java
@@ -46,6 +46,7 @@
 import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.handler.component.ShardResponse;
+import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -80,6 +81,7 @@
   private final boolean getNoVersionsIsSuccess;
   private final HttpClient client;
   private final boolean onlyIfActive;
+  private SolrCore core;
 
   // comparator that sorts by absolute value, putting highest first
   private static Comparator<Long> absComparator = new Comparator<Long>() {
@@ -128,6 +130,7 @@
   }
   
   public PeerSync(SolrCore core, List<String> replicas, int nUpdates, boolean cantReachIsSuccess, boolean getNoVersionsIsSuccess, boolean onlyIfActive) {
+    this.core = core;
     this.replicas = replicas;
     this.nUpdates = nUpdates;
     this.maxUpdates = nUpdates;
@@ -175,95 +178,102 @@
     if (ulog == null) {
       return false;
     }
-
-    log.info(msg() + "START replicas=" + replicas + " nUpdates=" + nUpdates);
-
-    // TODO: does it ever make sense to allow sync when buffering or applying buffered?  Someone might request that we do it...
-    if (!(ulog.getState() == UpdateLog.State.ACTIVE || ulog.getState()==UpdateLog.State.REPLAYING)) {
-      log.error(msg() + "ERROR, update log not in ACTIVE or REPLAY state. " + ulog);
-      // return false;
-    }
-    
-    if (debug) {
-      if (startingVersions != null) {
-        log.debug(msg() + "startingVersions=" + startingVersions.size() + " " + startingVersions);
-      }
-    }
-
-    // Fire off the requests before getting our own recent updates (for better concurrency)
-    // This also allows us to avoid getting updates we don't need... if we got our updates and then got their updates, they would
-    // have newer stuff that we also had (assuming updates are going on and are being forwarded).
-    for (String replica : replicas) {
-      requestVersions(replica);
-    }
-
-    recentUpdates = ulog.getRecentUpdates();
+    MDCLoggingContext.setCore(core);
     try {
-      ourUpdates = recentUpdates.getVersions(nUpdates);
-    } finally {
-      recentUpdates.close();
-    }
-
-    Collections.sort(ourUpdates, absComparator);
-
-    if (startingVersions != null) {
-      if (startingVersions.size() == 0) {
-        log.warn("no frame of reference to tell if we've missed updates");
-        return false;
+      log.info(msg() + "START replicas=" + replicas + " nUpdates=" + nUpdates);
+      
+      // TODO: does it ever make sense to allow sync when buffering or applying buffered? Someone might request that we
+      // do it...
+      if (!(ulog.getState() == UpdateLog.State.ACTIVE || ulog.getState() == UpdateLog.State.REPLAYING)) {
+        log.error(msg() + "ERROR, update log not in ACTIVE or REPLAY state. " + ulog);
+        // return false;
       }
-      Collections.sort(startingVersions, absComparator);
-
-      ourLowThreshold = percentile(startingVersions, 0.8f);
-      ourHighThreshold = percentile(startingVersions, 0.2f);
-
-      // now make sure that the starting updates overlap our updates
-      // there shouldn't be reorders, so any overlap will do.
-
-      long smallestNewUpdate = Math.abs(ourUpdates.get(ourUpdates.size()-1));
-
-      if (Math.abs(startingVersions.get(0)) < smallestNewUpdate) {
-        log.warn(msg() + "too many updates received since start - startingUpdates no longer overlaps with our currentUpdates");
-        return false;
-      }
-
-      // let's merge the lists
-      List<Long> newList = new ArrayList<>(ourUpdates);
-      for (Long ver : startingVersions) {
-        if (Math.abs(ver) < smallestNewUpdate) {
-          newList.add(ver);
+      
+      if (debug) {
+        if (startingVersions != null) {
+          log.debug(msg() + "startingVersions=" + startingVersions.size() + " " + startingVersions);
         }
       }
-
-      ourUpdates = newList;
-    }  else {
-
-      if (ourUpdates.size() > 0) {
-        ourLowThreshold = percentile(ourUpdates, 0.8f);
-        ourHighThreshold = percentile(ourUpdates, 0.2f);
-      }  else {
-        // we have no versions and hence no frame of reference to tell if we can use a peers
-        // updates to bring us into sync
-        log.info(msg() + "DONE.  We have no versions.  sync failed.");
-        return false;
+      
+      // Fire off the requests before getting our own recent updates (for better concurrency)
+      // This also allows us to avoid getting updates we don't need... if we got our updates and then got their
+      // updates, they would have newer stuff that we also had (assuming updates are going on and are being forwarded).
+      for (String replica : replicas) {
+        requestVersions(replica);
       }
-    }
-
-    ourUpdateSet = new HashSet<>(ourUpdates);
-    requestedUpdateSet = new HashSet<>(ourUpdates);
-
-    for(;;) {
-      ShardResponse srsp = shardHandler.takeCompletedOrError();
-      if (srsp == null) break;
-      boolean success = handleResponse(srsp);
-      if (!success) {
-        log.info(msg() +  "DONE. sync failed");
-        shardHandler.cancelAll();
-        return false;
+      
+      recentUpdates = ulog.getRecentUpdates();
+      try {
+        ourUpdates = recentUpdates.getVersions(nUpdates);
+      } finally {
+        recentUpdates.close();
       }
+      
+      Collections.sort(ourUpdates, absComparator);
+      
+      if (startingVersions != null) {
+        if (startingVersions.size() == 0) {
+          log.warn("no frame of reference to tell if we've missed updates");
+          return false;
+        }
+        Collections.sort(startingVersions, absComparator);
+        
+        ourLowThreshold = percentile(startingVersions, 0.8f);
+        ourHighThreshold = percentile(startingVersions, 0.2f);
+        
+        // now make sure that the starting updates overlap our updates
+        // there shouldn't be reorders, so any overlap will do.
+        
+        long smallestNewUpdate = Math.abs(ourUpdates.get(ourUpdates.size() - 1));
+        
+        if (Math.abs(startingVersions.get(0)) < smallestNewUpdate) {
+          log.warn(msg()
+              + "too many updates received since start - startingUpdates no longer overlaps with our currentUpdates");
+          return false;
+        }
+        
+        // let's merge the lists
+        List<Long> newList = new ArrayList<>(ourUpdates);
+        for (Long ver : startingVersions) {
+          if (Math.abs(ver) < smallestNewUpdate) {
+            newList.add(ver);
+          }
+        }
+        
+        ourUpdates = newList;
+      } else {
+        
+        if (ourUpdates.size() > 0) {
+          ourLowThreshold = percentile(ourUpdates, 0.8f);
+          ourHighThreshold = percentile(ourUpdates, 0.2f);
+        } else {
+          // we have no versions and hence no frame of reference to tell if we can use a peers
+          // updates to bring us into sync
+          log.info(msg() + "DONE.  We have no versions.  sync failed.");
+          return false;
+        }
+      }
+      
+      ourUpdateSet = new HashSet<>(ourUpdates);
+      requestedUpdateSet = new HashSet<>(ourUpdates);
+      
+      for (;;) {
+        ShardResponse srsp = shardHandler.takeCompletedOrError();
+        if (srsp == null) break;
+        boolean success = handleResponse(srsp);
+        if (!success) {
+          log.info(msg() + "DONE. sync failed");
+          shardHandler.cancelAll();
+          return false;
+        }
+      }
+      
+      log.info(msg() + "DONE. sync succeeded");
+      return true;
+    } finally {
+      MDCLoggingContext.clear();
     }
-
-    log.info(msg() +  "DONE. sync succeeded");
-    return true;
   }
   
   private void requestVersions(String replica) {
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 7a2e72a..7028d45 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -1545,6 +1545,10 @@
     }
   }
 
+  public Long getCurrentMaxVersion() {
+    return maxVersionFromIndex;
+  }
+
   // this method is primarily used for unit testing and is not part of the public API for this class
   Long getMaxVersionFromIndex() {
     if (maxVersionFromIndex == null && versionInfo != null) {
@@ -1599,8 +1603,8 @@
     return highestVersion;
   }
 
-  public void onFirstSearcher(SolrIndexSearcher newSearcher) {
-    log.info("On first searcher opened, looking up max value of version field");
+  public void seedBucketsWithHighestVersion(SolrIndexSearcher newSearcher) {
+    log.info("Looking up max value of version field to seed version buckets");
     versionInfo.blockUpdates();
     try {
       maxVersionFromIndex = seedBucketsWithHighestVersion(newSearcher, versionInfo);
diff --git a/solr/core/src/test-files/log4j.properties b/solr/core/src/test-files/log4j.properties
index 659b430..86446e9 100644
--- a/solr/core/src/test-files/log4j.properties
+++ b/solr/core/src/test-files/log4j.properties
@@ -3,9 +3,8 @@
 
 log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
 log4j.appender.CONSOLE.Target=System.err
-log4j.appender.CONSOLE.layout=org.apache.solr.util.SolrLogLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
-
+log4j.appender.CONSOLE.layout=org.apache.log4j.EnhancedPatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n
 log4j.logger.org.apache.zookeeper=WARN
 log4j.logger.org.apache.hadoop=WARN
 log4j.logger.org.apache.directory=WARN
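
With the EnhancedPatternLayout above, the MDC keys that MDCLoggingContext populates appear in every test log line; an entry would look roughly like the following (values and exact key prefixes are illustrative only):

    1234 INFO  (qtp1234-12) [n:127.0.0.1:51234_solr collection1 shard1 core_node1 collection1] o.a.s.c.S.Request webapp=/solr path=/select ...
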
diff --git a/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-multiple-indexconfigs.xml b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-multiple-indexconfigs.xml
index 61d507c..6c3f90c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-multiple-indexconfigs.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-multiple-indexconfigs.xml
@@ -23,12 +23,10 @@
 
   <indexConfig>
     <useCompoundFile>true</useCompoundFile>
-    <unlockOnStartup>false</unlockOnStartup>
   </indexConfig>
   <!-- BEGIN BAD: multiple indexConfig sections -->
   <indexConfig>
     <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
-    <unlockOnStartup>true</unlockOnStartup>
   </indexConfig>
   <!-- END BAD -->
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-doctransformers.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-doctransformers.xml
new file mode 100644
index 0000000..5a6681f
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-doctransformers.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+ This is a stripped down config file used for a simple example...
+ It is *not* a good example to work from.
+-->
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
+  <dataDir>${solr.data.dir:}</dataDir>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <updateLog>
+      <str name="dir">${solr.data.dir:}</str>
+    </updateLog>
+  </updateHandler>
+
+  <transformer name="custom" class="org.apache.solr.response.TestCustomDocTransformer$CustomTransformerFactory" />
+
+  <requestHandler name="/select" class="solr.StandardRequestHandler"/>
+
+  <requestDispatcher handleSelect="true" >
+    <requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="2048" />
+  </requestDispatcher>
+
+  <requestHandler name="/admin/" class="org.apache.solr.handler.admin.AdminHandlers" />
+
+  <!-- config for the admin interface -->
+  <admin>
+    <defaultQuery>solr</defaultQuery>
+  </admin>
+
+</config>
+
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
index 654cb39..b95c295 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
@@ -30,7 +30,7 @@
   <codecFactory class="solr.SchemaCodecFactory"/>
 
   <updateHandler>
-    <updateLog enable="${enable.update.log}">
+    <updateLog enable="true">
       <str name="dir">${solr.ulog.dir:}</str>
     </updateLog> 
   </updateHandler>
diff --git a/solr/core/src/test/org/apache/solr/TestJoin.java b/solr/core/src/test/org/apache/solr/TestJoin.java
index f62ffe0..5628bb1 100644
--- a/solr/core/src/test/org/apache/solr/TestJoin.java
+++ b/solr/core/src/test/org/apache/solr/TestJoin.java
@@ -17,6 +17,7 @@
 
 package org.apache.solr;
 
+import org.apache.solr.common.params.ModifiableSolrParams;
 import org.noggit.JSONUtil;
 import org.noggit.ObjectBuilder;
 import org.apache.solr.request.SolrQueryRequest;
@@ -56,41 +57,43 @@
 
     assertU(commit());
 
+    ModifiableSolrParams p = params("sort","id asc");
+
     // test debugging
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id", "debugQuery","true")
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id", "debugQuery","true")
         ,"/debug/join/{!join from=dept_s to=dept_id_s}title:MTS=={'_MATCH_':'fromSetSize,toSetSize', 'fromSetSize':2, 'toSetSize':3}"
     );
 
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id")
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id")
         ,"/response=={'numFound':3,'start':0,'docs':[{'id':'10'},{'id':'12'},{'id':'13'}]}"
     );
 
     // empty from
-    assertJQ(req("q","{!join from=noexist_s to=dept_id_s}*:*", "fl","id")
+    assertJQ(req(p, "q","{!join from=noexist_s to=dept_id_s}*:*", "fl","id")
         ,"/response=={'numFound':0,'start':0,'docs':[]}"
     );
 
     // empty to
-    assertJQ(req("q","{!join from=dept_s to=noexist_s}*:*", "fl","id")
+    assertJQ(req(p, "q","{!join from=dept_s to=noexist_s}*:*", "fl","id")
         ,"/response=={'numFound':0,'start':0,'docs':[]}"
     );
 
     // self join... return everyone with the same title as Dave
-    assertJQ(req("q","{!join from=title to=title}name:dave", "fl","id")
+    assertJQ(req(p, "q","{!join from=title to=title}name:dave", "fl","id")
         ,"/response=={'numFound':2,'start':0,'docs':[{'id':'3'},{'id':'4'}]}"
     );
 
     // find people that develop stuff
-    assertJQ(req("q","{!join from=dept_id_s to=dept_s}text:develop", "fl","id")
+    assertJQ(req(p, "q","{!join from=dept_id_s to=dept_s}text:develop", "fl","id")
         ,"/response=={'numFound':3,'start':0,'docs':[{'id':'1'},{'id':'4'},{'id':'5'}]}"
     );
 
     // self join on multivalued text field
-    assertJQ(req("q","{!join from=title to=title}name:dave", "fl","id")
+    assertJQ(req(p, "q","{!join from=title to=title}name:dave", "fl","id")
         ,"/response=={'numFound':2,'start':0,'docs':[{'id':'3'},{'id':'4'}]}"
     );
 
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id", "debugQuery","true")
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s}title:MTS", "fl","id", "debugQuery","true")
         ,"/response=={'numFound':3,'start':0,'docs':[{'id':'10'},{'id':'12'},{'id':'13'}]}"
     );
     
@@ -99,12 +102,12 @@
       "/response=={'numFound':2,'start':0,'docs':[{'id':'10'},{'id':'13'}]}";
 
     // straightforward query
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s}name:dave", 
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s}name:dave",
                  "fl","id"),
              davesDepartments);
 
     // variable deref for sub-query parsing
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s v=$qq}", 
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s v=$qq}",
                  "qq","{!dismax}dave",
                  "qf","name",
                  "fl","id", 
@@ -112,14 +115,14 @@
              davesDepartments);
 
     // variable deref for sub-query parsing w/localparams
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s v=$qq}", 
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s v=$qq}",
                  "qq","{!dismax qf=name}dave",
                  "fl","id", 
                  "debugQuery","true"),
              davesDepartments);
 
     // defType local param to control sub-query parsing
-    assertJQ(req("q","{!join from=dept_s to=dept_id_s defType=dismax}dave", 
+    assertJQ(req(p, "q","{!join from=dept_s to=dept_id_s defType=dismax}dave",
                  "qf","name",
                  "fl","id", 
                  "debugQuery","true"),
@@ -127,7 +130,7 @@
 
     // find people that develop stuff - but limit via filter query to a name of "john"
     // this tests filters being pushed down to queries (SOLR-3062)
-    assertJQ(req("q","{!join from=dept_id_s to=dept_s}text:develop", "fl","id", "fq", "name:john")
+    assertJQ(req(p, "q","{!join from=dept_id_s to=dept_s}text:develop", "fl","id", "fq", "name:john")
              ,"/response=={'numFound':1,'start':0,'docs':[{'id':'1'}]}"
             );
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index e0eb3e7..0cc02a8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -52,21 +52,6 @@
 
   private static final Logger logger = LoggerFactory.getLogger(AliasIntegrationTest.class);
   
-  @BeforeClass
-  public static void beforeSuperClass() throws Exception {
-  }
-  
-  @AfterClass
-  public static void afterSuperClass() {
-    
-  }
-  
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-
   public AliasIntegrationTest() {
     super();
     sliceCount = 1;
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index dbd3ae3..982a881 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -104,18 +104,6 @@
   CompletionService<Object> completionService;
   Set<Future<Object>> pending;
   
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
-  }
-  
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-  }
-
-  
   public BasicDistributedZkTest() {
     sliceCount = 2;
     completionService = new ExecutorCompletionService<>(executor);
@@ -519,7 +507,7 @@
         createCores(httpSolrClient, executor, "multiunload2", 1, cnt);
       } finally {
         if (executor != null) {
-          ExecutorUtil.shutdownAndAwaitTermination(executor, 120, TimeUnit.SECONDS);
+          ExecutorUtil.shutdownAndAwaitTermination(executor);
         }
       }
     }
@@ -1116,21 +1104,6 @@
     }
   }
 
-  volatile CloudSolrClient commondCloudSolrClient;
-  protected CloudSolrClient getCommonCloudSolrClient() {
-    if (commondCloudSolrClient == null) {
-      synchronized(this) {
-        commondCloudSolrClient = new CloudSolrClient(zkServer.getZkAddress(), random().nextBoolean());
-        commondCloudSolrClient.setParallelUpdates(random().nextBoolean());
-        commondCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
-        commondCloudSolrClient.getLbClient().setConnectionTimeout(15000);
-        commondCloudSolrClient.getLbClient().setSoTimeout(30000);
-        commondCloudSolrClient.connect();
-      }
-    }
-    return commondCloudSolrClient;
-  }
-
   @Override
   protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {
 
@@ -1147,9 +1120,6 @@
   @Override
   public void distribTearDown() throws Exception {
     super.distribTearDown();
-    if (commondCloudSolrClient != null) {
-      commondCloudSolrClient.close();
-    }
     if (otherCollectionClients != null) {
       for (List<SolrClient> clientList : otherCollectionClients.values()) {
         IOUtils.close(clientList);
@@ -1158,11 +1128,5 @@
     otherCollectionClients = null;
     List<Runnable> tasks = executor.shutdownNow();
     assertTrue(tasks.isEmpty());
-
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-    
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
index fb2619e..99d8cab 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
@@ -49,7 +49,7 @@
 @SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
 @ThreadLeakLingering(linger = 60000)
 public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase {
-  private static final int FAIL_TOLERANCE = 40;
+  private static final int FAIL_TOLERANCE = 60;
 
   public static Logger log = LoggerFactory.getLogger(ChaosMonkeyNothingIsSafeTest.class);
   
@@ -91,16 +91,9 @@
     super.distribSetUp();
     // can help to hide this when testing and looking at logs
     //ignoreException("shard update error");
-    System.setProperty("numShards", Integer.toString(sliceCount));
     useFactory("solr.StandardDirectoryFactory");
   }
   
-  @Override
-  public void distribTearDown() throws Exception {
-    System.clearProperty("numShards");
-    super.distribTearDown();
-  }
-  
   public ChaosMonkeyNothingIsSafeTest() {
     super();
     sliceCount = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.slicecount", "-1"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
index c6dd00c..f646238 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
@@ -71,16 +71,7 @@
   @Override
   public void distribSetUp() throws Exception {
     useFactory("solr.StandardDirectoryFactory");
-
     super.distribSetUp();
-    
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
-  @Override
-  public void distribTearDown() throws Exception {
-    System.clearProperty("numShards");
-    super.distribTearDown();
   }
   
   public ChaosMonkeySafeLeaderTest() {
diff --git a/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java b/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java
new file mode 100644
index 0000000..e4a0041
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java
@@ -0,0 +1,165 @@
+package org.apache.solr.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.handler.SnapShooter;
+import org.apache.solr.servlet.SolrDispatchFilter;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slow
+public class CleanupOldIndexTest extends AbstractFullDistribZkTestBase {
+
+  private static Logger log = LoggerFactory.getLogger(CleanupOldIndexTest.class);
+  private StoppableIndexingThread indexThread;
+
+  public CleanupOldIndexTest() {
+    super();
+    sliceCount = 1;
+    fixShardCount(2);
+    schemaString = "schema15.xml";
+  }
+  
+  public static String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
+  public static RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
+  
+  protected String[] getFieldNames() {
+    return fieldNames;
+  }
+
+  protected RandVal[] getRandValues() {
+    return randVals;
+  }
+
+  @Test
+  public void test() throws Exception {
+    handle.clear();
+    handle.put("timestamp", SKIPVAL);
+    
+    int[] maxDocList = new int[] {300, 700, 1200};
+    int maxDoc = maxDocList[random().nextInt(maxDocList.length)];
+
+    indexThread = new StoppableIndexingThread(controlClient, cloudClient, "1", true, maxDoc, 1, true);
+    indexThread.start();
+
+    // give some time to index...
+    int[] waitTimes = new int[] {200, 2000, 3000};
+    Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);
+
+    // create some "old" index directories
+    JettySolrRunner jetty = chaosMonkey.getShard("shard1", 1);
+    SolrDispatchFilter filter = (SolrDispatchFilter)jetty.getDispatchFilter().getFilter();
+    CoreContainer coreContainer = filter.getCores();
+    File dataDir = null;
+    try (SolrCore solrCore = coreContainer.getCore("collection1")) {
+      dataDir = new File(solrCore.getDataDir());
+    }
+    assertTrue(dataDir.isDirectory());
+
+    long msInDay = 24*60*60*1000L;
+    String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date(1*msInDay));
+    String timestamp2 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date(2*msInDay));
+    File oldIndexDir1 = new File(dataDir, "index."+timestamp1);
+    FileUtils.forceMkdir(oldIndexDir1);
+    File oldIndexDir2 = new File(dataDir, "index."+timestamp2);
+    FileUtils.forceMkdir(oldIndexDir2);
+
+    // verify the "old" index directories exist
+    assertTrue(oldIndexDir1.isDirectory());
+    assertTrue(oldIndexDir2.isDirectory());
+
+    // bring shard replica down
+    JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1).jetty;
+
+    // wait a moment - let's allow some docs to be indexed so replication time is non-zero
+    Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);
+    
+    // bring shard replica up
+    replica.start();
+    
+    // make sure replication can start
+    Thread.sleep(3000);
+    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    
+    // stop indexing threads
+    indexThread.safeStop();
+    indexThread.join();
+
+    Thread.sleep(1000);
+  
+    waitForThingsToLevelOut(120);
+    waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
+
+    // test that leader and replica have same doc count
+    
+    String fail = checkShardConsistency("shard1", false, false);
+    if (fail != null)
+      fail(fail);
+
+    SolrQuery query = new SolrQuery("*:*");
+    query.setParam("distrib", "false");
+    long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
+    long client2Docs = shardToJetty.get("shard1").get(1).client.solrClient.query(query).getResults().getNumFound();
+    
+    assertTrue(client1Docs > 0);
+    assertEquals(client1Docs, client2Docs);
+
+    assertFalse(oldIndexDir1.isDirectory());
+    assertFalse(oldIndexDir2.isDirectory());
+  }
+  
+  @Override
+  protected void indexDoc(SolrInputDocument doc) throws IOException, SolrServerException {
+    controlClient.add(doc);
+    cloudClient.add(doc);
+  }
+
+  
+  @Override
+  public void distribTearDown() throws Exception {
+    // make sure threads have been stopped...
+    indexThread.safeStop();
+    indexThread.join();
+    super.distribTearDown();
+  }
+  
+  // skip the randoms - they can deadlock...
+  @Override
+  protected void indexr(Object... fields) throws Exception {
+    SolrInputDocument doc = new SolrInputDocument();
+    addFields(doc, fields);
+    addFields(doc, "rnd_b", true);
+    indexDoc(doc);
+  }
+}
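The new test above fabricates stale snapshot directories and then checks that recovery removes them. For context, it relies on the replication handler's "index.<timestamp>" naming; the following is a minimal sketch of that naming scheme, assuming only SnapShooter.DATE_FMT and the dataDir resolved earlier in the test:

    // Hedged sketch: build an "index.<timestamp>" directory name the way the test above does.
    SimpleDateFormat fmt = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
    String timestamp = fmt.format(new Date(System.currentTimeMillis()));
    File staleIndexDir = new File(dataDir, "index." + timestamp);
    FileUtils.forceMkdir(staleIndexDir);   // simulates a leftover snapshot/index directory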
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
index 1db6652..1df104c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
@@ -54,12 +54,6 @@
     sliceCount = 1;
   }
   
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-
   @Test
   public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
     waitForThingsToLevelOut(30000);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionStateFormat2Test.java
similarity index 80%
rename from solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java
rename to solr/core/src/test/org/apache/solr/cloud/CollectionStateFormat2Test.java
index cb1240a..9992689 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionStateFormat2Test.java
@@ -27,54 +27,34 @@
 import org.apache.zookeeper.data.Stat;
 import org.junit.Test;
 
-public class ExternalCollectionsTest extends AbstractFullDistribZkTestBase {
-  private CloudSolrClient client;
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-    client = createCloudClient(null);
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    client.close();
-  }
+public class CollectionStateFormat2Test extends AbstractFullDistribZkTestBase {
 
   protected String getSolrXml() {
     return "solr-no-core.xml";
   }
 
-  public ExternalCollectionsTest() {
-    checkCreatedVsState = false;
-  }
-
-
   @Test
   @ShardsFixed(num = 4)
   public void test() throws Exception {
-    testZkNodeLocation();
-    testConfNameAndCollectionNameSame();
+    try (CloudSolrClient client = createCloudClient(null))  {
+      testZkNodeLocation(client);
+      testConfNameAndCollectionNameSame(client);
+    }
   }
 
-
-
   @Override
   protected String getStateFormat() {
     return "2";
   }
 
-  private void testConfNameAndCollectionNameSame() throws Exception{
+  private void testConfNameAndCollectionNameSame(CloudSolrClient client) throws Exception{
     // .system collection precreates the configset
 
     createCollection(".system", client, 2, 1);
     waitForRecoveriesToFinish(".system", false);
   }
 
-  private void testZkNodeLocation() throws Exception{
+  private void testZkNodeLocation(CloudSolrClient client) throws Exception{
 
     String collectionName = "myExternColl";
 
@@ -103,7 +83,7 @@
 
     client.request(request);
 
-    checkForMissingCollection(collectionName);
+    assertCollectionNotExists(collectionName, 45);
     assertFalse("collection state should not exist externally", cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.getCollectionPath(collectionName), true));
 
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
index 71363e4..31e179c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
@@ -38,17 +38,6 @@
 @Slow
 public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkTestBase {
   private static final int MAX_TIMEOUT_SECONDS = 60;
-  private static final boolean DEBUG = false;
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-
-    useJettyDataDir = false;
-
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-  }
 
   public CollectionsAPIAsyncDistributedZkTest() {
     sliceCount = 1;
@@ -100,10 +89,6 @@
   
       assertEquals("Shard split did not complete. Last recorded state: " + state, "completed", state);
     }
-
-    if (DEBUG) {
-      printLayout();
-    }
   }
 
   private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
@@ -128,15 +113,4 @@
     NamedList innerResponse = (NamedList) response.getResponse().get("status");
     return (String) innerResponse.get("state");
   }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-    
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
-  }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 619822f..e38bb98 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -103,22 +103,14 @@
 public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase {
 
   private static final String DEFAULT_COLLECTION = "collection1";
-  private static final boolean DEBUG = false;
 
   // we randomly use a second config set rather than just one
   private boolean secondConfigSet = random().nextBoolean();
   
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
-
-  }
-  
   @Override
   public void distribSetUp() throws Exception {
     super.distribSetUp();
     
-    useJettyDataDir = false;
-
     if (secondConfigSet ) {
       String zkHost = zkServer.getZkHost();
       String zkAddress = zkServer.getZkAddress();
@@ -146,9 +138,6 @@
       AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "elevate.xml");
       zkClient.close();
     }
-    
-    System.setProperty("numShards", Integer.toString(sliceCount));
-
   }
   
   protected String getSolrXml() {
@@ -158,8 +147,6 @@
   
   public CollectionsAPIDistributedZkTest() {
     sliceCount = 2;
-    checkCreatedVsState = false;
-    
   }
   
   @Override
@@ -183,6 +170,7 @@
   @ShardsFixed(num = 4)
   public void test() throws Exception {
     testNodesUsedByCreate();
+    testNoConfigSetExist();
     testCollectionsAPI();
     testCollectionsAPIAddRemoveStress();
     testErrorHandling();
@@ -193,10 +181,6 @@
     // last
     deleteCollectionWithDownNodes();
     addReplicaTest();
-
-    if (DEBUG) {
-      super.printLayout();
-    }
   }
 
   private void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
@@ -225,7 +209,7 @@
       
     }
     
-    checkForMissingCollection(collectionName);
+    assertCollectionNotExists(collectionName, 45);
     
     assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
 
@@ -253,8 +237,8 @@
     request.setPath("/admin/collections");
 
     makeRequest(baseUrl, request);
-    
-    checkForMissingCollection(collectionName);
+
+    assertCollectionNotExists(collectionName, 45);
     
     // now creating that collection should work
     params = new ModifiableSolrParams();
@@ -514,6 +498,40 @@
     assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
   }
 
+  private void testNoConfigSetExist() throws Exception {
+    cloudClient.getZkStateReader().updateClusterState(true);
+    assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
+
+    // try and create a SolrCore with no collection name
+    Create createCmd = new Create();
+    createCmd.setCoreName("corewithnocollection3");
+    createCmd.setCollection("");
+    String dataDir = createTempDir().toFile().getAbsolutePath();
+    createCmd.setDataDir(dataDir);
+    createCmd.setNumShards(1);
+    createCmd.setCollectionConfigName("conf123");
+    boolean gotExp = false;
+    try {
+      makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+    } catch (SolrException e) {
+      gotExp = true;
+    }
+
+    assertTrue(gotExp);
+    TimeUnit.MILLISECONDS.sleep(200);
+    // in both cases, the collection should have defaulted to the core name
+    cloudClient.getZkStateReader().updateClusterState(true);
+
+    Collection<Slice> slices = cloudClient.getZkStateReader().getClusterState().getActiveSlices("corewithnocollection3");
+    assertNull(slices);
+
+    CollectionAdminRequest.List list = new CollectionAdminRequest.List();
+    CollectionAdminResponse res = new CollectionAdminResponse();
+    res.setResponse(makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), list));
+    List<String> collections = (List<String>) res.getResponse().get("collections");
+    assertFalse(collections.contains("corewithnocollection3"));
+  }
+
   private void testNodesUsedByCreate() throws Exception {
     // we can use this client because we just want base url
     final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
@@ -751,7 +769,7 @@
     makeRequest(baseUrl, request);
     
     // ensure its out of the state
-    checkForMissingCollection(collectionName);
+    assertCollectionNotExists(collectionName, 45);
     
     //collectionNameList.remove(collectionName);
 
@@ -1164,16 +1182,6 @@
     waitForRecoveriesToFinish(COLL_NAME, false);
   }
   
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
-  }
-
   private void clusterPropTest() throws Exception {
     try (CloudSolrClient client = createCloudClient(null)) {
       assertTrue("cluster property not set", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, "false"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
index c60386f..e244cd3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
@@ -75,29 +75,37 @@
   }
   
   public void testConcurrentCreateAndDeleteOverTheSameConfig() {
-    Logger.getLogger("org.apache.solr").setLevel(Level.WARN);
-    final String configName = "testconfig";
-    final File configDir = getFile("solr").toPath().resolve("configsets/configset-2/conf").toFile();
-    uploadConfig(configDir, configName); // upload config once, to be used by all collections
-    final SolrClient solrClient = new HttpSolrClient(solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString());
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final Thread[] threads = new Thread[2];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName, 
-          timeToRunSec, solrClient, failure);
-    }
-    
-    startAll(threads);
-    joinAll(threads);
-    
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-    
+    // TODO: no idea why this test needs to override the level, but regardless of the reason it should
+    // reset it when it's done.
+    final Logger logger = Logger.getLogger("org.apache.solr");
+    final Level SAVED_LEVEL = logger.getLevel();
     try {
-      solrClient.close();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+      logger.setLevel(Level.WARN);
+      final String configName = "testconfig";
+      final File configDir = getFile("solr").toPath().resolve("configsets/configset-2/conf").toFile();
+      uploadConfig(configDir, configName); // upload config once, to be used by all collections
+      final SolrClient solrClient = new HttpSolrClient(solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString());
+      final AtomicReference<Exception> failure = new AtomicReference<>();
+      final int timeToRunSec = 30;
+      final Thread[] threads = new Thread[2];
+      for (int i = 0; i < threads.length; i++) {
+        final String collectionName = "collection" + i;
+        threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName, 
+                                                      timeToRunSec, solrClient, failure);
+      }
+    
+      startAll(threads);
+      joinAll(threads);
+    
+      assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+      
+      try {
+        solrClient.close();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    } finally {
+      logger.setLevel(SAVED_LEVEL);
     }
   }
   
@@ -222,4 +230,4 @@
     
   }
   
-}
\ No newline at end of file
+}
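The change above scopes the temporary log-level override with try/finally so it is always undone. A condensed sketch of that save/restore pattern, assuming the log4j Logger and Level types this test imports:

    final Logger logger = Logger.getLogger("org.apache.solr");
    final Level savedLevel = logger.getLevel();   // may be null, meaning "inherit from parent"
    try {
      logger.setLevel(Level.WARN);
      // ... run the noisy concurrent create/delete work ...
    } finally {
      logger.setLevel(savedLevel);                // restore even if an assertion fails
    }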
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
index 312e77c..dba975a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
@@ -72,20 +72,8 @@
 @SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
 public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 
-  private static final String DEFAULT_COLLECTION = "collection1";
   private static final boolean DEBUG = false;
 
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-  }
-
   protected String getSolrXml() {
     return "solr-no-core.xml";
   }
@@ -93,8 +81,6 @@
 
   public CustomCollectionTest() {
     sliceCount = 2;
-    checkCreatedVsState = false;
-
   }
 
   @Override
@@ -453,14 +439,4 @@
     QueryResponse rsp = getCommonCloudSolrClient().query(params);
     return rsp;
   }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
-  }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
index 721273d..a63c174 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
@@ -42,28 +42,7 @@
 import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
 
-@Ignore("SOLR-6347")
 public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTestBase {
-  private CloudSolrClient client;
-
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
-
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-    client = createCloudClient(null);
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    client.close();
-  }
 
   protected String getSolrXml() {
     return "solr-no-core.xml";
@@ -71,45 +50,46 @@
 
   public DeleteLastCustomShardedReplicaTest() {
     sliceCount = 2;
-    checkCreatedVsState = false;
   }
 
   @Test
   @ShardsFixed(num = 2)
   public void test() throws Exception {
-    int replicationFactor = 1;
-    int maxShardsPerNode = 5;
+    try (CloudSolrClient client = createCloudClient(null))  {
+      int replicationFactor = 1;
+      int maxShardsPerNode = 5;
 
-    Map<String, Object> props = ZkNodeProps.makeMap(
-        "router.name", ImplicitDocRouter.NAME,
-        ZkStateReader.REPLICATION_FACTOR, replicationFactor,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode,
-        NUM_SLICES, 1,
-        SHARDS_PROP,"a,b");
+      Map<String, Object> props = ZkNodeProps.makeMap(
+              "router.name", ImplicitDocRouter.NAME,
+              ZkStateReader.REPLICATION_FACTOR, replicationFactor,
+              ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode,
+              NUM_SLICES, 1,
+              SHARDS_PROP,"a,b");
 
-    Map<String,List<Integer>> collectionInfos = new HashMap<>();
+      Map<String,List<Integer>> collectionInfos = new HashMap<>();
 
-    String collectionName = "customcollreplicadeletion";
+      String collectionName = "customcollreplicadeletion";
 
-    createCollection(collectionInfos, collectionName, props, client);
+      createCollection(collectionInfos, collectionName, props, client);
 
-    waitForRecoveriesToFinish(collectionName, false);
+      waitForRecoveriesToFinish(collectionName, false);
 
-    DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
-        .getClusterState().getCollection(collectionName);
-    Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
+      DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
+              .getClusterState().getCollection(collectionName);
+      Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
 
-    removeAndWaitForLastReplicaGone(collectionName, replica, "a");
+      removeAndWaitForLastReplicaGone(client, collectionName, replica, "a");
+    }
   }
 
-  protected void removeAndWaitForLastReplicaGone(String COLL_NAME, Replica replica, String shard)
+  protected void removeAndWaitForLastReplicaGone(CloudSolrClient client, String COLL_NAME, Replica replica, String shard)
       throws SolrServerException, IOException, InterruptedException {
     Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
         shard, "replica", replica.getName());
     SolrParams params = new MapSolrParams(m);
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
-    this.client.request(request);
+    client.request(request);
     long endAt = System.currentTimeMillis() + 3000;
     boolean success = false;
     DocCollection testcoll = null;
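With the client field removed, the DELETEREPLICA request is issued through the CloudSolrClient passed in from the test's try-with-resources block. A short sketch of that request, using only the helpers and parameter names visible above (shard "a" is simply this test's example value):

    Map m = makeMap("collection", collectionName,
                    "action", DELETEREPLICA.toLower(),
                    "shard", "a",
                    "replica", replica.getName());
    SolrRequest request = new QueryRequest(new MapSolrParams(m));
    request.setPath("/admin/collections");
    client.request(request);   // the client scoped to the test body, not a long-lived field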
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 598c510..dcf86de 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -48,21 +48,6 @@
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
 
 public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
-  private CloudSolrClient client;
-  
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-    client = createCloudClient(null);
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    client.close();
-  }
 
   protected String getSolrXml() {
     return "solr-no-core.xml";
@@ -70,7 +55,6 @@
 
   public DeleteReplicaTest() {
     sliceCount = 2;
-    checkCreatedVsState = false;
   }
 
   @Test
@@ -79,15 +63,15 @@
     String collectionName = "delLiveColl";
     try (CloudSolrClient client = createCloudClient(null)) {
       createCollection(collectionName, client);
-      
+
       waitForRecoveriesToFinish(collectionName, false);
-      
+
       DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
           .getClusterState().getCollection(collectionName);
-      
+
       Slice shard1 = null;
       Replica replica1 = null;
-      
+
       // Get an active replica
       for (Slice slice : testcoll.getSlices()) {
         if(replica1 != null)
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
index 844cd13..e3e0682 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
@@ -44,20 +44,6 @@
     sliceCount = 2;
   }
 
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", "2");
-    System.setProperty("solr.xml.persist", "true");
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-  }
-
   // TODO: Custom hash slice deletion test
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
index fc97448..71e336d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
@@ -234,7 +234,7 @@
 
     cloudClient.commit();
 
-    log.info("\n\n\n Total of "+deletedDocs.size()+" docs deleted \n\n\n");
+    log.info("Total of "+deletedDocs.size()+" docs deleted");
 
     maxOnLeader = getMaxVersionFromIndex(leader);
     maxOnReplica = getMaxVersionFromIndex(replica);
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 06efc95..b6a4067 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -36,7 +36,10 @@
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.servlet.SolrDispatchFilter;
+import org.apache.solr.update.UpdateHandler;
+import org.apache.solr.update.UpdateLog;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -78,12 +81,6 @@
     fixShardCount(3);
   }
   
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
   /**
    * Overrides the parent implementation to install a SocketProxy in-front of the Jetty server.
    */
@@ -207,7 +204,22 @@
     
     // sent 3 docs in so far, verify they are on the leader and replica
     assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 3);
-        
+
+    // Get the max version from the replica core to make sure it gets updated after recovery (see SOLR-7625)
+    JettySolrRunner replicaJetty = getJettyOnPort(getReplicaPort(notLeader));
+    SolrDispatchFilter filter = (SolrDispatchFilter)replicaJetty.getDispatchFilter().getFilter();
+    CoreContainer coreContainer = filter.getCores();
+    ZkCoreNodeProps replicaCoreNodeProps = new ZkCoreNodeProps(notLeader);
+    String coreName = replicaCoreNodeProps.getCoreName();
+    Long maxVersionBefore = null;
+    try (SolrCore core = coreContainer.getCore(coreName)) {
+      assertNotNull("Core '"+coreName+"' not found for replica: "+notLeader.getName(), core);
+      UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
+      maxVersionBefore = ulog.getCurrentMaxVersion();
+    }
+    assertNotNull("max version bucket seed not set for core " + coreName, maxVersionBefore);
+    log.info("Looked up max version bucket seed "+maxVersionBefore+" for core "+coreName);
+
     // now up the stakes and do more docs
     int numDocs = 1000;
     boolean hasPartition = false;
@@ -234,7 +246,15 @@
     }
     
     notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2, maxWaitSecsToSeeAllActive);
-    
+
+    try (SolrCore core = coreContainer.getCore(coreName)) {
+      assertNotNull("Core '" + coreName + "' not found for replica: " + notLeader.getName(), core);
+      Long currentMaxVersion = core.getUpdateHandler().getUpdateLog().getCurrentMaxVersion();
+      log.info("After recovery, looked up NEW max version bucket seed " + currentMaxVersion +
+          " for core " + coreName + ", was: " + maxVersionBefore);
+      assertTrue("max version bucket seed not updated after recovery!", currentMaxVersion > maxVersionBefore);
+    }
+
     // verify all docs received
     assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, numDocs + 3);
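The additions above implement the SOLR-7625 check: the replica's max version bucket seed must advance after recovery. A condensed sketch of that before/after comparison, with names mirroring the test rather than a general-purpose recipe:

    Long maxVersionBefore;
    try (SolrCore core = coreContainer.getCore(coreName)) {          // SolrCore is reference counted
      UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
      maxVersionBefore = ulog.getCurrentMaxVersion();
    }
    // ... partition the replica, index more documents, heal the partition, wait for recovery ...
    try (SolrCore core = coreContainer.getCore(coreName)) {
      Long maxVersionAfter = core.getUpdateHandler().getUpdateLog().getCurrentMaxVersion();
      assertTrue("seed should advance after recovery", maxVersionAfter > maxVersionBefore);
    }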
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/KerberosTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/KerberosTestUtil.java
index 9c57fb8..c4ffe9c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/KerberosTestUtil.java
+++ b/solr/core/src/test/org/apache/solr/cloud/KerberosTestUtil.java
@@ -1,7 +1,10 @@
 package org.apache.solr.cloud;
 
 import java.io.File;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Properties;
 
@@ -119,4 +122,27 @@
       return krb5LoginModuleName;
     }
   }
+
+  /**
+   *  These Locales don't generate dates that are compatible with Hadoop MiniKdc.
+   */
+  private final static List<String> brokenLanguagesWithMiniKdc =
+      Arrays.asList(
+          new Locale("th").getLanguage(), 
+          new Locale("ja").getLanguage(), 
+          new Locale("hi").getLanguage()
+          );
+  /**
+   * Returns the currently set locale, and overrides it with {@link Locale#US} if it's
+   * currently something MiniKdc cannot handle.
+   *
+   * @see Locale#setDefault
+   */
+  public static final Locale overrideLocaleIfNotSpportedByMiniKdc() {
+    Locale old = Locale.getDefault();
+    if (brokenLanguagesWithMiniKdc.contains(Locale.getDefault().getLanguage())) {
+      Locale.setDefault(Locale.US);
+    }
+    return old;
+  }
 }
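Callers of the new helper are expected to capture the returned locale and restore it when the test finishes, as the setUp/tearDown changes elsewhere in this commit do. A minimal sketch of that call pattern:

    private Locale savedLocale;

    @Override
    public void setUp() throws Exception {
      savedLocale = KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc();  // may switch the default to Locale.US
      super.setUp();
    }

    @Override
    public void tearDown() throws Exception {
      Locale.setDefault(savedLocale);   // always restore the original default locale
      super.tearDown();
    }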
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
index b1f8401..657c7f2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
@@ -39,26 +39,6 @@
   }
 
   @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    System.clearProperty("numShards");
-
-    super.distribTearDown();
-
-    // close socket proxies after super.distribTearDown
-    if (!proxies.isEmpty()) {
-      for (SocketProxy proxy : proxies.values()) {
-        proxy.close();
-      }
-    }
-  }
-
-  @Override
   @Test
   public void test() throws Exception {
     oneShardTest();
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 2f5ed87..95315d2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -153,7 +153,7 @@
     Indexer indexer = new Indexer(cloudClient, splitKey, 1, 30);
     indexer.start();
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), targetCollection);
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), targetCollection);
 
     try (HttpSolrClient collectionClient = new HttpSolrClient(url)) {
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java b/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
index 2ef4d12..a5e9614 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
@@ -49,16 +49,6 @@
 
   private static final int NUM_COLLECTIONS = 4;
 
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-
-    useJettyDataDir = false;
-
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-  }
-
   public MultiThreadedOCPTest() {
     sliceCount = 2;
   }
@@ -296,16 +286,6 @@
     return (String) innerResponse.get("state");
   }
 
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("solr.xml.persist");
-    
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
-  }
-
 }
 
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
index 49b7b60..e8a6d30 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
@@ -50,28 +50,8 @@
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 
 @LuceneTestCase.Slow
-@SuppressSSL     // See SOLR-5776
+@SuppressSSL(bugUrl = "SOLR-5776")
 public class OverseerRolesTest  extends AbstractFullDistribZkTestBase{
-  private CloudSolrClient client;
-
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
-
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-    client = createCloudClient(null);
-  }
-
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    client.close();
-  }
 
   protected String getSolrXml() {
     return "solr-no-core.xml";
@@ -80,17 +60,17 @@
   public OverseerRolesTest() {
     sliceCount = 2;
     fixShardCount(TEST_NIGHTLY ? 6 : 2);
-
-    checkCreatedVsState = false;
   }
 
   @Test
   public void test() throws Exception {
-    testQuitCommand();
-    testOverseerRole();
+    try (CloudSolrClient client = createCloudClient(null))  {
+      testQuitCommand(client);
+      testOverseerRole(client);
+    }
   }
 
-  private void testQuitCommand() throws Exception{
+  private void testQuitCommand(CloudSolrClient client) throws Exception{
     String collectionName = "testOverseerQuit";
 
     createCollection(collectionName, client);
@@ -121,7 +101,7 @@
 
 
 
-  private void testOverseerRole() throws Exception {
+  private void testOverseerRole(CloudSolrClient client) throws Exception {
     String collectionName = "testOverseerCol";
 
     createCollection(collectionName, client);
@@ -137,7 +117,7 @@
     Collections.shuffle(l, random());
     String overseerDesignate = l.get(0);
     log.info("overseerDesignate {}",overseerDesignate);
-    setOverseerRole(CollectionAction.ADDROLE,overseerDesignate);
+    setOverseerRole(client, CollectionAction.ADDROLE,overseerDesignate);
 
     long timeout = System.currentTimeMillis()+15000;
 
@@ -166,7 +146,7 @@
 
     String anotherOverseer = l.get(0);
     log.info("Adding another overseer designate {}", anotherOverseer);
-    setOverseerRole(CollectionAction.ADDROLE, anotherOverseer);
+    setOverseerRole(client, CollectionAction.ADDROLE, anotherOverseer);
 
     String currentOverseer = getLeaderNode(client.getZkStateReader().getZkClient());
 
@@ -211,7 +191,7 @@
     assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(client.getZkStateReader().getZkClient()), leaderchanged);
   }
 
-  private void setOverseerRole(CollectionAction action, String overseerDesignate) throws Exception, IOException {
+  private void setOverseerRole(CloudSolrClient client, CollectionAction action, String overseerDesignate) throws Exception, IOException {
     log.info("Adding overseer designate {} ", overseerDesignate);
     Map m = makeMap(
         "action", action.toString().toLowerCase(Locale.ROOT),
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
index 941baf6..1208232 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.NamedList;
@@ -27,24 +28,18 @@
 
   public OverseerStatusTest() {
     schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
+    sliceCount = 1;
   }
 
   @Test
+  @ShardsFixed(num = 1)
   public void test() throws Exception {
 
     waitForThingsToLevelOut(15);
 
     // find existing command counts because collection may be created by base test class too
     int numCollectionCreates = 0, numOverseerCreates = 0;
-    NamedList<Object> resp = invokeCollectionApi("action",
-        CollectionParams.CollectionAction.OVERSEERSTATUS.toLower());
+    NamedList<Object> resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
     if (resp != null) {
       NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
       if (collection_operations != null)  {
@@ -64,8 +59,7 @@
 
     String collectionName = "overseer_status_test";
     CollectionAdminResponse response = createCollection(collectionName, 1, 1, 1);
-    resp = invokeCollectionApi("action",
-        CollectionParams.CollectionAction.OVERSEERSTATUS.toLower());
+    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
     NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
     NamedList<Object> overseer_operations = (NamedList<Object>) resp.get("overseer_operations");
     SimpleOrderedMap<Object> createcollection = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
@@ -73,22 +67,25 @@
     createcollection = (SimpleOrderedMap<Object>) overseer_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
     assertEquals("No stats for create in Overseer", numOverseerCreates + 1, createcollection.get("requests"));
 
-    invokeCollectionApi("action", CollectionParams.CollectionAction.RELOAD.toLower(), "name", collectionName);
-    resp = invokeCollectionApi("action",
-        CollectionParams.CollectionAction.OVERSEERSTATUS.toLower());
+    // Reload the collection
+    new CollectionAdminRequest.Reload().setCollectionName(collectionName).process(cloudClient);
+
+
+    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
     collection_operations = (NamedList<Object>) resp.get("collection_operations");
     SimpleOrderedMap<Object> reload = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.RELOAD.toLower());
     assertEquals("No stats for reload in OverseerCollectionProcessor", 1, reload.get("requests"));
 
     try {
-      invokeCollectionApi("action", CollectionParams.CollectionAction.SPLITSHARD.toLower(),
-          "collection", "non_existent_collection",
-          "shard", "non_existent_shard");
+      new CollectionAdminRequest.SplitShard()
+              .setCollectionName("non_existent_collection")
+              .setShardName("non_existent_shard")
+              .process(cloudClient);
+      fail("Split shard for non existent collection should have failed");
     } catch (Exception e) {
       // expected because we did not correctly specify required params for split
     }
-    resp = invokeCollectionApi("action",
-        CollectionParams.CollectionAction.OVERSEERSTATUS.toLower());
+    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
     collection_operations = (NamedList<Object>) resp.get("collection_operations");
     SimpleOrderedMap<Object> split = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.SPLITSHARD.toLower());
     assertEquals("No stats for split in OverseerCollectionProcessor", 1, split.get("errors"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
index a610b78..404cbdd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
@@ -59,29 +59,6 @@
     fixShardCount(3);
   }
   
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
-  @Override
-  public void distribTearDown() throws Exception {
-    
-    log.info("tearing down replicationFactorTest!");
-    
-    System.clearProperty("numShards");
-    
-    super.distribTearDown();
-
-    log.info("super.distribTearDown complete, closing all socket proxies");
-    if (!proxies.isEmpty()) {
-      for (SocketProxy proxy : proxies.values()) {
-        proxy.close();
-      }
-    }    
-  }
-  
   /**
    * Overrides the parent implementation so that we can configure a socket proxy
    * to sit infront of each Jetty server, which gives us the ability to simulate
diff --git a/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java b/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
index 4523d52..f9897a6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
@@ -18,6 +18,7 @@
  */
 
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.zookeeper.KeeperException;
@@ -42,16 +43,9 @@
   @Override
   public void distribSetUp() throws Exception {
     super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
     useFactory("solr.StandardDirectoryFactory");
   }
 
-  @Override
-  public void distribTearDown() throws Exception {
-    System.clearProperty("numShards");
-    super.distribTearDown();
-  }
-
   @Test
   public void test() throws Exception {
     waitForRecoveriesToFinish(false);
@@ -77,7 +71,7 @@
       int n = random().nextInt(getShardCount());
       String nodeName = cloudJettys.get(n).nodeName;
       log.info("Chose {} as overseer designate", nodeName);
-      invokeCollectionApi(CollectionParams.ACTION, CollectionParams.CollectionAction.ADDROLE.toLower(), "role", "overseer", "node", nodeName);
+      new CollectionAdminRequest.AddRole().setRole("overseer").setNode(nodeName).process(cloudClient);
       designates.add(nodeName);
       designateJettys.add(cloudJettys.get(n));
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/SaslZkACLProviderTest.java b/solr/core/src/test/org/apache/solr/cloud/SaslZkACLProviderTest.java
index f7b8e28..26fece9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SaslZkACLProviderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SaslZkACLProviderTest.java
@@ -3,8 +3,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Locale;
 
 import javax.security.auth.login.Configuration;
@@ -52,12 +50,7 @@
       .getLogger(SaslZkACLProviderTest.class);
 
   private static final Charset DATA_ENCODING = Charset.forName("UTF-8");
-  // These Locales don't generate dates that are compatibile with Hadoop MiniKdc.
-  protected final static List<String> brokenLocales =
-    Arrays.asList(
-      "th_TH_TH_#u-nu-thai",
-      "ja_JP_JP_#u-ca-japanese",
-      "hi_IN");
+
   protected Locale savedLocale = null;
 
   protected ZkTestServer zkServer;
@@ -77,10 +70,7 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    if (brokenLocales.contains(Locale.getDefault().toString())) {
-      savedLocale = Locale.getDefault();
-      Locale.setDefault(Locale.US);
-    }
+    savedLocale = KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc();
     log.info("####SETUP_START " + getTestName());
     createTempDir();
 
@@ -124,10 +114,7 @@
   @Override
   public void tearDown() throws Exception {
     zkServer.shutdown();
-
-    if (savedLocale != null) {
-      Locale.setDefault(savedLocale);
-    }
+    Locale.setDefault(savedLocale);
     super.tearDown();
   }
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
index ae418fd..f060c55 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
@@ -17,6 +17,10 @@
  * limitations under the License.
  */
 
+import java.io.File;
+
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -51,7 +55,31 @@
   private void doCustomSharding() throws Exception {
     printLayout();
 
-    startCloudJetty(collection, "shardA");
+    int totalReplicas = getTotalReplicas(collection);
+
+    File jettyDir = createTempDir("jetty").toFile();
+    jettyDir.mkdirs();
+    setupJettySolrHome(jettyDir);
+    JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
+    jettys.add(j);
+    SolrClient client = createNewSolrClient(j.getLocalPort());
+    clients.add(client);
+
+    int retries = 60;
+    while (--retries >= 0) {
+      // total replicas changed.. assume it was us
+      if (getTotalReplicas(collection) != totalReplicas) {
+       break;
+      }
+      Thread.sleep(500);
+    }
+
+    if (retries <= 0) {
+      fail("Timeout waiting for " + j + " to appear in clusterstate");
+      printLayout();
+    }
+
+    updateMappingsFromZk(this.jettys, this.clients);
 
     printLayout();
   }
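The inlined replacement for startCloudJetty polls the cluster state until the replica count changes. A condensed sketch of that wait loop (with the layout dump placed before the failure so it actually runs):

    int before = getTotalReplicas(collection);
    // ... create and start the extra jetty, register its SolrClient ...
    int retries = 60;
    while (--retries >= 0 && getTotalReplicas(collection) == before) {
      Thread.sleep(500);               // poll roughly every half second, up to ~30 seconds
    }
    if (retries <= 0) {
      printLayout();                   // dump the cluster layout for debugging before failing
      fail("Timeout waiting for the new jetty to appear in clusterstate");
    }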
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 922caa6..1d8c6cf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -64,13 +64,6 @@
     schemaString = "schema15.xml";      // we need a string id
   }
 
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-    System.setProperty("solr.xml.persist", "true");
-  }
-
   @Test
   public void test() throws Exception {
 
@@ -248,7 +241,7 @@
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
     try (HttpSolrClient collectionClient = new HttpSolrClient(url)) {
 
@@ -322,7 +315,7 @@
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
     try (HttpSolrClient collectionClient = new HttpSolrClient(url)) {
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
index 5ce9721..80b8f13 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
@@ -88,7 +88,6 @@
   public void distribSetUp() throws Exception {
     super.distribSetUp();
     useJettyDataDir = false;
-    System.setProperty("solr.xml.persist", "true");
   }
   
   protected String getSolrXml() {
@@ -100,8 +99,6 @@
     sliceCount = 2;
     completionService = new ExecutorCompletionService<>(executor);
     pending = new HashSet<>();
-    checkCreatedVsState = false;
-    
   }
 
   @Test
@@ -263,6 +260,5 @@
   @Override
   public void distribTearDown() throws Exception {
     super.distribTearDown();
-    System.clearProperty("solr.xml.persist");
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
index a7155cf..7284cc7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
@@ -51,12 +51,6 @@
     sliceCount = 2;
   }
 
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-
   @Test
   public void testLoadDocsIntoGettingStartedCollection() throws Exception {
     waitForThingsToLevelOut(30000);
diff --git a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
index a35f69e..97f0682 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
@@ -50,24 +50,7 @@
 @Slow
 public class SyncSliceTest extends AbstractFullDistribZkTestBase {
   private boolean success = false;
-  
-  @BeforeClass
-  public static void beforeSuperClass() throws Exception {
-  }
-  
-  @AfterClass
-  public static void afterSuperClass() {
-    
-  }
-  
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    // we expect this time of exception as shards go up and down...
-    //ignoreException(".*");
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
+
   @Override
   public void distribTearDown() throws Exception {
     if (!success) {
@@ -75,7 +58,7 @@
     }
     super.distribTearDown();
   }
-  
+
   public SyncSliceTest() {
     super();
     sliceCount = 1;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java b/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
index f3f3391..910a221 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
@@ -23,9 +23,6 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 import org.apache.http.HttpException;
@@ -63,12 +60,6 @@
   static String requestUsername = MockAuthenticationPlugin.expectedUsername;
   static String requestPassword = MockAuthenticationPlugin.expectedPassword;
   
-  protected final static List<String> brokenLocales =
-      Arrays.asList(
-        "th_TH_TH_#u-nu-thai",
-        "ja_JP_JP_#u-ca-japanese",
-        "hi_IN");
-
   @Rule
   public TestRule solrTestRules = RuleChain
       .outerRule(new SystemPropertiesRestoreRule());
@@ -80,9 +71,6 @@
 
   @Override
   public void setUp() throws Exception {
-    if (brokenLocales.contains(Locale.getDefault().toString())) {
-      Locale.setDefault(Locale.US);
-    }
     setupAuthenticationPlugin();
     super.setUp();
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
index cc4602d..d60532e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
@@ -52,12 +52,7 @@
 
   public TestCollectionAPI() {
     schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
     sliceCount = 2;
-    super.distribSetUp();
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCryptoKeys.java b/solr/core/src/test/org/apache/solr/cloud/TestCryptoKeys.java
index 80774cc..49a7817 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCryptoKeys.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCryptoKeys.java
@@ -71,12 +71,6 @@
   }
 
 
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-
   public TestCryptoKeys() {
     super();
     sliceCount = 1;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
index 3e67c27..51e55fc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
@@ -22,10 +22,13 @@
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
+import org.apache.solr.client.solrj.embedded.JettyConfig.Builder;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
@@ -88,7 +91,9 @@
   protected void testCollectionCreateSearchDelete() throws Exception {
 
     File solrXml = new File(SolrTestCaseJ4.TEST_HOME(), "solr-no-core.xml");
-    MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, null, createTempDir().toFile(), solrXml, null, null);
+    Builder jettyConfig = JettyConfig.builder();
+    jettyConfig.waitForLoadingCoresToFinish(null);
+    MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir().toFile(), solrXml, jettyConfig.build());
 
     try {
       assertNotNull(miniCluster.getZkServer());
@@ -174,6 +179,16 @@
         startedServer = miniCluster.startJettySolrRunner(null, null, null);
         assertTrue(startedServer.isRunning());
         assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
+        Thread.sleep(15000);
+        try {
+          cloudSolrClient.query(query);
+          fail("Expected exception on query because collection should not be ready - we have turned on async core loading");
+        } catch (SolrServerException e) {
+          SolrException rc = (SolrException) e.getRootCause();
+          assertTrue(rc.code() >= 500 && rc.code() < 600);
+        } catch (SolrException e) {
+          assertTrue(e.code() >= 500 && e.code() < 600);
+        }
 
         // delete the collection we created earlier
         miniCluster.deleteCollection(collectionName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
index a124ad4..47c2f34 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
@@ -19,8 +19,6 @@
 
 import javax.security.auth.login.Configuration;
 import java.io.File;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Locale;
 
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
@@ -62,14 +60,9 @@
     REPLICATION_FACTOR = 2;
   }
   
-  protected final static List<String> brokenLocales =
-      Arrays.asList(
-        "th_TH_TH_#u-nu-thai",
-        "ja_JP_JP_#u-ca-japanese",
-        "hi_IN");
-
   private MiniKdc kdc;
 
+  private Locale savedLocale; // in case locale is broken and we need to fill in a working locale
   @Rule
   public TestRule solrTestRules = RuleChain
       .outerRule(new SystemPropertiesRestoreRule());
@@ -81,9 +74,7 @@
 
   @Override
   public void setUp() throws Exception {
-    if (brokenLocales.contains(Locale.getDefault().toString())) {
-      Locale.setDefault(Locale.US);
-    }
+    savedLocale = KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc();
     super.setUp();
     setupMiniKdc();
   }
@@ -171,6 +162,7 @@
     if (kdc != null) {
       kdc.stop();
     }
+    Locale.setDefault(savedLocale);
     super.tearDown();
   }
 }
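
All three Kerberos test classes touched in this patch drop their private brokenLocales lists in favor of a shared KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc() helper, saving its return value and restoring it in tearDown()/distribTearDown(). The helper's implementation is not part of this diff; a minimal sketch of what it presumably does, reconstructed from the removed lines (the broken-locale list and the Locale.US fallback):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Locale;

    // Hypothetical sketch only -- the real helper lives in KerberosTestUtil and is not shown here.
    // It switches to a locale MiniKdc can handle when the current default is known to be broken,
    // and returns the previous default so tearDown() can call Locale.setDefault(savedLocale).
    public static Locale overrideLocaleIfNotSpportedByMiniKdc() {
      Locale saved = Locale.getDefault();
      List<String> brokenLocales = Arrays.asList(
          "th_TH_TH_#u-nu-thai",
          "ja_JP_JP_#u-ca-japanese",
          "hi_IN");
      if (brokenLocales.contains(saved.toString())) {
        Locale.setDefault(Locale.US);
      }
      return saved;
    }
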
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberos.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberos.java
index e693764..cc2696b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberos.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberos.java
@@ -18,10 +18,9 @@
  */
 
 import javax.security.auth.login.Configuration;
+
 import java.io.File;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Locale;
 
 import org.apache.commons.io.Charsets;
@@ -49,20 +48,14 @@
   static final int TIMEOUT = 10000;
   private MiniKdc kdc;
 
-  protected final static List<String> brokenLocales =
-      Arrays.asList(
-          "th_TH_TH_#u-nu-thai",
-          "ja_JP_JP_#u-ca-japanese",
-          "hi_IN");
+  private Locale savedLocale; // in case locale is broken and we need to fill in a working locale
 
   Configuration originalConfig = Configuration.getConfiguration();
   
   @Override
   public void distribSetUp() throws Exception {
     //SSLTestConfig.setSSLSystemProperties();
-    if (brokenLocales.contains(Locale.getDefault().toString())) {
-      Locale.setDefault(Locale.US);
-    }
+    savedLocale = KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc();
     // Use just one jetty
     this.sliceCount = 0;
     this.fixShardCount(1);
@@ -198,6 +191,7 @@
       kdc.stop();
     }
     //SSLTestConfig.clearSSLSystemProperties();
+    Locale.setDefault(savedLocale);
     super.distribTearDown();
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
index 9790d7e..d91f83f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
@@ -20,7 +20,6 @@
 import javax.security.auth.login.Configuration;
 
 import java.io.File;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
 import java.util.Properties;
@@ -76,14 +75,9 @@
     REPLICATION_FACTOR = 1;
   }
 
-  protected final static List<String> brokenLocales =
-      Arrays.asList(
-          "th_TH_TH_#u-nu-thai",
-          "ja_JP_JP_#u-ca-japanese",
-          "hi_IN");
-
   private MiniKdc kdc;
 
+  private Locale savedLocale; // in case locale is broken and we need to fill in a working locale
   @Rule
   public TestRule solrTestRules = RuleChain
       .outerRule(new SystemPropertiesRestoreRule());
@@ -95,9 +89,7 @@
 
   @Override
   public void setUp() throws Exception {
-    if (brokenLocales.contains(Locale.getDefault().toString())) {
-      Locale.setDefault(Locale.US);
-    }
+    savedLocale = KerberosTestUtil.overrideLocaleIfNotSpportedByMiniKdc();
     super.setUp();
     setupMiniKdc();
     HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer());
@@ -237,6 +229,7 @@
     if (kdc != null) {
       kdc.stop();
     }
+    Locale.setDefault(savedLocale);
     super.tearDown();
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
index 138de11..31dd3fd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
@@ -56,7 +56,6 @@
   
   public UnloadDistributedZkTest() {
     super();
-    checkCreatedVsState = false;
   }
 
   @Test
@@ -375,7 +374,7 @@
         // create the cores
         createCores(adminClient, executor, "multiunload", 2, cnt);
       } finally {
-        ExecutorUtil.shutdownAndAwaitTermination(executor, 120, TimeUnit.SECONDS);
+        ExecutorUtil.shutdownAndAwaitTermination(executor);
       }
 
       executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
@@ -399,7 +398,7 @@
           Thread.sleep(random().nextInt(50));
         }
       } finally {
-        ExecutorUtil.shutdownAndAwaitTermination(executor, 120, TimeUnit.SECONDS);
+        ExecutorUtil.shutdownAndAwaitTermination(executor);
       }
     }
   }
diff --git a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
index 6ad07f0..979222b 100644
--- a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
@@ -18,7 +18,11 @@
 package org.apache.solr.core;
 
 import java.nio.file.Path;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.NoLockFactory;
@@ -26,6 +30,7 @@
 import org.apache.solr.cloud.hdfs.HdfsTestUtil;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.DirectoryFactory.DirContext;
+import org.apache.solr.handler.SnapShooter;
 import org.apache.solr.util.BadHdfsThreadsFilter;
 import org.apache.solr.util.MockCoreContainer.MockCoreDescriptor;
 import org.junit.AfterClass;
@@ -53,7 +58,7 @@
     System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
     dfsCluster = null;
   }
-  
+
   @Test
   public void testInitArgsOrSysPropConfig() throws Exception {
     
@@ -130,4 +135,35 @@
     hdfsFactory.close();
   }
 
+  @Test
+  public void testCleanupOldIndexDirectories() throws Exception {
+
+    HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory();
+
+    System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
+    hdfsFactory.init(new NamedList<>());
+    String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+    assertTrue(dataHome.endsWith("/solr1/mock/data"));
+    System.clearProperty("solr.hdfs.home");
+
+    FileSystem hdfs = dfsCluster.getFileSystem();
+
+    org.apache.hadoop.fs.Path dataHomePath = new org.apache.hadoop.fs.Path(dataHome);
+    org.apache.hadoop.fs.Path currentIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index");
+    assertTrue(!hdfs.isDirectory(currentIndexDirPath));
+    hdfs.mkdirs(currentIndexDirPath);
+    assertTrue(hdfs.isDirectory(currentIndexDirPath));
+
+    String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
+    org.apache.hadoop.fs.Path oldIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index."+timestamp1);
+    assertTrue(!hdfs.isDirectory(oldIndexDirPath));
+    hdfs.mkdirs(oldIndexDirPath);
+    assertTrue(hdfs.isDirectory(oldIndexDirPath));
+
+    hdfsFactory.cleanupOldIndexDirectories(dataHomePath.toString(), currentIndexDirPath.toString());
+
+    assertTrue(hdfs.isDirectory(currentIndexDirPath));
+    assertTrue(!hdfs.isDirectory(oldIndexDirPath));
+  }
+
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
index c81d9f7..ed913a6 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
@@ -114,16 +114,6 @@
     assertTrue(response1.isSuccess());
   }
 
-  @Override
-  public void distribTearDown() throws Exception {
-    super.distribTearDown();
-    System.clearProperty("numShards");
-    System.clearProperty("zkHost");
-
-    // insurance
-    DirectUpdateHandler2.commitOnClose = true;
-  }
-
   public static void postAndCheck(CloudSolrClient cloudClient, String baseUrl, String blobName, ByteBuffer bytes, int count) throws Exception {
     postData(cloudClient, baseUrl, blobName, bytes);
 
diff --git a/solr/core/src/test/org/apache/solr/response/TestCustomDocTransformer.java b/solr/core/src/test/org/apache/solr/response/TestCustomDocTransformer.java
new file mode 100644
index 0000000..dab99df
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/response/TestCustomDocTransformer.java
@@ -0,0 +1,118 @@
+package org.apache.solr.response;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.transform.DocTransformer;
+import org.apache.solr.response.transform.TransformerFactory;
+import org.bouncycastle.util.Strings;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestCustomDocTransformer extends SolrTestCaseJ4 {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig-doctransformers.xml","schema.xml");
+  }
+
+  @After
+  public void cleanup() throws Exception {
+    assertU(delQ("*:*"));
+    assertU(commit());
+  }
+
+  @Test
+  public void testCustomTransformer() throws Exception {
+    // Build a simple index
+    int max = 10;
+    for(int i=0; i<max; i++) {
+      SolrInputDocument sdoc = new SolrInputDocument();
+      sdoc.addField("id", i);
+      sdoc.addField("subject", "xx");
+      sdoc.addField("title", "title_"+i);
+      updateJ(jsonAdd(sdoc), null);
+    }
+    assertU(commit());
+    assertQ(req("q", "*:*"), "//*[@numFound='" + max + "']");
+    
+    assertQ( req(
+        "q", "*:*", 
+        "fl", "id,out:[custom extra=subject,title]"), 
+        // Check that the concatenated fields make it into the results
+        "//*[@numFound='" + max + "']",
+        "//str[.='xx#title_0#']",
+        "//str[.='xx#title_1#']",
+        "//str[.='xx#title_2#']",
+        "//str[.='xx#title_3#']");
+  }
+  
+  public static class CustomTransformerFactory extends TransformerFactory {
+    @Override
+    public DocTransformer create(String field, SolrParams params, SolrQueryRequest req) {
+      String[] extra = null;
+      String ext = params.get("extra");
+      if(ext!=null) {
+        extra = Strings.split(ext, ',');
+      }
+      return new CustomTransformer(field, extra);
+    }
+  }
+  
+  public static class CustomTransformer extends DocTransformer {
+    final String name;
+    final String[] extra;
+    final StringBuilder str = new StringBuilder();
+    
+    public CustomTransformer(String name, String[] extra) {
+      this.name = name;
+      this.extra = extra;
+    }
+    
+    @Override
+    public String getName() {
+      return "custom";
+    }
+
+    @Override
+    public String[] getExtraRequestFields() {
+      return extra;
+    }
+
+    /**
+     * This transformer simply concatenates the values of multiple fields
+     */
+    @Override
+    public void transform(SolrDocument doc, int docid) throws IOException {
+      str.setLength(0);
+      for(String s : extra) {
+        String v = ResponseWriterUtil.getAsString(s, doc);
+        str.append(v).append('#');
+      }
+      System.out.println( "HELLO: "+str );
+      doc.setField(name, str.toString());
+    }
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java b/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
index abfff36..b9ca811 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
@@ -47,19 +47,9 @@
  */
 @SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
 public class TestCloudSchemaless extends AbstractFullDistribZkTestBase {
-  private static final Logger log = LoggerFactory.getLogger(TestCloudManagedSchemaConcurrent.class);
+  private static final Logger log = LoggerFactory.getLogger(TestCloudSchemaless.class);
   private static final String SUCCESS_XPATH = "/response/lst[@name='responseHeader']/int[@name='status'][.='0']";
 
-  @Override
-  public void distribSetUp() throws Exception {
-
-    super.distribSetUp();
-
-    useJettyDataDir = false;
-
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
   @After
   public void teardDown() throws Exception {
     super.tearDown();
@@ -76,7 +66,6 @@
   @BeforeClass
   public static void initSysProperties() {
     System.setProperty("managed.schema.mutable", "true");
-    System.setProperty("enable.update.log", "true");
   }
 
   @Override
diff --git a/solr/core/src/test/org/apache/solr/search/TestSearcherReuse.java b/solr/core/src/test/org/apache/solr/search/TestSearcherReuse.java
index 6d75971..b47b030 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSearcherReuse.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSearcherReuse.java
@@ -117,7 +117,7 @@
       assertSearcherHasNotChanged(expectedSearcher);
 
       assertU(delI("0")); // no doc has this id, yet
-      assertU(commit());
+      assertU(commit("softCommit","true"));
       assertSearcherHasNotChanged(expectedSearcher);
 
     } finally {
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java
index c00bb6e..c190310 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java
@@ -126,18 +126,23 @@
     assertEquals("1", cache.getStatistics().get("cumulative_hits").toString());
 
     assertEquals("1 segment",
-        1, ((SolrIndexSearcher) h.getCore().getInfoRegistry().get("searcher")).getRawReader().leaves().size());
+        1, getSearcher().getRawReader().leaves().size());
+    // Get key of first leaf reader -- this one contains the match for sure.
+    Object leafKey1 = getFirstLeafReaderKey();
+
     // add new segment
     assertU(adoc("id", "3"));
+
     assertU(commit()); // sometimes merges (to one seg), sometimes won't
-    boolean newSeg =
-      (((SolrIndexSearcher)h.getCore().getInfoRegistry().get("searcher")).getRawReader().leaves().size() > 1);
 
     // can still find the same document
     assertJQ(sameReq, "/response/numFound==1", "/response/docs/[0]/id=='1'");
 
-    // when there are new segments, we accumulate another hit. This tests the cache was not blown away on commit.
-    assertEquals(newSeg ? "2" : "1", cache.getStatistics().get("cumulative_hits").toString());
+    // When there are new segments, we accumulate another hit. This tests the cache was not blown away on commit.
+    // Checking equality of the first reader's cache key indicates whether the cache should still be valid.
+    Object leafKey2 = getFirstLeafReaderKey();
+    assertEquals(leafKey1.equals(leafKey2) ? "2" : "1", cache.getStatistics().get("cumulative_hits").toString());
+
 
     // Now try to see if heatmaps work:
     assertJQ(req("q", "*:*", "facet", "true", FacetParams.FACET_HEATMAP, fieldName, "json.nl", "map"),
@@ -145,4 +150,14 @@
 
   }
 
+  protected SolrIndexSearcher getSearcher() {
+    // neat trick; needn't deal with the hassle of RefCounted
+    return (SolrIndexSearcher) h.getCore().getInfoRegistry().get("searcher");
+  }
+
+
+  protected Object getFirstLeafReaderKey() {
+    return getSearcher().getRawReader().leaves().get(0).reader().getCoreCacheKey();
+  }
+
 }
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
index 9978df2..9df7453 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
@@ -174,18 +174,23 @@
   }
 
 
+  public void indexSimple(Client client) throws Exception {
+    client.deleteByQuery("*:*", null);
+    client.add(sdoc("id", "1", "cat_s", "A", "where_s", "NY", "num_d", "4", "num_i", "2", "val_b", "true", "sparse_s", "one"), null);
+    client.add(sdoc("id", "2", "cat_s", "B", "where_s", "NJ", "num_d", "-9", "num_i", "-5", "val_b", "false"), null);
+    client.add(sdoc("id", "3"), null);
+    client.commit();
+    client.add(sdoc("id", "4", "cat_s", "A", "where_s", "NJ", "num_d", "2", "num_i", "3"), null);
+    client.add(sdoc("id", "5", "cat_s", "B", "where_s", "NJ", "num_d", "11", "num_i", "7", "sparse_s", "two"),null);
+    client.commit();
+    client.add(sdoc("id", "6", "cat_s", "B", "where_s", "NY", "num_d", "-5", "num_i", "-5"),null);
+    client.commit();
+  }
+
 
   public void testStatsSimple() throws Exception {
-    assertU(delQ("*:*"));
-    assertU(add(doc("id", "1", "cat_s", "A", "where_s", "NY", "num_d", "4", "num_i", "2", "val_b", "true",      "sparse_s","one")));
-    assertU(add(doc("id", "2", "cat_s", "B", "where_s", "NJ", "num_d", "-9", "num_i", "-5", "val_b", "false")));
-    assertU(add(doc("id", "3")));
-    assertU(commit());
-    assertU(add(doc("id", "4", "cat_s", "A", "where_s", "NJ", "num_d", "2", "num_i", "3")));
-    assertU(add(doc("id", "5", "cat_s", "B", "where_s", "NJ", "num_d", "11", "num_i", "7",                      "sparse_s","two")));
-    assertU(commit());
-    assertU(add(doc("id", "6", "cat_s", "B", "where_s", "NY", "num_d", "-5", "num_i", "-5")));
-    assertU(commit());
+    Client client = Client.localClient();
+    indexSimple(client);
 
     // test multiple json.facet commands
     assertJQ(req("q", "*:*", "rows", "0"
@@ -932,6 +937,7 @@
                 ",f8:{ type:field, field:${num_i}, sort:'index desc', offset:100, numBuckets:true }" +   // test high offset
                 ",f9:{ type:field, field:${num_i}, sort:'x desc', facet:{x:'avg(${num_d})'}, missing:true, allBuckets:true, numBuckets:true }" +            // test stats
                 ",f10:{ type:field, field:${num_i}, facet:{a:{query:'${cat_s}:A'}}, missing:true, allBuckets:true, numBuckets:true }" +     // test subfacets
+                ",f11:{ type:field, field:${num_i}, facet:{a:'unique(${num_d})'} ,missing:true, allBuckets:true, sort:'a desc' }" +     // test subfacet using unique on numeric field (this previously triggered a resizing bug)
                 "}"
         )
         , "facets=={count:6 " +
@@ -945,6 +951,7 @@
             ",f8:{ buckets:[] , numBuckets:4 } " +
             ",f9:{ buckets:[{val:7,count:1,x:11.0},{val:2,count:1,x:4.0},{val:3,count:1,x:2.0},{val:-5,count:2,x:-7.0} ],  numBuckets:4, allBuckets:{count:5,x:0.6},missing:{count:1,x:0.0} } " +  // TODO: should missing exclude "x" because no values were collected?
             ",f10:{ buckets:[{val:-5,count:2,a:{count:0}},{val:2,count:1,a:{count:1}},{val:3,count:1,a:{count:1}},{val:7,count:1,a:{count:0}} ],  numBuckets:4, allBuckets:{count:5},missing:{count:1,a:{count:0}} } " +
+            ",f11:{ buckets:[{val:-5,count:2,a:2},{val:2,count:1,a:1},{val:3,count:1,a:1},{val:7,count:1,a:1} ] , missing:{count:1,a:0} , allBuckets:{count:5,a:5}  } " +
             "}"
     );
 
@@ -1037,7 +1044,33 @@
 
   }
 
+  public void testTolerant() throws Exception {
+    initServers();
+    Client client = servers.getClient(random().nextInt());
+    client.queryDefaults().set("shards", servers.getShards() + ",[ff01::114]:33332:/ignore_exception");
+    indexSimple(client);
 
+    try {
+      client.testJQ(params("ignore_exception", "true", "shards.tolerant", "false", "q", "*:*"
+              , "json.facet", "{f:{type:terms, field:cat_s}}"
+          )
+          , "facets=={ count:6," +
+              "f:{ buckets:[{val:B,count:3},{val:A,count:2}] }" +
+              "}"
+      );
+      fail("we should have failed");
+    } catch (Exception e) {
+      // ok
+    }
+
+    client.testJQ(params("ignore_exception", "true", "shards.tolerant", "true", "q", "*:*"
+            , "json.facet", "{f:{type:terms, field:cat_s}}"
+        )
+        , "facets=={ count:6," +
+            "f:{ buckets:[{val:B,count:3},{val:A,count:2}] }" +
+            "}"
+    );
+  }
 
   public void XtestPercentiles() {
     AVLTreeDigest catA = new AVLTreeDigest(100);
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
index df3244e..6e3a1b6 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
@@ -18,18 +18,16 @@
  */
 
 import java.io.IOException;
-import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.cloud.hdfs.HdfsTestUtil;
 import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -53,34 +51,32 @@
     dfsCluster = null;
   }
   
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-  }
-  
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
   @Test
   public void testBasic() throws IOException {
     String uri = HdfsTestUtil.getURI(dfsCluster);
     Path lockPath = new Path(uri, "/basedir/lock");
     Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
     HdfsDirectory dir = new HdfsDirectory(lockPath, conf);
-    Lock lock = dir.makeLock("testlock");
-    boolean success = lock.obtain();
-    assertTrue("We could not get the lock when it should be available", success);
-    success = lock.obtain();
-    assertFalse("We got the lock but it should be unavailble", success);
-    lock.close();
-    success = lock.obtain();
-    assertTrue("We could not get the lock when it should be available", success);
-    success = lock.obtain();
-    assertFalse("We got the lock but it should be unavailble", success);
+    
+    try (Lock lock = dir.obtainLock("testlock")) {
+      assert lock != null;
+      try (Lock lock2 = dir.obtainLock("testlock")) {
+        assert lock2 != null;
+        fail("Locking should fail");
+      } catch (LockObtainFailedException lofe) {
+        // pass
+      }
+    }
+    // now repeat after close()
+    try (Lock lock = dir.obtainLock("testlock")) {
+      assert lock != null;
+      try (Lock lock2 = dir.obtainLock("testlock")) {
+        assert lock2 != null;
+        fail("Locking should fail");
+      } catch (LockObtainFailedException lofe) {
+        // pass
+      }
+    }
     dir.close();
   }
-  
-
-}
\ No newline at end of file
+}
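
The rewrite of testBasic() above tracks the current Lucene locking API: the old makeLock()/obtain() pair that returned a boolean is gone, and Directory.obtainLock(String) now either returns a live Lock or throws LockObtainFailedException, with close() releasing it. As a small standalone sketch of that idiom (the directory and lock name are placeholders):

    import java.io.IOException;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockObtainFailedException;

    // try-with-resources acquires and releases the lock; a second obtainLock() on the same
    // name fails fast with LockObtainFailedException instead of returning false.
    static void doWorkUnderLock(Directory dir) throws IOException {
      try (Lock lock = dir.obtainLock("testlock")) {
        // ... work while the lock is held; close() at the end of the block releases it ...
      } catch (LockObtainFailedException e) {
        // another holder already owns "testlock"
      }
    }
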
diff --git a/solr/core/src/test/org/apache/solr/update/processor/UpdateRequestProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/UpdateRequestProcessorFactoryTest.java
index fe94994..101be70 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/UpdateRequestProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/UpdateRequestProcessorFactoryTest.java
@@ -20,22 +20,36 @@
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
 import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.util.AbstractSolrTestCase;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
 /**
  * 
  */
 public class UpdateRequestProcessorFactoryTest extends AbstractSolrTestCase {
+
+  private static org.apache.log4j.Level SAVED_LEVEL = null; // SOLR-7603 - remove
   
   @BeforeClass
   public static void beforeClass() throws Exception {
+
+    // SOLR-7603 - remove
+    SAVED_LEVEL = org.apache.log4j.LogManager.getRootLogger().getLevel();
+    org.apache.log4j.LogManager.getRootLogger().setLevel(org.apache.log4j.Level.DEBUG);
+    
     initCore("solrconfig-transformers.xml", "schema.xml");
   }
   
+  @AfterClass
+  public static void fixLogLevelAfterClass() throws Exception { // SOLR-7603 - remove
+    org.apache.log4j.LogManager.getRootLogger().setLevel(SAVED_LEVEL);
+  }
 
   public void testConfiguration() throws Exception 
   {
@@ -70,48 +84,95 @@
   }
 
   public void testUpdateDistribChainSkipping() throws Exception {
+
+    // a key part of this test is verifying that LogUpdateProcessor is found in all chains because it
+    // is a @RunAlways processor -- but in order for that to work, we have to sanity check that the log
+    // level is at least "INFO", otherwise the factory won't even produce a processor and all our assertions
+    // are for nought.  (see LogUpdateProcessorFactory.getInstance)
+    //
+    // TODO: maybe create a new mock Processor w/ @RunAlways annot if folks feel requiring INFO is evil.
+    assertTrue("Tests must be run with INFO level logging "+
+               "otherwise LogUpdateProcessor isn't used and can't be tested.",
+               LogUpdateProcessor.log.isInfoEnabled());
+    
+    final int EXPECTED_CHAIN_LENGTH = 5;
     SolrCore core = h.getCore();
     for (final String name : Arrays.asList("distrib-chain-explicit",
                                            "distrib-chain-implicit",
                                            "distrib-chain-noop")) {
 
       UpdateRequestProcessor proc;
+      List<UpdateRequestProcessor> procs;
+      
       UpdateRequestProcessorChain chain = core.getUpdateProcessingChain(name);
       assertNotNull(name, chain);
 
       // either explicitly, or because of injection
-      assertEquals(name + " chain length", 5,
+      assertEquals(name + " chain length: " + chain.toString(), EXPECTED_CHAIN_LENGTH,
                    chain.getFactories().length);
 
-      // Custom comes first in all three of our chains
+      // test a basic (non distrib) chain
       proc = chain.createProcessor(req(), new SolrQueryResponse());
-      assertTrue(name + " first processor isn't a CustomUpdateRequestProcessor: " 
-                 + proc.getClass().getName(),
-                 proc instanceof CustomUpdateRequestProcessor);
+      procs = procToList(proc);
+      assertEquals(name + " procs size: " + procs.toString(),
+                   // -1 = NoOpDistributingUpdateProcessorFactory produces no processor
+                   EXPECTED_CHAIN_LENGTH - ("distrib-chain-noop".equals(name) ? 1 : 0),
+                   procs.size());
+      
+      // Custom comes first in all three of our chains
+      assertTrue(name + " first processor isn't a CustomUpdateRequestProcessor: " + procs.toString(),
+                 ( // compare them both just because i'm going insane and the more checks the better
+                   proc instanceof CustomUpdateRequestProcessor
+                   && procs.get(0) instanceof CustomUpdateRequestProcessor));
 
-      // varies depending on chain, but definitely shouldn't be Custom
+      // Log should always come second in our chain.
+      assertNotNull(name + " proc.next is null", proc.next);
+      assertNotNull(name + " second proc is null", procs.get(1));
+
+      assertTrue(name + " second proc isn't LogUpdateProcessor: " + procs.toString(),
+                 ( // compare them both just because i'm going insane and the more checks the better
+                   proc.next instanceof LogUpdateProcessor
+                   && procs.get(1) instanceof LogUpdateProcessor));
+
+      // fetch the distributed version of this chain
       proc = chain.createProcessor(req(DISTRIB_UPDATE_PARAM, "non_blank_value"),
                                    new SolrQueryResponse());
-      assertFalse(name + " post distrib proc should not be a CustomUpdateRequestProcessor: " 
-                 + proc.getClass().getName(),
-                 proc instanceof CustomUpdateRequestProcessor);
+      procs = procToList(proc);
+      assertNotNull(name + " (distrib) chain produced null proc", proc);
+      assertFalse(name + " (distrib) procs is empty", procs.isEmpty());
 
-      int n=0;
-      boolean foundLog = false;
-      for (;;) {
-        n++;
-        if (proc instanceof LogUpdateProcessor) {
-          foundLog = true;
-        }
-        proc = proc.next;
-        if (proc == null) break;
-      }
+      // for these 3 (distrib) chains, the first proc should always be LogUpdateProcessor
+      assertTrue(name + " (distrib) first proc should be LogUpdateProcessor because of @RunAllways: "
+                 + procs.toString(),
+                 ( // compare them both just because i'm going insane and the more checks the better
+                   proc instanceof LogUpdateProcessor
+                   && procs.get(0) instanceof LogUpdateProcessor));
 
-      assertTrue( n < chain.getFactories().length );   // some processors should have been dropped
-      assertTrue( foundLog );  // make sure the marker interface was successful in keeping the log processor
+      // for these 3 (distrib) chains, the last proc should always be RunUpdateProcessor
+      assertTrue(name + " (distrib) last processor isn't a RunUpdateProcessor: " + procs.toString(),
+                 procs.get(procs.size()-1) instanceof RunUpdateProcessor );
 
+      // either 1 proc was dropped in distrib mode, or 1 for the "implicit" chain
+      assertEquals(name + " (distrib) chain has wrong length: " + procs.toString(),
+                   // -1 = all chains lose CustomUpdateRequestProcessorFactory
+                   // -1 = distrib-chain-noop: NoOpDistributingUpdateProcessorFactory produces no processor
+                   // -1 = distrib-chain-implicit: does RemoveBlank before distrib
+                   EXPECTED_CHAIN_LENGTH - ( "distrib-chain-explicit".equals(name) ? 1 : 2),
+                   procs.size());
     }
 
   }
 
+  /**
+   * walks the "next" values of the proc building up a List of the procs for easier testing
+   */
+  public static List<UpdateRequestProcessor> procToList(UpdateRequestProcessor proc) {
+    List<UpdateRequestProcessor> result = new ArrayList<UpdateRequestProcessor>(7);
+    while (null != proc) {
+      result.add(proc);
+      proc = proc.next;
+    }
+    return result;
+  }
 }
+
diff --git a/solr/example/example-DIH/solr/db/conf/solrconfig.xml b/solr/example/example-DIH/solr/db/conf/solrconfig.xml
index 2b32375..0663dcf 100644
--- a/solr/example/example-DIH/solr/db/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/db/conf/solrconfig.xml
@@ -263,19 +263,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/example-DIH/solr/mail/conf/solrconfig.xml b/solr/example/example-DIH/solr/mail/conf/solrconfig.xml
index 44c1a37..e199ae7 100644
--- a/solr/example/example-DIH/solr/mail/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/mail/conf/solrconfig.xml
@@ -266,19 +266,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml b/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
index cba64b8..a2e81da 100644
--- a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
@@ -263,19 +263,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/example-DIH/solr/solr/conf/solrconfig.xml b/solr/example/example-DIH/solr/solr/conf/solrconfig.xml
index e640a4c..e70af1b 100644
--- a/solr/example/example-DIH/solr/solr/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/solr/conf/solrconfig.xml
@@ -263,19 +263,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
index 5240cf0..d94553c 100644
--- a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
@@ -264,19 +264,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/files/README.txt b/solr/example/files/README.txt
index 12affd1..680d94e 100644
--- a/solr/example/files/README.txt
+++ b/solr/example/files/README.txt
@@ -40,7 +40,7 @@
 
 		bin/solr create -c files -d example/files/conf
 
-* Now you’ve created a core called “files” using a configuration tuned for indexing and query rich text files.
+* Now you’ve created a core called “files” using a configuration tuned for indexing and querying rich text files.
 
 * You should see the following response:
 
@@ -65,6 +65,10 @@
 		<some number> files indexed.
 		COMMITting Solr index changes to http://localhost:8983/solr/files/update...
 		Time spent: <some amount of time>
+		
+* To see a list of accepted file types, do:
+  	  	bin/post -h
+	
 
 <hr>
 ##BROWSING DOCUMENTS
@@ -73,7 +77,7 @@
 
 * To view your document information in the HTML interface view, adjust the URL in your address bar to [http://localhost:8983/solr/files/browse](http://localhost:8983/solr/files/browse)
 
-* To view your document information in XML or other formats, add &wt (for writer type) to the end of that URL. i.e.:
+* To view your document information in XML or other formats, add &wt (for writer type) to the end of that URL, e.g. to view your results in XML format, direct your browser to:
 	[http://localhost:8983/solr/files/browse?&wt=xml](http://localhost:8983/solr/files/browse?&wt=xml)
 
 <hr>
@@ -81,9 +85,9 @@
 
 * Another way to verify that your core has been created is to view it in the Admin User Interface.
 
-You can use the Admin_UI as a visual tool for most of the things you’ll be doing with your cores/collections in Solr.
+	- The Admin_UI serves as a visual tool for indexing and querying your index in Solr.
 
-* To access the Admin UI, go to your browser visit :
+* To access the Admin UI, go to your browser and visit:
 	[http://localhost:8983/solr/](http://localhost:8983/solr/)
 
 	- <i>The Admin UI is only accessible when Solr is running</i>
@@ -92,13 +96,14 @@
 * Alternatively, you could just go to the core page directly by visiting : [http://localhost:8983/solr/#/files](http://localhost:8983/solr/#/files)
 
 * Now you’ve opened the core page. On this page there are a multitude of different tools you can use to analyze and search your core. You will make use of these features after indexing your documents.
+* Take note of the "Num Docs" field in your core Statistics. If it still shows 0 after you have indexed your documents, there was a problem with indexing.
 
 <hr>
 ##QUERYING INDEX
 
 * In the Admin UI, enter a term in the query box to see which documents contain the word. 
 
-* You can filter the results by switching between the different content type tabs.
+* You can filter the results by switching between the different content type tabs. To view an international version of this interface, hover over the globe icon in the top right hand section of the page.
 
 * Notice the tag cloud on the right side, which facets by top phrases extracted during indexing.
   Click on the phrases to see which documents contain them.
@@ -130,27 +135,18 @@
 
 * How can I change the /browse UI?
 
-	The primary templates are under example/files/conf/velocity.  In order to edit those files in place (without having to
-	re-create or patch a core/collection with an updated configuration), Solr can be started with a special system property
-	set to the _absolute_ path to the conf/velocity directory, like this:
-
-
-	 bin/solr start -Dvelocity.template.base.dir=</full/path/to>/example/files/conf/velocity/
+	The primary templates are under example/files/conf/velocity.  **In order to edit those files in place (without having to
+	re-create or patch a core/collection with an updated configuration)**, Solr can be started with a special system property
+	set to the _absolute_ path to the conf/velocity directory, like this: 
 	
+		bin/solr start -Dvelocity.template.base.dir=</full/path/to>/example/files/conf/velocity/
 	
-bin/solr stop
-rm -Rf server/solr/files/
+        If you want to adjust the browse templates for an existing collection, edit the core’s configuration
+        under server/solr/files/conf/velocity.
 
-# templates extracted with:
-#    unzip  -j dist/solr-velocity-*.jar velocity/* -x *.properties -d example/files/templates/
-bin/solr start -Dvelocity.template.base.dir=<absolute path to example/files/templates>
-# TODO: make it so an install dir relative path can be used somehow?
-bin/solr create_core -c files
-bin/post -c files ~/Documents
-curl http://localhost:8983/solr/files/config/params -H 'Content-type:application/json'  -d '{
-"update" : {
-  "facets": {
-    "facet.field":"content_type"
-    }
-  }
-}'
\ No newline at end of file
+
+=======
+
+* Provenance of free images used in this example:
+  - Globe icon: visualpharm.com
+  - Flag icons: freeflagicons.com
\ No newline at end of file
diff --git a/solr/example/files/browse-resources/velocity/resources.properties b/solr/example/files/browse-resources/velocity/resources.properties
new file mode 100644
index 0000000..4397bac
--- /dev/null
+++ b/solr/example/files/browse-resources/velocity/resources.properties
@@ -0,0 +1,24 @@
+# Title: "<Solr logo> Powered File Search"
+powered_file_search=Powered File Search
+
+# Search box and results
+find=Find
+submit=Submit
+page_of=Page <span class="page-num">{0}</span> of <span class="page-count">{1}</span>
+previous=previous
+next=next
+results_found_in=results found in {0}ms
+results_found=results found
+ 
+# Facets
+top_phrases=Top Phrases
+ 
+# Type labels
+type.all=All Types
+type.doc.label=Document
+type.html.label=HTML
+type.pdf.label=PDF
+type.presentation.label=Presentation
+type.spreadsheet.label=Spreadsheet
+type.text.label=text
+type.unknown=unknown
diff --git a/solr/example/files/browse-resources/velocity/resources_de_DE.properties b/solr/example/files/browse-resources/velocity/resources_de_DE.properties
new file mode 100644
index 0000000..bc5f72c
--- /dev/null
+++ b/solr/example/files/browse-resources/velocity/resources_de_DE.properties
@@ -0,0 +1,17 @@
+find=Durchsuchen
+page_of=Page <span class="page-num">{0}</span> von <span class="page-count">{1}</span>
+previous=vorherige Seite
+next=n\u00e4chste Seite
+results_found_in=Ergebnisse in {0}ms gefunden
+results_found=Ergebnisse gefunden
+powered_file_search= betriebene Dateisuche
+type.text.label=Text
+type.pdf.label=PDF
+type.html.label=HTML
+type.presentation.label=Pr\u00e4sentation
+type.doc.label=Dokument
+type.spreadsheet.label=Kalkulationstabelle
+type.unknown=unbekannt
+type.all=alle Arten
+top_phrases=Schl\u00fcssels\u00e4tze
+submit=einreichen
\ No newline at end of file
diff --git a/solr/example/files/browse-resources/velocity/resources_fr_FR.properties b/solr/example/files/browse-resources/velocity/resources_fr_FR.properties
new file mode 100644
index 0000000..049c259
--- /dev/null
+++ b/solr/example/files/browse-resources/velocity/resources_fr_FR.properties
@@ -0,0 +1,19 @@
+find=Recherche
+page_of=Page <span class="page-num">{0}</span> de <span class="page-count">{1}</span>
+previous=pr\u00e9c\u00e9dent
+next=suivant
+results_found_in=r\u00e9sultats trouv\u00e9s en {0}ms
+results_found=r\u00e9sultats trouv\u00e9s
+powered_file_search=Recherches de Fichiers
+type.text.label=Texte
+type.pdf.label=PDF
+type.html.label=HTML
+type.presentation.label=Pr\u00e9sentation
+type.doc.label=Documents
+type.spreadsheet.label=Tableur
+type.unknown=inconnu
+type.all=Tous les Types
+top_phrases=Phrases Cl\u00e9s
+submit=Recherche
+
+
diff --git a/solr/example/files/conf/email_url_types.txt b/solr/example/files/conf/email_url_types.txt
new file mode 100644
index 0000000..622b193
--- /dev/null
+++ b/solr/example/files/conf/email_url_types.txt
@@ -0,0 +1,2 @@
+<URL>
+<EMAIL>
diff --git a/solr/example/files/conf/managed-schema b/solr/example/files/conf/managed-schema
index 61c5c1c..9b1b820 100644
--- a/solr/example/files/conf/managed-schema
+++ b/solr/example/files/conf/managed-schema
@@ -398,6 +398,13 @@
   <fieldType name="tlong" class="solr.TrieLongField" positionIncrementGap="0" precisionStep="8"/>
   <fieldType name="tlongs" class="solr.TrieLongField" positionIncrementGap="0" multiValued="true" precisionStep="8"/>
 
+  <fieldType name="text_email_url" class="solr.TextField">
+    <analyzer>
+      <tokenizer class="solr.UAX29URLEmailTokenizerFactory"/>
+      <filter class="solr.TypeTokenFilterFactory" types="email_url_types.txt" useWhitelist="true"/>
+    </analyzer>
+  </fieldType>
+
   <fieldType name="text_shingles" class="solr.TextField" positionIncrementGap="100" multiValued="true">
     <analyzer type="index">
       <tokenizer class="solr.StandardTokenizerFactory"/>
diff --git a/solr/example/files/conf/params.json b/solr/example/files/conf/params.json
index 2c608b2..d8986cc 100644
--- a/solr/example/files/conf/params.json
+++ b/solr/example/files/conf/params.json
@@ -18,6 +18,7 @@
     "type_fq":"{!field f=doc_type v=$type}",
     "hl":"on",
     "hl.fl":"content",
+    "v.locale":"${locale}",
     "debug":"true",
     "":{"v":0}},
   "velocity":{
diff --git a/solr/example/files/conf/solrconfig.xml b/solr/example/files/conf/solrconfig.xml
index 1ea6484..f6b68b91 100644
--- a/solr/example/files/conf/solrconfig.xml
+++ b/solr/example/files/conf/solrconfig.xml
@@ -82,6 +82,7 @@
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
 
   <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib path="${solr.install.dir:../../../..}/example/files/browse-resources"/>
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
   <!-- an exact 'path' can be used instead of a 'dir' to specify a 
        specific jar file.  This will cause a serious error to be logged 
@@ -243,19 +244,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
diff --git a/solr/example/files/conf/update-script.js b/solr/example/files/conf/update-script.js
index 7e6069e..d2ac002 100644
--- a/solr/example/files/conf/update-script.js
+++ b/solr/example/files/conf/update-script.js
@@ -57,17 +57,27 @@
         break;
     }
 
-
     // TODO: error handling needed?   What if there is no slash?
     if(doc_type) { doc.setField("doc_type", doc_type); }
     doc.setField("content_type_type_s", ct_type);
     doc.setField("content_type_subtype_s", ct_subtype);
-
-// doc, image, unknown, ...
-    // application/pdf => doc
-    // application/msword => doc
-    // image/* => image
   }
+
+  var analyzer =
+       req.getCore().getLatestSchema()
+       .getFieldTypeByName("text_email_url")
+       .getIndexAnalyzer();
+
+  var token_stream =
+       analyzer.tokenStream("content", new java.io.StringReader(doc.getFieldValue("content")));
+  var term_att = token_stream.getAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute.class);
+  var type_att = token_stream.getAttribute(org.apache.lucene.analysis.tokenattributes.TypeAttribute.class);
+  token_stream.reset();
+  while (token_stream.incrementToken()) {
+    doc.addField(type_att.type().replace(/\<|\>/g,'').toLowerCase()+"_ss", term_att.toString());
+  }
+  token_stream.end();
+  token_stream.close();
 }
 
 function processDelete(cmd) {
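
The block added to processAdd() above feeds the document's content through the text_email_url analyzer (defined in managed-schema earlier in this patch) and copies every token it emits into a dynamic field named after its token type, e.g. url_ss and email_ss once the angle brackets are stripped. A rough Java equivalent of that loop, assuming an Analyzer obtained the same way from the schema:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

    // Collect "type:term" pairs, e.g. "<URL>:http://example.com" or "<EMAIL>:foo@bar.com";
    // the update script lowercases the type and strips the angle brackets to build the field name.
    static List<String> extractTypedTokens(Analyzer analyzer, String content) throws IOException {
      List<String> result = new ArrayList<>();
      try (TokenStream ts = analyzer.tokenStream("content", content)) {
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        TypeAttribute type = ts.addAttribute(TypeAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          result.add(type.type() + ":" + term.toString());
        }
        ts.end();
      }
      return result;
    }
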
diff --git a/solr/example/files/conf/velocity/browse.vm b/solr/example/files/conf/velocity/browse.vm
index 7267e88..1679c98 100644
--- a/solr/example/files/conf/velocity/browse.vm
+++ b/solr/example/files/conf/velocity/browse.vm
@@ -2,12 +2,13 @@
   <form id="query-form" action="#{url_for_home}" method="GET">
     $resource.find:
     <input type="text" id="q" name="q" style="width: 50%" value="$!esc.html($request.params.get('q'))"/>
-    <input type="submit"/>
+    <input type="submit" value="$resource.submit"/>
     <div id="debug_query" class="debug">
       <span id="parsed_query">$esc.html($response.response.debug.parsedquery)</span>
     </div>
 
     <input type="hidden" name="type" value="#current_type"/>
+    #if("#current_locale"!="")<input type="hidden" value="locale" value="#current_locale"/>#end
     #foreach($fq in $response.responseHeader.params.getAll("fq"))
       <input type="hidden" name="fq" id="allFQs" value="$esc.html($fq)"/>
     #end
@@ -33,7 +34,7 @@
 
   <div id="results_list">
     <div class="pagination">
-      <span class="results-found">$page.results_found</span> results found in ${response.responseHeader.QTime}ms
+      <span class="results-found">$page.results_found</span> $resource.results_found_in.insert(${response.responseHeader.QTime})
       $resource.page_of.insert($page.current_page_number,$page.page_count)
     </div>
 
@@ -41,7 +42,7 @@
 
     <div class="pagination">
       #link_to_previous_page
-      <span class="results-found">$page.results_found</span> results found.
+      <span class="results-found">$page.results_found</span> $resource.results_found.
       $resource.page_of.insert($page.current_page_number,$page.page_count)
       #link_to_next_page
     </div>
diff --git a/solr/example/files/conf/velocity/dropit.js b/solr/example/files/conf/velocity/dropit.js
new file mode 100644
index 0000000..3094414
--- /dev/null
+++ b/solr/example/files/conf/velocity/dropit.js
@@ -0,0 +1,97 @@
+/*
+ * Dropit v1.1.0
+ * http://dev7studios.com/dropit
+ *
+ * Copyright 2012, Dev7studios
+ * Free to use and abuse under the MIT license.
+ * http://www.opensource.org/licenses/mit-license.php
+ */
+
+;(function($) {
+
+    $.fn.dropit = function(method) {
+
+        var methods = {
+
+            init : function(options) {
+                this.dropit.settings = $.extend({}, this.dropit.defaults, options);
+                return this.each(function() {
+                    var $el = $(this),
+                         el = this,
+                         settings = $.fn.dropit.settings;
+
+                    // Hide initial submenus
+                    $el.addClass('dropit')
+                    .find('>'+ settings.triggerParentEl +':has('+ settings.submenuEl +')').addClass('dropit-trigger')
+                    .find(settings.submenuEl).addClass('dropit-submenu').hide();
+
+                    // Open on click
+                    $el.off(settings.action).on(settings.action, settings.triggerParentEl +':has('+ settings.submenuEl +') > '+ settings.triggerEl +'', function(){
+                        // Close click menus if clicked again
+                        if(settings.action == 'click' && $(this).parents(settings.triggerParentEl).hasClass('dropit-open')){
+                            settings.beforeHide.call(this);
+                            $(this).parents(settings.triggerParentEl).removeClass('dropit-open').find(settings.submenuEl).hide();
+                            settings.afterHide.call(this);
+                            return false;
+                        }
+
+                        // Hide open menus
+                        settings.beforeHide.call(this);
+                        $('.dropit-open').removeClass('dropit-open').find('.dropit-submenu').hide();
+                        settings.afterHide.call(this);
+
+                        // Open this menu
+                        settings.beforeShow.call(this);
+                        $(this).parents(settings.triggerParentEl).addClass('dropit-open').find(settings.submenuEl).show();
+                        settings.afterShow.call(this);
+
+                        return false;
+                    });
+
+                    // Close if outside click
+                    $(document).on('click', function(){
+                        settings.beforeHide.call(this);
+                        $('.dropit-open').removeClass('dropit-open').find('.dropit-submenu').hide();
+                        settings.afterHide.call(this);
+                    });
+
+                    // If hover
+                    if(settings.action == 'mouseenter'){
+                        $el.on('mouseleave', '.dropit-open', function(){
+                            settings.beforeHide.call(this);
+                            $(this).removeClass('dropit-open').find(settings.submenuEl).hide();
+                            settings.afterHide.call(this);
+                        });
+                    }
+
+                    settings.afterLoad.call(this);
+                });
+            }
+
+        };
+
+        if (methods[method]) {
+            return methods[method].apply(this, Array.prototype.slice.call(arguments, 1));
+        } else if (typeof method === 'object' || !method) {
+            return methods.init.apply(this, arguments);
+        } else {
+            $.error( 'Method "' +  method + '" does not exist in dropit plugin!');
+        }
+
+    };
+
+    $.fn.dropit.defaults = {
+        action: 'mouseenter', // The open action for the trigger
+        submenuEl: 'ul', // The submenu element
+        triggerEl: 'a', // The trigger element
+        triggerParentEl: 'li', // The trigger parent element
+        afterLoad: function(){}, // Triggers when plugin has loaded
+        beforeShow: function(){}, // Triggers before submenu is shown
+        afterShow: function(){}, // Triggers after submenu is shown
+        beforeHide: function(){}, // Triggers before submenu is hidden
+        afterHide: function(){} // Triggers after submenu is hidden
+    };
+
+    $.fn.dropit.settings = {};
+
+})(jQuery);
diff --git a/solr/example/files/conf/velocity/facet_text_shingles.vm b/solr/example/files/conf/velocity/facet_text_shingles.vm
index 86ee400..4375bd2 100644
--- a/solr/example/files/conf/velocity/facet_text_shingles.vm
+++ b/solr/example/files/conf/velocity/facet_text_shingles.vm
@@ -1,5 +1,5 @@
 <div id="facet_$field.name">
-  <span class="facet-field">Top Phrases</span><br/>
+  <span class="facet-field">$resource.top_phrases</span><br/>
 
   <ul id="tagcloud">
     #foreach($facet in $sort.sort($field.values,"name"))
diff --git a/solr/example/files/conf/velocity/head.vm b/solr/example/files/conf/velocity/head.vm
index 9e49b12..7ce8979 100644
--- a/solr/example/files/conf/velocity/head.vm
+++ b/solr/example/files/conf/velocity/head.vm
@@ -9,13 +9,19 @@
 
 <script type="text/javascript" src="#{url_root}/js/lib/jquery-1.7.2.min.js"></script>
 <script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/jquery.tx3-tag-cloud.js&contentType=text/javascript"></script>
+<script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/dropit.js&contentType=text/javascript"></script>
 
 <script type="text/javascript">
   $(document).ready(function(){
     $("#tagcloud").tx3TagCloud({
       multiplier: 5
     });
+
+    $('.menu').dropit();
   });
+
+
 </script>
 
 <style>
@@ -33,6 +39,11 @@
     font-size: 20pt;
   }
 
+  #header2 {
+    margin-left: 1200px;
+  }
+
   #logo {
     width: 115px;
     margin: 0px 0px 0px 0px;
@@ -42,6 +53,9 @@
   a {
     color: #305CB3;
   }
+  a.hidden {
+    display: none;
+  }
 
   em {
     color: #FF833D;
@@ -187,4 +201,22 @@
     transition: color 250ms linear;
   }
 
+  .dropit {
+    list-style: none;
+    padding: 0;
+    margin: 0;
+  }
+  .dropit .dropit-trigger { position: relative; }
+  .dropit .dropit-submenu {
+    position: absolute;
+    top: 100%;
+    left: 0; /* dropdown left or right */
+    z-index: 1000;
+    display: none;
+    min-width: 150px;
+    list-style: none;
+    padding: 0;
+    margin: 0;
+  }
+  .dropit .dropit-open .dropit-submenu { display: block; }
 </style>
\ No newline at end of file
diff --git a/solr/example/files/conf/velocity/img/english_640.png b/solr/example/files/conf/velocity/img/english_640.png
new file mode 100644
index 0000000..81256a1
--- /dev/null
+++ b/solr/example/files/conf/velocity/img/english_640.png
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/france_640.png b/solr/example/files/conf/velocity/img/france_640.png
new file mode 100644
index 0000000..16d4541
--- /dev/null
+++ b/solr/example/files/conf/velocity/img/france_640.png
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/germany_640.png b/solr/example/files/conf/velocity/img/germany_640.png
new file mode 100644
index 0000000..f5d6ae8
--- /dev/null
+++ b/solr/example/files/conf/velocity/img/germany_640.png
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/globe_256.png b/solr/example/files/conf/velocity/img/globe_256.png
new file mode 100644
index 0000000..514597b
--- /dev/null
+++ b/solr/example/files/conf/velocity/img/globe_256.png
Binary files differ
diff --git a/solr/example/files/conf/velocity/layout.vm b/solr/example/files/conf/velocity/layout.vm
index 9a5153d..ef6caf7 100644
--- a/solr/example/files/conf/velocity/layout.vm
+++ b/solr/example/files/conf/velocity/layout.vm
@@ -4,9 +4,25 @@
 </head>
   <body>
     <div id="header">
-      <a href="#url_for_home"><img src="#{url_root}/img/solr.svg" id="logo" title="Solr"/></a> Powered File Search
+      <a href="#url_for_home"><img src="#{url_root}/img/solr.svg" id="logo" title="Solr"/></a> $resource.powered_file_search
     </div>
 
+    <div id="header2" onclick="javascript:locale_select()">
+      <ul class="menu">
+
+        <li>
+          <a href="#"><img src="#{url_for_solr}/admin/file?file=/velocity/img/globe_256.png&contentType=image/png" id="locale_pic" title="locale_select" width="30px" height="27px"/></a>
+          <ul>
+            <li><a href="#url_for_locale('fr_FR')" #if("#current_locale"=="fr_FR")class="hidden"#end>
+              <img src="#{url_for_solr}/admin/file?file=/velocity/img/france_640.png&contentType=image/png" id="french_flag"  width="40px" height="40px"/>Fran&ccedil;ais</a></li>
+            <li><a href="#url_for_locale('de_DE')" #if("#current_locale"=="de_DE")class="hidden"#end>
+              <img src="#{url_for_solr}/admin/file?file=/velocity/img/germany_640.png&contentType=image/png" id="german_flag"  width="40px" height="40px"/>Deutsch</a></li>
+            <li><a href="#url_for_locale('')" #if("#current_locale"=="")class="hidden"#end>
+              <img src="#{url_for_solr}/admin/file?file=/velocity/img/english_640.png&contentType=image/png" id="english_flag"  width="40px" height="40px"/>English</a></li>
+          </ul>
+        </li>
+      </ul>
+    </div>
 
     #if($response.response.error.code)
       <div class="error">
diff --git a/solr/example/files/conf/velocity/macros.vm b/solr/example/files/conf/velocity/macros.vm
index 92a82d4..d2bdb69 100644
--- a/solr/example/files/conf/velocity/macros.vm
+++ b/solr/example/files/conf/velocity/macros.vm
@@ -1,7 +1,7 @@
 #macro(lensNoQ)?#if($list.size($response.responseHeader.params.getAll("fq")) > 0)&#fqs($response.responseHeader.params.getAll("fq"))#end#sort($request.params.getParams('sort'))#end
 
 ## lens modified for example/files - to use fq from responseHeader rather than request, and #debug removed too as it is built into browse params now, also added type to lens
-#macro(lens)#lensNoQ#q&type=#current_type#end
+#macro(lens)#lensNoQ#q&type=#current_type#if("#current_locale"!="")&locale=#current_locale#end#end
 
 
 # TODO: make this parameterized fully, no context sensitivity
@@ -44,5 +44,7 @@
 #end
 
 ## Macros defined custom for the "files" example
-#macro(url_for_type $type)#url_for_home#lensNoQ#q&type=$type#end
+#macro(url_for_type $type)#url_for_home#lensNoQ#q&type=$type#if("#current_locale"!="")&locale=#current_locale#end#end
 #macro(current_type)#if($response.responseHeader.params.type)${response.responseHeader.params.type}#{else}all#end#end
+#macro(url_for_locale $locale)#url_for_home#lensNoQ#q&type=#current_type#if($locale!="")&locale=$locale#end#end
+#macro(current_locale)$!{response.responseHeader.params.locale}#end
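
The two new macros carry the UI language through the example's links: #current_locale echoes the locale request parameter (empty when none was sent), and #url_for_locale rebuilds the current browse URL with the chosen locale while keeping the active type filter; #lens and #url_for_type likewise append &locale=... whenever a locale is set, so paging and type switching preserve the selected language. As a purely illustrative sketch (the real base URL and query parameters depend on the request), the German menu entry expands to something like:

    #url_for_locale('de_DE')  =>  /solr/browse?&q=*:*&type=all&locale=de_DE
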
diff --git a/solr/example/files/conf/velocity/resources.properties b/solr/example/files/conf/velocity/resources.properties
deleted file mode 100644
index dff221c..0000000
--- a/solr/example/files/conf/velocity/resources.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-find=Find
-page_of=Page <span class="page-num">{0}</span> of <span class="page-count">{1}</span>
-previous=previous
-next=next
-
-
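
The flat resources.properties goes away because the templates above now pull their UI strings through $resource (e.g. $resource.top_phrases, $resource.powered_file_search, $resource.type.all), presumably backed by locale-aware resource bundles so that each language offered in the new menu can ship its own translations. A minimal sketch of that kind of lookup in plain Java, assuming per-locale bundle files such as resources.properties and resources_de_DE.properties on the resource path (the bundle names and the German file are assumptions for illustration, not taken from this patch):

    import java.util.Locale;
    import java.util.ResourceBundle;

    // Sketch only: illustrates ResourceBundle fallback, which is the behaviour
    // locale-aware UI strings rely on. Bundle files are assumed to exist.
    public class ResourceLookupSketch {
      public static void main(String[] args) {
        ResourceBundle base   = ResourceBundle.getBundle("resources", Locale.ROOT);
        ResourceBundle german = ResourceBundle.getBundle("resources", Locale.GERMANY);

        // A key missing from resources_de_DE.properties resolves from the base bundle.
        System.out.println(base.getString("top_phrases"));
        System.out.println(german.getString("powered_file_search"));
      }
    }

The point of the sketch is the fallback rule: only strings that are actually translated need to be overridden in a per-locale file; everything else falls back to the base bundle.
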
diff --git a/solr/example/files/conf/velocity/results_list.vm b/solr/example/files/conf/velocity/results_list.vm
index 774f106..dd1119a 100644
--- a/solr/example/files/conf/velocity/results_list.vm
+++ b/solr/example/files/conf/velocity/results_list.vm
@@ -1,11 +1,11 @@
 <ul id="tabs">
-  <li><a href="#url_for_type('all')" #if("#current_type"=="all")class="selected"#end>All Types ($response.response.facet_counts.facet_queries.all_types)</a></li>
+  <li><a href="#url_for_type('all')" #if("#current_type"=="all")class="selected"#end>$resource.type.all ($response.response.facet_counts.facet_queries.all_types)</a></li>
   #foreach($type in $response.response.facet_counts.facet_fields.doc_type)
     #if($type.key)
-      <li><a href="#url_for_type($type.key)" #if("#current_type"==$type.key)class="selected"#end>$type.key ($type.value)</a></li>
+      <li><a href="#url_for_type($type.key)" #if("#current_type"==$type.key)class="selected"#end> #if($resource.get("type.${type.key}.label"))$resource.get("type.${type.key}.label")#else$type.key#end ($type.value)</a></li>
     #else
       #if($type.value > 0)
-        <li><a href="#url_for_type('unknown')" #if("#current_type"=="unknown")class="selected"#end>Unknown ($type.value)</a></li>
+        <li><a href="#url_for_type('unknown')" #if("#current_type"=="unknown")class="selected"#end>$resource.type.unknown ($type.value)</a></li>
       #end
     #end
   #end
@@ -17,3 +17,5 @@
     #parse("hit.vm")
   #end
 </div>
+
+
diff --git a/solr/server/resources/log4j.properties b/solr/server/resources/log4j.properties
index 1f2916a..2465eae 100644
--- a/solr/server/resources/log4j.properties
+++ b/solr/server/resources/log4j.properties
@@ -4,8 +4,8 @@
 
 log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
 
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-4r [%t] %-5p %c %x [%X{collection} %X{shard} %X{replica} %X{core}] \u2013 %m%n
+log4j.appender.CONSOLE.layout=org.apache.log4j.EnhancedPatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%-4r %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n
 
 #- size rotation with log cleanup.
 log4j.appender.file=org.apache.log4j.RollingFileAppender
@@ -14,8 +14,8 @@
 
 #- File to log to and log format
 log4j.appender.file.File=${solr.log}/solr.log
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; [%X{collection} %X{shard} %X{replica} %X{core}] %C; %m\n
+log4j.appender.file.layout=org.apache.log4j.EnhancedPatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m\n
 
 log4j.logger.org.apache.zookeeper=WARN
 log4j.logger.org.apache.hadoop=WARN
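
Both appenders move from PatternLayout to EnhancedPatternLayout, whose %c{1.} precision specifier abbreviates each package element of the logger name to its first letter while keeping the class name, and the patterns now lead with the level, the thread, and the MDC fields (collection, shard, replica, core). As a rough illustration only, with all values invented for the example, a console line under the new pattern would look something like:

    12345 INFO  (qtp1540374340-18) [collection1 shard1 replica1 core_node1] o.a.s.c.SolrCore <log message>
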
diff --git a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
index 8e50a0e..58dea24 100644
--- a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
@@ -157,7 +157,7 @@
     -->
     <updateLog>
       <str name="dir">${solr.ulog.dir:}</str>
-      <int name="">${solr.ulog.numVersionBuckets:65536}</int>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
     </updateLog>
  
     <!-- AutoCommit
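
The old empty name attribute stored the value under no key, so the configured numVersionBuckets was effectively ignored; naming it correctly makes the ${solr.ulog.numVersionBuckets:65536} substitution take effect (the same one-line fix recurs in the data_driven_schema_configs and sample_techproducts_configs files below). The ${prop:default} syntax resolves a system property with an inline default, roughly equivalent to the following lookup (a sketch of the substitution semantics only, not Solr's actual parsing code):

    // Sketch: how a ${prop:default} placeholder resolves, using plain JDK calls.
    public class PropertySubstitutionSketch {
      public static void main(String[] args) {
        // -Dsolr.ulog.numVersionBuckets=131072 on the JVM command line would override this.
        int numVersionBuckets =
            Integer.parseInt(System.getProperty("solr.ulog.numVersionBuckets", "65536"));
        System.out.println("numVersionBuckets = " + numVersionBuckets);
      }
    }
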
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
index 21392b6..ea0bf38 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
@@ -244,19 +244,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -335,7 +322,7 @@
     -->
     <updateLog>
       <str name="dir">${solr.ulog.dir:}</str>
-      <int name="">${solr.ulog.numVersionBuckets:65536}</int>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
     </updateLog>
 
     <!-- AutoCommit
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
index 24c8b42..967669f 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
@@ -246,19 +246,6 @@
     -->
     <lockType>${solr.lock.type:native}</lockType>
 
-    <!-- Unlock On Startup
-
-         If true, unlock any held write or commit locks on startup.
-         This defeats the locking mechanism that allows multiple
-         processes to safely access a lucene index, and should be used
-         with care. Default is "false".
-
-         This is not needed if lock type is 'single'
-     -->
-    <!--
-    <unlockOnStartup>false</unlockOnStartup>
-      -->
-
     <!-- Commit Deletion Policy
          Custom deletion policies can be specified here. The class must
          implement org.apache.lucene.index.IndexDeletionPolicy.
@@ -338,7 +325,7 @@
     -->
     <updateLog>
       <str name="dir">${solr.ulog.dir:}</str>
-      <int name="">${solr.ulog.numVersionBuckets:65536}</int>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
     </updateLog>
  
     <!-- AutoCommit
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index f036407..921c7a9 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -1068,7 +1068,7 @@
         for (String s : collectionNames) {
           if(s!=null) collectionStateCache.remove(s);
         }
-        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Not enough nodes to handle the request");
+        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Could not find a healthy node to handle the request.");
       }
 
       Collections.shuffle(theUrlList, rand);
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index a08e3a3..7328830 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -329,6 +329,7 @@
               // update volatile
               ZkStateReader.this.clusterState = constructState(ln, thisWatch);
             }
+            log.info("Updated cluster state version to " + ZkStateReader.this.clusterState.getZkClusterStateVersion());
           } catch (KeeperException e) {
             if (e.code() == KeeperException.Code.SESSIONEXPIRED
                 || e.code() == KeeperException.Code.CONNECTIONLOSS) {
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java b/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
index 55b183d..ba9db74 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
@@ -1,5 +1,7 @@
 package org.apache.solr.common.util;
 
+import java.util.Collection;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -35,42 +37,37 @@
 public class ExecutorUtil {
   public static Logger log = LoggerFactory.getLogger(ExecutorUtil.class);
   
+  // this will interrupt the threads! Lucene and Solr do not like this because it can close channels, so only use
+  // this if you know what you are doing - you probably want shutdownAndAwaitTermination
   public static void shutdownNowAndAwaitTermination(ExecutorService pool) {
     pool.shutdown(); // Disable new tasks from being submitted
-    pool.shutdownNow(); // Cancel currently executing tasks
+    pool.shutdownNow(); // Cancel currently executing tasks  - NOTE: this interrupts!
     boolean shutdown = false;
     while (!shutdown) {
       try {
         // Wait a while for existing tasks to terminate
-        shutdown = pool.awaitTermination(5, TimeUnit.SECONDS);
+        shutdown = pool.awaitTermination(1, TimeUnit.SECONDS);
       } catch (InterruptedException ie) {
         // Preserve interrupt status
         Thread.currentThread().interrupt();
       }
       if (!shutdown) {
-        pool.shutdownNow(); // Cancel currently executing tasks
+        pool.shutdownNow(); // Cancel currently executing tasks - NOTE: this interrupts!
       }
     }
   }
-  
-  public static void shutdownAndAwaitTermination(ExecutorService pool) {
-    shutdownAndAwaitTermination(pool, 60, TimeUnit.SECONDS);
-  }
 
-  public static void shutdownAndAwaitTermination(ExecutorService pool, long timeout, TimeUnit timeUnit) {
+  public static void shutdownAndAwaitTermination(ExecutorService pool) {
     pool.shutdown(); // Disable new tasks from being submitted
     boolean shutdown = false;
     while (!shutdown) {
       try {
         // Wait a while for existing tasks to terminate
-        shutdown = pool.awaitTermination(timeout, timeUnit);
+        shutdown = pool.awaitTermination(1, TimeUnit.SECONDS);
       } catch (InterruptedException ie) {
         // Preserve interrupt status
         Thread.currentThread().interrupt();
       }
-      if (!shutdown) {
-        pool.shutdownNow(); // Cancel currently executing tasks
-      }
     }
   }
 
@@ -128,8 +125,19 @@
     @Override
     public void execute(final Runnable command) {
       final Map<String, String> submitterContext = MDC.getCopyOfContextMap();
-      String ctxStr = submitterContext != null && !submitterContext.isEmpty() ?
-          submitterContext.toString().replace("/", "//") : "";
+      StringBuilder contextString = new StringBuilder();
+      if (submitterContext != null) {
+        Collection<String> values = submitterContext.values();
+        
+        for (String value : values) {
+          contextString.append(value + " ");
+        }
+        if (contextString.length() > 1) {
+          contextString.setLength(contextString.length() - 1);
+        }
+      }
+      
+      String ctxStr = contextString.toString().replace("/", "//");
       final String submitterContextStr = ctxStr.length() <= MAX_THREAD_NAME_LEN ? ctxStr : ctxStr.substring(0, MAX_THREAD_NAME_LEN);
       final Exception submitterStackTrace = new Exception("Submitter stack trace");
       super.execute(new Runnable() {
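
The rewritten execute() builds the worker-thread label from the MDC values themselves, joined with single spaces (then '/'-escaped and truncated to MAX_THREAD_NAME_LEN), rather than from Map.toString(). A standalone sketch of just that string difference, with invented MDC contents:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Sketch only: what the submitter-context suffix looks like before and after
    // this change for the same (invented) MDC contents.
    public class ContextStringSketch {
      public static void main(String[] args) {
        Map<String,String> mdc = new LinkedHashMap<>();
        mdc.put("collection", "collection1");
        mdc.put("shard", "shard1");

        StringBuilder joined = new StringBuilder();
        for (String value : mdc.values()) {
          joined.append(value).append(' ');
        }
        if (joined.length() > 0) {
          joined.setLength(joined.length() - 1); // drop the trailing space
        }

        System.out.println(mdc.toString());    // old form: {collection=collection1, shard=shard1}
        System.out.println(joined.toString()); // new form: collection1 shard1
      }
    }
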
diff --git a/solr/solrj/src/test-files/log4j.properties b/solr/solrj/src/test-files/log4j.properties
index 9b74a5f..86446e9 100644
--- a/solr/solrj/src/test-files/log4j.properties
+++ b/solr/solrj/src/test-files/log4j.properties
@@ -3,8 +3,28 @@
 
 log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
 log4j.appender.CONSOLE.Target=System.err
-log4j.appender.CONSOLE.layout=org.apache.solr.util.SolrLogLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
-
+log4j.appender.CONSOLE.layout=org.apache.log4j.EnhancedPatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n
 log4j.logger.org.apache.zookeeper=WARN
 log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.directory=WARN
+log4j.logger.org.apache.solr.hadoop=INFO
+
+#log4j.logger.org.apache.solr.update.processor.LogUpdateProcessor=DEBUG
+#log4j.logger.org.apache.solr.update.processor.DistributedUpdateProcessor=DEBUG
+#log4j.logger.org.apache.solr.update.PeerSync=DEBUG
+#log4j.logger.org.apache.solr.core.CoreContainer=DEBUG
+#log4j.logger.org.apache.solr.cloud.RecoveryStrategy=DEBUG
+#log4j.logger.org.apache.solr.cloud.SyncStrategy=DEBUG
+#log4j.logger.org.apache.solr.handler.admin.CoreAdminHandler=DEBUG
+#log4j.logger.org.apache.solr.cloud.ZkController=DEBUG
+#log4j.logger.org.apache.solr.update.DefaultSolrCoreState=DEBUG
+#log4j.logger.org.apache.solr.common.cloud.ConnectionManager=DEBUG
+#log4j.logger.org.apache.solr.update.UpdateLog=DEBUG
+#log4j.logger.org.apache.solr.cloud.ChaosMonkey=DEBUG
+#log4j.logger.org.apache.solr.update.TransactionLog=DEBUG
+#log4j.logger.org.apache.solr.handler.ReplicationHandler=DEBUG
+#log4j.logger.org.apache.solr.handler.IndexFetcher=DEBUG
+
+#log4j.logger.org.apache.solr.common.cloud.ClusterStateUtil=DEBUG
+#log4j.logger.org.apache.solr.cloud.OverseerAutoReplicaFailoverThread=DEBUG
\ No newline at end of file
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 0d0869a..b4ca409 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -84,14 +84,12 @@
 
   @BeforeClass
   public static void beforeSuperClass() {
-      AbstractZkTestCase.SOLRHOME = new File(SOLR_HOME());
+    // this is necessary because AbstractZkTestCase.buildZooKeeper is used by AbstractDistribZkTestBase
+    // and the auto-detected SOLRHOME=TEST_HOME() does not exist for solrj tests
+    // TODO: fix this
+    AbstractZkTestCase.SOLRHOME = new File(SOLR_HOME());
   }
-  
-  @AfterClass
-  public static void afterSuperClass() {
-    
-  }
-  
+
   protected String getCloudSolrConfig() {
     return "solrconfig.xml";
   }
@@ -105,15 +103,6 @@
     return SOLR_HOME;
   }
   
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    // we expect this time of exception as shards go up and down...
-    //ignoreException(".*");
-    
-    System.setProperty("numShards", Integer.toString(sliceCount));
-  }
-  
   public CloudSolrClientTest() {
     super();
     sliceCount = 2;
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index f56a2c5..5d3ec61 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -17,9 +17,41 @@
 
 package org.apache.solr;
 
-import com.carrotsearch.randomizedtesting.RandomizedContext;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Level;
+
+import javax.xml.xpath.XPathExpressionException;
+
 import org.apache.commons.codec.Charsets;
 import org.apache.commons.io.FileUtils;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -55,7 +87,6 @@
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.core.SolrXmlConfig;
-import org.apache.solr.core.ZkContainer;
 import org.apache.solr.handler.UpdateRequestHandler;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
@@ -83,42 +114,9 @@
 import org.slf4j.LoggerFactory;
 import org.xml.sax.SAXException;
 
-import javax.xml.xpath.XPathExpressionException;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Reader;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.lang.annotation.Documented;
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Inherited;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-import java.util.logging.ConsoleHandler;
-import java.util.logging.Handler;
-import java.util.logging.Level;
-import java.util.regex.Pattern;
-
-import static com.google.common.base.Preconditions.checkNotNull;
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 
 /**
  * A junit4 Solr test harness that extends LuceneTestCaseJ4. To change which core is used when loading the schema and solrconfig.xml, simply
@@ -209,7 +207,6 @@
     System.setProperty("enable.update.log", usually() ? "true" : "false");
     System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong()));
     System.setProperty("solr.clustering.enabled", "false");
-    setupLogging();
     startTrackingSearchers();
     ignoreException("ignore_exception");
     newRandomConfig();
@@ -229,13 +226,18 @@
     try {
       deleteCore();
       resetExceptionIgnores();
-      endTrackingSearchers();
-      String orr = ObjectReleaseTracker.clearObjectTrackerAndCheckEmpty();
-      if (!RandomizedContext.current().getTargetClass().isAnnotationPresent(SuppressObjectReleaseTracker.class)) {
-        assertNull(orr, orr);
-      } else {
-        if (orr != null) {
-          log.warn("Some resources were not closed, shutdown, or released. This has been ignored due to the SuppressObjectReleaseTracker annotation.");
+      
+      if (suiteFailureMarker.wasSuccessful()) {
+        // if the tests passed, make sure everything was closed / released
+        endTrackingSearchers();
+        String orr = ObjectReleaseTracker.clearObjectTrackerAndCheckEmpty();
+        if (!RandomizedContext.current().getTargetClass().isAnnotationPresent(SuppressObjectReleaseTracker.class)) {
+          assertNull(orr, orr);
+        } else {
+          if (orr != null) {
+            log.warn(
+                "Some resources were not closed, shutdown, or released. This has been ignored due to the SuppressObjectReleaseTracker annotation.");
+          }
         }
       }
       resetFactory();
@@ -396,33 +398,6 @@
     super.tearDown();
   }
 
-  public static SolrLogFormatter formatter;
-
-  public static void setupLogging() {
-    boolean register = false;
-    Handler[] handlers = java.util.logging.Logger.getLogger("").getHandlers();
-    ConsoleHandler consoleHandler = null;
-    for (Handler handler : handlers) {
-      if (handler instanceof ConsoleHandler) {
-        consoleHandler = (ConsoleHandler)handler;
-        break;
-      }
-    }
-
-    if (consoleHandler == null) {
-      consoleHandler = new ConsoleHandler();
-      register = true;
-    }
-
-    consoleHandler.setLevel(Level.ALL);
-    formatter = new SolrLogFormatter();
-    consoleHandler.setFormatter(formatter);
-
-    if (register) {
-      java.util.logging.Logger.getLogger("").addHandler(consoleHandler);
-    }
-  }
-
   public static void setLoggingLevel(Level level) {
     java.util.logging.Logger logger = java.util.logging.Logger.getLogger("");
     logger.setLevel(level);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 6609256..935b209 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -17,9 +17,6 @@
  * limitations under the License.
  */
 
-import static org.apache.solr.cloud.OverseerCollectionProcessor.*;
-import static org.apache.solr.common.cloud.ZkNodeProps.*;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.ServerSocket;
@@ -82,6 +79,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.cloud.OverseerCollectionProcessor.CREATE_NODE_SET;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.SHARDS_PROP;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+
 /**
  * TODO: we should still test this works as a custom update chain as well as
  * what we test now - the default update chain
@@ -92,8 +94,6 @@
 
   @BeforeClass
   public static void beforeFullSolrCloudTest() {
-    // shorten the log output more for this test type
-    if (formatter != null) formatter.setShorterFormat();
   }
 
   public static final String SHARD1 = "shard1";
@@ -119,7 +119,6 @@
 
   protected Map<String,CloudJettyRunner> shardToLeaderJetty = new HashMap<>();
   private boolean cloudInit;
-  protected boolean checkCreatedVsState;
   protected boolean useJettyDataDir = true;
 
   protected Map<URI,SocketProxy> proxies = new HashMap<>();
@@ -252,10 +251,8 @@
     CloudSolrClient client = new CloudSolrClient(zkServer.getZkAddress(), random().nextBoolean());
     client.setParallelUpdates(random().nextBoolean());
     if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
-    client.getLbClient().getHttpClient().getParams()
-        .setParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 30000);
-    client.getLbClient().getHttpClient().getParams()
-    .setParameter(CoreConnectionPNames.SO_TIMEOUT, 60000);
+    client.getLbClient().setConnectionTimeout(30000);
+    client.getLbClient().setSoTimeout(60000);
     return client;
   }
 
@@ -306,7 +303,7 @@
 
     initCloud();
 
-    createJettys(numServers, checkCreatedVsState).size();
+    createJettys(numServers);
 
     int cnt = getTotalReplicas(DEFAULT_COLLECTION);
     if (cnt > 0) {
@@ -337,10 +334,6 @@
     }
   }
 
-  protected List<JettySolrRunner> createJettys(int numJettys) throws Exception {
-    return createJettys(numJettys, false);
-  }
-
   protected String defaultStateFormat = String.valueOf( 1 + random().nextInt(2));
 
   protected String getStateFormat()  {
@@ -351,13 +344,7 @@
     return defaultStateFormat; // random
   }
 
-  /**
-   * @param checkCreatedVsState
-   *          if true, make sure the number created (numJettys) matches the
-   *          number in the cluster state - if you add more jetties this may not
-   *          be the case
-   */
-  protected List<JettySolrRunner> createJettys(int numJettys, boolean checkCreatedVsState) throws Exception {
+  protected List<JettySolrRunner> createJettys(int numJettys) throws Exception {
     List<JettySolrRunner> jettys = new ArrayList<>();
     List<SolrClient> clients = new ArrayList<>();
     StringBuilder sb = new StringBuilder();
@@ -394,26 +381,24 @@
     this.clients.addAll(clients);
 
     int numShards = getTotalReplicas(DEFAULT_COLLECTION);
-    if (checkCreatedVsState) {
-      // now wait until we see that the number of shards in the cluster state
-      // matches what we expect
-      int retries = 0;
-      while (numShards != getShardCount()) {
-        numShards = getTotalReplicas(DEFAULT_COLLECTION);
-        if (numShards == getShardCount()) break;
-        if (retries++ == 60) {
-          printLayoutOnTearDown = true;
-          fail("Shards in the state does not match what we set:" + numShards
-              + " vs " + getShardCount());
-        }
-        Thread.sleep(500);
-      }
 
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      // also make sure we have a leader for each shard
-      for (int i = 1; i <= sliceCount; i++) {
-        zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + i, 10000);
+    // now wait until we see that the number of shards in the cluster state
+    // matches what we expect
+    int retries = 0;
+    while (numShards != getShardCount()) {
+      numShards = getTotalReplicas(DEFAULT_COLLECTION);
+      if (numShards == getShardCount()) break;
+      if (retries++ == 60) {
+        printLayoutOnTearDown = true;
+        fail("Shards in the state does not match what we set:" + numShards + " vs " + getShardCount());
       }
+      Thread.sleep(500);
+    }
+    
+    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    // make sure we have a leader for each shard
+    for (int i = 1; i <= sliceCount; i++) {
+      zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + i, 10000);
     }
 
     if (numShards > 0) {
@@ -434,42 +419,6 @@
   }
 
 
-  protected SolrClient startCloudJetty(String collection, String shard) throws Exception {
-    // TODO: use the collection string!!!!
-    collection = DEFAULT_COLLECTION;
-
-    int totalReplicas = getTotalReplicas(collection);
-
-
-    int cnt = this.jettyIntCntr.incrementAndGet();
-
-    File jettyDir = createTempDir("jetty").toFile();
-    jettyDir.mkdirs();
-    setupJettySolrHome(jettyDir);
-    JettySolrRunner j = createJetty(jettyDir, testDir + "/jetty" + cnt, shard, "solrconfig.xml", null);
-    jettys.add(j);
-    SolrClient client = createNewSolrClient(j.getLocalPort());
-    clients.add(client);
-
-    int retries = 60;
-    while (--retries >= 0) {
-      // total replicas changed.. assume it was us
-      if (getTotalReplicas(collection) != totalReplicas) {
-       break;
-      }
-      Thread.sleep(500);
-    }
-
-    if (retries <= 0) {
-      fail("Timeout waiting for " + j + " to appear in clusterstate");
-      printLayout();
-    }
-
-    updateMappingsFromZk(this.jettys, this.clients);
-    return client;
-  }
-
-
   /* Total number of replicas (number of cores serving an index to the collection) shown by the cluster state */
   protected int getTotalReplicas(String collection) {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
@@ -1521,8 +1470,8 @@
     if (VERBOSE || printLayoutOnTearDown) {
       super.printLayout();
     }
-    if (commondCloudSolrClient != null) {
-      commondCloudSolrClient.close();
+    if (commonCloudSolrClient != null) {
+      commonCloudSolrClient.close();
     }
     if (controlClient != null) {
       controlClient.close();
@@ -1747,20 +1696,24 @@
     }
   }
   
-  private CloudSolrClient commondCloudSolrClient;
+  private CloudSolrClient commonCloudSolrClient;
   
   protected CloudSolrClient getCommonCloudSolrClient() {
     synchronized (this) {
-      if (commondCloudSolrClient == null) {
-        commondCloudSolrClient = new CloudSolrClient(zkServer.getZkAddress(),
-            random().nextBoolean());
-        commondCloudSolrClient.getLbClient().setConnectionTimeout(30000);
-        commondCloudSolrClient.setParallelUpdates(random().nextBoolean());
-        commondCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
-        commondCloudSolrClient.connect();
+      if (commonCloudSolrClient == null) {
+        boolean updatesToLeaders = random().nextBoolean();
+        boolean parallelUpdates = random().nextBoolean();
+        commonCloudSolrClient = new CloudSolrClient(zkServer.getZkAddress(),
+                updatesToLeaders);
+        commonCloudSolrClient.getLbClient().setConnectionTimeout(5000);
+        commonCloudSolrClient.getLbClient().setSoTimeout(120000);
+        commonCloudSolrClient.setParallelUpdates(parallelUpdates);
+        commonCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
+        commonCloudSolrClient.connect();
+        log.info("Created commonCloudSolrClient with updatesToLeaders={} and parallelUpdates={}", updatesToLeaders, parallelUpdates);
       }
     }
-    return commondCloudSolrClient;
+    return commonCloudSolrClient;
   }
 
   public static String getUrlFromZk(ClusterState clusterState, String collection) {
@@ -1785,7 +1738,7 @@
     throw new RuntimeException("Could not find a live node for collection:" + collection);
   }
 
- public  static void waitForNon403or404or503(HttpSolrClient collectionClient)
+ public static void waitForNon403or404or503(HttpSolrClient collectionClient)
       throws Exception {
     SolrException exp = null;
     long timeoutAt = System.currentTimeMillis() + 30000;
@@ -1811,43 +1764,10 @@
     fail("Could not find the new collection - " + exp.code() + " : " + collectionClient.getBaseURL());
   }
 
-  protected void checkForMissingCollection(String collectionName)
-      throws Exception {
-    // check for a  collection - we poll the state
-    long timeoutAt = System.currentTimeMillis() + 45000;
-    boolean found = true;
-    while (System.currentTimeMillis() < timeoutAt) {
-      getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
-      ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        found = false;
-        break;
-      }
-      Thread.sleep(100);
-    }
-    if (found) {
-      fail("Found collection that should be gone " + collectionName);
-    }
+  protected void assertCollectionNotExists(String collectionName, int timeoutSeconds) throws Exception {
+    waitForCollectionToDisappear(collectionName, getCommonCloudSolrClient().getZkStateReader(), false, true, timeoutSeconds);
   }
 
-  protected NamedList<Object> invokeCollectionApi(String... args) throws SolrServerException, IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    SolrRequest request = new QueryRequest(params);
-    for (int i = 0; i < args.length - 1; i+=2) {
-      params.add(args[i], args[i+1]);
-    }
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
-        .getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseClient = new HttpSolrClient(baseUrl)) {
-      baseClient.setConnectionTimeout(15000);
-      baseClient.setSoTimeout(60000 * 5);
-      return baseClient.request(request);
-    }
-  }
 
   protected void createCollection(String collName,
                                   CloudSolrClient client,
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
index ec89f42..b23fbcf 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
@@ -533,8 +533,8 @@
     DirectUpdateHandler2.commitOnClose = true;
     
     float runtime = (System.currentTimeMillis() - startTime)/1000.0f;
-    if (runtime > 20 && stops.get() == 0) {
-      LuceneTestCase.fail("The Monkey ran for over 20 seconds and no jetties were stopped - this is worth investigating!");
+    if (runtime > 30 && stops.get() == 0) {
+      LuceneTestCase.fail("The Monkey ran for over 30 seconds and no jetties were stopped - this is worth investigating!");
     }
   }
 
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 51f12c4..064ad21 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -167,6 +167,29 @@
       throw startupError;
     }
 
+    try (SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(),
+        AbstractZkTestCase.TIMEOUT, 45000, null)) {
+      int numliveNodes = 0;
+      int retries = 60;
+      String liveNodesPath = "/solr/live_nodes";
+      // Wait up to 60 seconds for number of live_nodes to match up number of servers
+      do {
+        if (zkClient.exists(liveNodesPath, true)) {
+          numliveNodes = zkClient.getChildren(liveNodesPath, null, true).size();
+          if (numliveNodes == numServers) {
+            break;
+          }
+        }
+        retries--;
+        if (retries == 0) {
+          throw new IllegalStateException("Solr servers failed to register with ZK."
+              + " Current count: " + numliveNodes + "; Expected count: " + numServers);
+        }
+
+        Thread.sleep(1000);
+      } while (numliveNodes != numServers);
+    }
+
     solrClient = buildSolrClient();
   }