Merge branch 'master' into solr-6733
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 9eecc42..1b4bb2c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -107,6 +107,9 @@
   or phrase queries as sub queries, which know how to leverage this information
   to run faster. (Adrien Grand)
 
+======================= Lucene 7.5.0 =======================
+(No Changes)
+
 ======================= Lucene 7.4.0 =======================
 
 API Changes
@@ -267,6 +270,9 @@
 
 * LUCENE-8301: Update randomizedtesting to 2.6.0. (Dawid Weiss)
 
+* LUCENE-8299: Geo3D wrapper uses new polygon method factory that gives better
+  support for polygons with many points (>100). (Ignacio Vera)
+
 * LUCENE-8261: InterpolatedProperties.interpolate and recursive property
   references. (Steve Rowe, Dawid Weiss)
 
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
index 7885daa..f42aac3 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
@@ -62,7 +62,6 @@
 import org.apache.lucene.util.OfflineSorter.ByteSequencesReader;
 import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
 import org.apache.lucene.util.OfflineSorter;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
 import org.apache.lucene.util.fst.Builder;
@@ -927,10 +926,7 @@
         if (hasStemExceptions && end+1 < line.length()) {
           String stemException = parseStemException(line.substring(end+1));
           if (stemException != null) {
-            if (stemExceptionCount == stemExceptions.length) {
-              int newSize = ArrayUtil.oversize(stemExceptionCount+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
-              stemExceptions = Arrays.copyOf(stemExceptions, newSize);
-            }
+            stemExceptions = ArrayUtil.grow(stemExceptions, stemExceptionCount+1);
             stemExceptionID = stemExceptionCount+1; // we use '0' to indicate no exception for the form
             stemExceptions[stemExceptionCount++] = stemException;
           }
@@ -1125,7 +1121,7 @@
       }
 
       if (upto < flags.length) {
-        flags = Arrays.copyOf(flags, upto);
+        flags = ArrayUtil.copyOfSubArray(flags, 0, upto);
       }
       return flags;
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
index 16edb3d..4124e84 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.analysis.miscellaneous;
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenFilter;
@@ -427,9 +426,9 @@
   private void buffer() {
     if (bufferedLen == buffered.length) {
       int newSize = ArrayUtil.oversize(bufferedLen+1, 8);
-      buffered = Arrays.copyOf(buffered, newSize);
-      startOff = Arrays.copyOf(startOff, newSize);
-      posInc = Arrays.copyOf(posInc, newSize);
+      buffered = ArrayUtil.growExact(buffered, newSize);
+      startOff = ArrayUtil.growExact(startOff, newSize);
+      posInc = ArrayUtil.growExact(posInc, newSize);
     }
     startOff[bufferedLen] = offsetAttribute.startOffset();
     posInc[bufferedLen] = posIncAttribute.getPositionIncrement();
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
index ec2676f..a51edb5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.analysis.synonym;
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
@@ -33,7 +32,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.fst.FST;
 
 /**
@@ -206,7 +204,7 @@
 
     public void add(char[] output, int offset, int len, int endOffset, int posLength) {
       if (count == outputs.length) {
-        outputs = Arrays.copyOf(outputs, ArrayUtil.oversize(1+count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+        outputs = ArrayUtil.grow(outputs, count+1);
       }
       if (count == endOffsets.length) {
         final int[] next = new int[ArrayUtil.oversize(1+count, Integer.BYTES)];
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
index b74e371..a4183d7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
@@ -21,9 +21,9 @@
 import java.io.LineNumberReader;
 import java.io.Reader;
 import java.text.ParseException;
-import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 
@@ -59,10 +59,7 @@
           synsetSize = 0;
         }
 
-        if (synset.length <= synsetSize+1) {
-          synset = Arrays.copyOf(synset, synset.length * 2);
-        }
-        
+        synset = ArrayUtil.grow(synset, synsetSize + 1);
         synset[synsetSize] = parseSynonym(line, new CharsRefBuilder());
         synsetSize++;
         lastSynSetID = synSetID;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index d94b396..fd6f4b5 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -88,6 +88,7 @@
 import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
 import org.apache.lucene.analysis.payloads.IdentityEncoder;
 import org.apache.lucene.analysis.payloads.PayloadEncoder;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.snowball.TestSnowball;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.synonym.SynonymMap;
@@ -124,6 +125,10 @@
     avoidConditionals.add(FingerprintFilter.class);
     avoidConditionals.add(MinHashFilter.class);
     avoidConditionals.add(ConcatenateGraphFilter.class);
+    // ShingleFilter doesn't handle input graphs correctly, so wrapping it in a condition can
+    // expose inconsistent offsets
+    // https://issues.apache.org/jira/browse/LUCENE-4170
+    avoidConditionals.add(ShingleFilter.class);
   }
 
   private static final Map<Constructor<?>,Predicate<Object[]>> brokenConstructors = new HashMap<>();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
index 46a0c1c..cb54fa2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
@@ -29,6 +29,7 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.TestUtil;
 
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
@@ -179,7 +180,7 @@
           }
         }
         assertTrue(grams.incrementToken());
-        assertArrayEquals(Arrays.copyOfRange(codePoints, start, end), toCodePoints(termAtt));
+        assertArrayEquals(ArrayUtil.copyOfSubArray(codePoints, start, end), toCodePoints(termAtt));
         assertEquals(1, posIncAtt.getPositionIncrement());
         assertEquals(1, posLenAtt.getPositionLength());
         assertEquals(offsets[start], offsetAtt.startOffset());
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
index 9ed87b5..9de9d73 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
@@ -20,7 +20,6 @@
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.codecs.BlockTermState;
@@ -44,7 +43,6 @@
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.RamUsageEstimator;
 
 // TODO: currently we encode all terms between two indexed
 // terms as a block; but, we could decouple the two, ie
@@ -260,11 +258,9 @@
         //System.out.println("  index term!");
       }
 
-      if (pendingTerms.length == pendingCount) {
-        pendingTerms = Arrays.copyOf(pendingTerms, ArrayUtil.oversize(pendingCount+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
-        for(int i=pendingCount;i<pendingTerms.length;i++) {
-          pendingTerms[i] = new TermEntry();
-        }
+      pendingTerms = ArrayUtil.grow(pendingTerms, pendingCount + 1);
+      for (int i = pendingCount; i < pendingTerms.length; i++) {
+        pendingTerms[i] = new TermEntry();
       }
       final TermEntry te = pendingTerms[pendingCount];
       te.term.copyBytes(text);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
index 034202b..cd74aca 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
@@ -505,7 +505,7 @@
 
       if (leafBlockFPs.size() > 0) {
         // Save the first (minimum) value in each leaf block except the first, to build the split value index in the end:
-        leafBlockStartValues.add(Arrays.copyOf(leafValues, packedBytesLength));
+        leafBlockStartValues.add(ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength));
       }
       leafBlockFPs.add(out.getFilePointer());
       checkMaxLeafNodeCount(leafBlockFPs.size());
@@ -539,8 +539,8 @@
           return scratch;
         }
       };
-      assert valuesInOrderAndBounds(leafCount, 0, Arrays.copyOf(leafValues, packedBytesLength),
-          Arrays.copyOfRange(leafValues, (leafCount - 1) * packedBytesLength, leafCount * packedBytesLength),
+      assert valuesInOrderAndBounds(leafCount, 0, ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength),
+          ArrayUtil.copyOfSubArray(leafValues, (leafCount - 1) * packedBytesLength, leafCount * packedBytesLength),
           packedValues, leafDocs, 0);
       writeLeafBlockPackedValues(out, commonPrefixLengths, leafCount, 0, packedValues);
     }
@@ -1206,8 +1206,8 @@
       reader.getValue(mid, scratchBytesRef1);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
 
-      byte[] minSplitPackedValue = Arrays.copyOf(minPackedValue, packedBytesLength);
-      byte[] maxSplitPackedValue = Arrays.copyOf(maxPackedValue, packedBytesLength);
+      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedBytesLength);
+      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedBytesLength);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
           minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java
index 8a71c6d..6d5bfe4 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java
@@ -42,6 +42,7 @@
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.StringHelper;
@@ -158,7 +159,7 @@
       
       SimpleTextUtil.readLine(input, scratch);
       assert StringHelper.startsWith(scratch.get(), SI_ID);
-      final byte[] id = Arrays.copyOfRange(scratch.bytes(), SI_ID.length, scratch.length());
+      final byte[] id = ArrayUtil.copyOfSubArray(scratch.bytes(), SI_ID.length, scratch.length());
       
       if (!Arrays.equals(segmentID, id)) {
         throw new CorruptIndexException("file mismatch, expected: " + StringHelper.idToString(segmentID)
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
index 0685d79..61410b6 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
@@ -21,7 +21,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -73,12 +72,12 @@
       }
       if (blockCount == docBases.length) {
         final int newSize = ArrayUtil.oversize(blockCount + 1, 8);
-        docBases = Arrays.copyOf(docBases, newSize);
-        startPointers = Arrays.copyOf(startPointers, newSize);
-        avgChunkDocs = Arrays.copyOf(avgChunkDocs, newSize);
-        avgChunkSizes = Arrays.copyOf(avgChunkSizes, newSize);
-        docBasesDeltas = Arrays.copyOf(docBasesDeltas, newSize);
-        startPointersDeltas = Arrays.copyOf(startPointersDeltas, newSize);
+        docBases = ArrayUtil.growExact(docBases, newSize);
+        startPointers = ArrayUtil.growExact(startPointers, newSize);
+        avgChunkDocs = ArrayUtil.growExact(avgChunkDocs, newSize);
+        avgChunkSizes = ArrayUtil.growExact(avgChunkSizes, newSize);
+        docBasesDeltas = ArrayUtil.growExact(docBasesDeltas, newSize);
+        startPointersDeltas = ArrayUtil.growExact(startPointersDeltas, newSize);
       }
 
       // doc bases
@@ -102,12 +101,12 @@
       ++blockCount;
     }
 
-    this.docBases = Arrays.copyOf(docBases, blockCount);
-    this.startPointers = Arrays.copyOf(startPointers, blockCount);
-    this.avgChunkDocs = Arrays.copyOf(avgChunkDocs, blockCount);
-    this.avgChunkSizes = Arrays.copyOf(avgChunkSizes, blockCount);
-    this.docBasesDeltas = Arrays.copyOf(docBasesDeltas, blockCount);
-    this.startPointersDeltas = Arrays.copyOf(startPointersDeltas, blockCount);
+    this.docBases = ArrayUtil.copyOfSubArray(docBases, 0, blockCount);
+    this.startPointers = ArrayUtil.copyOfSubArray(startPointers, 0, blockCount);
+    this.avgChunkDocs = ArrayUtil.copyOfSubArray(avgChunkDocs, 0, blockCount);
+    this.avgChunkSizes = ArrayUtil.copyOfSubArray(avgChunkSizes, 0, blockCount);
+    this.docBasesDeltas = ArrayUtil.copyOfSubArray(docBasesDeltas, 0, blockCount);
+    this.startPointersDeltas = ArrayUtil.copyOfSubArray(startPointersDeltas, 0, blockCount);
   }
 
   private int block(int docID) {
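The change above is one instance of a pattern this patch applies throughout: compute a single over-sized target length, then growExact every parallel buffer to that same length so the arrays stay in lock-step. A minimal sketch of that pattern follows; the class, field, and method names are made up for illustration.

    // Hypothetical sketch of the parallel-buffer growth pattern used above.
    import org.apache.lucene.util.ArrayUtil;

    class ParallelBuffers {
      private int[] docBases = new int[16];
      private long[] startPointers = new long[16];
      private int count;

      void add(int docBase, long startPointer) {
        if (count == docBases.length) {
          // Compute one over-sized length, then grow every parallel array to
          // exactly that length so they remain the same size.
          final int newSize = ArrayUtil.oversize(count + 1, Integer.BYTES);
          docBases = ArrayUtil.growExact(docBases, newSize);
          startPointers = ArrayUtil.growExact(startPointers, newSize);
        }
        docBases[count] = docBase;
        startPointers[count] = startPointer;
        count++;
      }
    }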
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
index 8cd8ccb..5b8eb9e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.codecs.CodecUtil;
@@ -158,8 +157,8 @@
   public void finishDocument() throws IOException {
     if (numBufferedDocs == this.numStoredFields.length) {
       final int newLength = ArrayUtil.oversize(numBufferedDocs + 1, 4);
-      this.numStoredFields = Arrays.copyOf(this.numStoredFields, newLength);
-      endOffsets = Arrays.copyOf(endOffsets, newLength);
+      this.numStoredFields = ArrayUtil.growExact(this.numStoredFields, newLength);
+      endOffsets = ArrayUtil.growExact(endOffsets, newLength);
     }
     this.numStoredFields[numBufferedDocs] = numStoredFieldsInDoc;
     numStoredFieldsInDoc = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
index ee948c3..4f8d004 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
@@ -176,8 +176,8 @@
       if (hasOffsets) {
         if (offStart + totalPositions == startOffsetsBuf.length) {
           final int newLength = ArrayUtil.oversize(offStart + totalPositions, 4);
-          startOffsetsBuf = Arrays.copyOf(startOffsetsBuf, newLength);
-          lengthsBuf = Arrays.copyOf(lengthsBuf, newLength);
+          startOffsetsBuf = ArrayUtil.growExact(startOffsetsBuf, newLength);
+          lengthsBuf = ArrayUtil.growExact(lengthsBuf, newLength);
         }
         startOffsetsBuf[offStart + totalPositions] = startOffset;
         lengthsBuf[offStart + totalPositions] = length;
@@ -705,8 +705,8 @@
       final int offStart = curField.offStart + curField.totalPositions;
       if (offStart + numProx > startOffsetsBuf.length) {
         final int newLength = ArrayUtil.oversize(offStart + numProx, 4);
-        startOffsetsBuf = Arrays.copyOf(startOffsetsBuf, newLength);
-        lengthsBuf = Arrays.copyOf(lengthsBuf, newLength);
+        startOffsetsBuf = ArrayUtil.growExact(startOffsetsBuf, newLength);
+        lengthsBuf = ArrayUtil.growExact(lengthsBuf, newLength);
       }
       int lastOffset = 0, startOffset, endOffset;
       for (int i = 0; i < numProx; ++i) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
index 700090a..71a14a5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesWriter.java
@@ -164,7 +164,7 @@
         }
         docOrds[upto++] = ord;
       }
-      ords[newDocID] = Arrays.copyOfRange(docOrds, 0, upto);
+      ords[newDocID] = ArrayUtil.copyOfSubArray(docOrds, 0, upto);
     }
     return ords;
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
index eb9f7ed..55e4d20 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -890,8 +890,8 @@
       while ((doc = in.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
         if (i == docs.length) {
           final int newLength = ArrayUtil.oversize(i + 1, 4);
-          docs = Arrays.copyOf(docs, newLength);
-          offsets = Arrays.copyOf(offsets, newLength);
+          docs = ArrayUtil.growExact(docs, newLength);
+          offsets = ArrayUtil.growExact(offsets, newLength);
         }
         docs[i] = docMap.oldToNew(doc);
         offsets[i] = out.getFilePointer();
@@ -1230,7 +1230,7 @@
             }
             docOrds[upto++] = ord;
           }
-          ords[newDocID] = Arrays.copyOfRange(docOrds, 0, upto);
+          ords[newDocID] = ArrayUtil.copyOfSubArray(docOrds, 0, upto);
         }
         cachedSortedSetDVs.put(field, ords);
       }
diff --git a/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
index cca6675..8f85e25 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
@@ -102,9 +102,9 @@
     /** Build the {@link BlendedTermQuery}. */
     public BlendedTermQuery build() {
       return new BlendedTermQuery(
-          Arrays.copyOf(terms, numTerms),
-          Arrays.copyOf(boosts, numTerms),
-          Arrays.copyOf(contexts, numTerms),
+          ArrayUtil.copyOfSubArray(terms, 0, numTerms),
+          ArrayUtil.copyOfSubArray(boosts, 0, numTerms),
+          ArrayUtil.copyOfSubArray(contexts, 0, numTerms),
           rewriteMethod);
     }
 
@@ -263,7 +263,7 @@
 
   @Override
   public final Query rewrite(IndexReader reader) throws IOException {
-    final TermStates[] contexts = Arrays.copyOf(this.contexts, this.contexts.length);
+    final TermStates[] contexts = ArrayUtil.copyOfSubArray(this.contexts, 0, this.contexts.length);
     for (int i = 0; i < contexts.length; ++i) {
       if (contexts[i] == null || contexts[i].wasBuiltFor(reader.getContext()) == false) {
         contexts[i] = TermStates.build(reader.getContext(), terms[i], true);
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index 3bed88d..6681c59 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -18,7 +18,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.index.LeafReaderContext;
@@ -213,7 +212,7 @@
     }
 
     protected void grow(int newLen) {
-      docs = Arrays.copyOf(docs, newLen);
+      docs = ArrayUtil.growExact(docs, newLen);
     }
 
     protected void invalidate() {
@@ -250,7 +249,7 @@
     }
 
     int[] cachedDocs() {
-      return docs == null ? null : Arrays.copyOf(docs, docCount);
+      return docs == null ? null : ArrayUtil.copyOfSubArray(docs, 0, docCount);
     }
 
   }
@@ -274,7 +273,7 @@
     @Override
     protected void grow(int newLen) {
       super.grow(newLen);
-      scores = Arrays.copyOf(scores, newLen);
+      scores = ArrayUtil.growExact(scores, newLen);
     }
 
     @Override
@@ -290,7 +289,7 @@
     }
 
     float[] cachedScores() {
-      return docs == null ? null : Arrays.copyOf(scores, docCount);
+      return docs == null ? null : ArrayUtil.copyOfSubArray(scores, 0, docCount);
     }
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/MaxScoreCache.java b/lucene/core/src/java/org/apache/lucene/search/MaxScoreCache.java
index 17e4efc..51fee1a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MaxScoreCache.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MaxScoreCache.java
@@ -53,7 +53,7 @@
     if (maxScoreCache.length < size) {
       int oldLength = maxScoreCache.length;
       maxScoreCache = ArrayUtil.grow(maxScoreCache, size);
-      maxScoreCacheUpTo = Arrays.copyOf(maxScoreCacheUpTo, maxScoreCache.length);
+      maxScoreCacheUpTo = ArrayUtil.growExact(maxScoreCacheUpTo, maxScoreCache.length);
       Arrays.fill(maxScoreCacheUpTo, oldLength, maxScoreCacheUpTo.length, -1);
     }
   }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index 16642e5..70d2e09 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -413,7 +413,7 @@
           }
         }
         if (termUpTo > 0) {
-          return similarity.scorer(boost, searcher.collectionStatistics(field), Arrays.copyOf(termStats, termUpTo));
+          return similarity.scorer(boost, searcher.collectionStatistics(field), ArrayUtil.copyOfSubArray(termStats, 0, termUpTo));
         } else {
           return null; // no terms at all, we won't use similarity
         }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
index 689d64a..149fa20 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
@@ -333,7 +333,7 @@
 
             upto++;
             BytesRef next = iterator.next();
-            return Arrays.copyOfRange(next.bytes, next.offset, next.length);
+            return BytesRef.deepCopyOf(next).bytes;
           }
         };
       }
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 7e48383..3af001b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.PointValues.IntersectVisitor;
 import org.apache.lucene.index.PointValues.Relation;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.DocIdSetBuilder;
 import org.apache.lucene.util.FixedBitSet;
@@ -392,9 +393,9 @@
       int startOffset = bytesPerDim * i;
 
       sb.append('[');
-      sb.append(toString(i, Arrays.copyOfRange(lowerPoint, startOffset, startOffset + bytesPerDim)));
+      sb.append(toString(i, ArrayUtil.copyOfSubArray(lowerPoint, startOffset, startOffset + bytesPerDim)));
       sb.append(" TO ");
-      sb.append(toString(i, Arrays.copyOfRange(upperPoint, startOffset, startOffset + bytesPerDim)));
+      sb.append(toString(i, ArrayUtil.copyOfSubArray(upperPoint, startOffset, startOffset + bytesPerDim)));
       sb.append(']');
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 35c36d7..0313d56 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -18,7 +18,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Map;
 
 import org.apache.lucene.index.LeafReaderContext;
@@ -32,6 +31,7 @@
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.ArrayUtil;
 
 /**
  * Expert-only.  Public for use by other weight implementations
@@ -103,7 +103,7 @@
     }
     CollectionStatistics collectionStats = searcher.collectionStatistics(query.getField());
     if (termUpTo > 0) {
-      return similarity.scorer(boost, collectionStats, Arrays.copyOf(termStats, termUpTo));
+      return similarity.scorer(boost, collectionStats, ArrayUtil.copyOfSubArray(termStats, 0, termUpTo));
     } else {
       return null; // no terms at all exist, we won't use similarity
     }
diff --git a/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java b/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
index 3c5897f..f6bab10 100644
--- a/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
+++ b/lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java
@@ -16,7 +16,7 @@
  */
 package org.apache.lucene.util;
 
-import java.util.Arrays;
+import java.lang.reflect.Array;
 import java.util.Comparator;
 
 /**
@@ -211,94 +211,172 @@
     }
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static <T> T[] growExact(T[] array, int newLength) {
+    Class<? extends Object[]> type = array.getClass();
+    @SuppressWarnings("unchecked")
+    T[] copy = (type == Object[].class)
+        ? (T[]) new Object[newLength]
+        : (T[]) Array.newInstance(type.getComponentType(), newLength);
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static <T> T[] grow(T[] array, int minSize) {
-    assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
+    assert minSize >= 0 : "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+      final int newLength = oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+      return growExact(array, newLength);
     } else
       return array;
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static short[] growExact(short[] array, int newLength) {
+    short[] copy = new short[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static short[] grow(short[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Short.BYTES));
+      return growExact(array, oversize(minSize, Short.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static short[] grow(short[] array) {
     return grow(array, 1 + array.length);
   }
-  
+
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static float[] growExact(float[] array, int newLength) {
+    float[] copy = new float[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static float[] grow(float[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Float.BYTES));
+      return growExact(array, oversize(minSize, Float.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static float[] grow(float[] array) {
     return grow(array, 1 + array.length);
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static double[] growExact(double[] array, int newLength) {
+    double[] copy = new double[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static double[] grow(double[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Double.BYTES));
+      return growExact(array, oversize(minSize, Double.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static double[] grow(double[] array) {
     return grow(array, 1 + array.length);
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static int[] growExact(int[] array, int newLength) {
+    int[] copy = new int[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static int[] grow(int[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Integer.BYTES));
+      return growExact(array, oversize(minSize, Integer.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static int[] grow(int[] array) {
     return grow(array, 1 + array.length);
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static long[] growExact(long[] array, int newLength) {
+    long[] copy = new long[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static long[] grow(long[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Long.BYTES));
+      return growExact(array, oversize(minSize, Long.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static long[] grow(long[] array) {
     return grow(array, 1 + array.length);
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static byte[] growExact(byte[] array, int newLength) {
+    byte[] copy = new byte[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static byte[] grow(byte[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Byte.BYTES));
+      return growExact(array, oversize(minSize, Byte.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static byte[] grow(byte[] array) {
     return grow(array, 1 + array.length);
   }
 
+  /** Returns a new array whose size is exactly the specified {@code newLength} without over-allocating */
+  public static char[] growExact(char[] array, int newLength) {
+    char[] copy = new char[newLength];
+    System.arraycopy(array, 0, copy, 0, array.length);
+    return copy;
+  }
+
+  /** Returns an array whose size is at least {@code minSize}, generally over-allocating exponentially */
   public static char[] grow(char[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      return Arrays.copyOf(array, oversize(minSize, Character.BYTES));
+      return growExact(array, oversize(minSize, Character.BYTES));
     } else
       return array;
   }
 
+  /** Returns a larger array, generally over-allocating exponentially */
   public static char[] grow(char[] array) {
     return grow(array, 1 + array.length);
   }
@@ -429,4 +507,104 @@
     }.select(from, to, k);
   }
 
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static byte[] copyOfSubArray(byte[] array, int from, int to) {
+    final byte[] copy = new byte[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static char[] copyOfSubArray(char[] array, int from, int to) {
+    final char[] copy = new char[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static short[] copyOfSubArray(short[] array, int from, int to) {
+    final short[] copy = new short[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static int[] copyOfSubArray(int[] array, int from, int to) {
+    final int[] copy = new int[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static long[] copyOfSubArray(long[] array, int from, int to) {
+    final long[] copy = new long[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static float[] copyOfSubArray(float[] array, int from, int to) {
+    final float[] copy = new float[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static double[] copyOfSubArray(double[] array, int from, int to) {
+    final double[] copy = new double[to-from];
+    System.arraycopy(array, from, copy, 0, to-from);
+    return copy;
+  }
+
+  /**
+   * Copies the specified range of the given array into a new sub array.
+   * @param array the input array
+   * @param from  the initial index of range to be copied (inclusive)
+   * @param to    the final index of range to be copied (exclusive)
+   */
+  public static <T> T[] copyOfSubArray(T[] array, int from, int to) {
+    final int subLength = to - from;
+    final Class<? extends Object[]> type = array.getClass();
+    @SuppressWarnings("unchecked")
+    final T[] copy = (type == Object[].class)
+        ? (T[]) new Object[subLength]
+        : (T[]) Array.newInstance(type.getComponentType(), subLength);
+    System.arraycopy(array, from, copy, 0, subLength);
+    return copy;
+  }
 }
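Taken together, the new helpers split the two use cases that Arrays.copyOf was serving: growExact copies into an array of exactly the requested length, grow over-allocates via oversize() and returns the same instance when the array is already big enough, and copyOfSubArray mirrors Arrays.copyOfRange with an exclusive upper bound. A minimal usage sketch (illustrative only, not part of the patch):

    // Minimal usage sketch of the new ArrayUtil helpers.
    import org.apache.lucene.util.ArrayUtil;

    public class ArrayUtilSketch {
      public static void main(String[] args) {
        int[] buf = new int[4];

        // growExact: the result has exactly the requested length.
        buf = ArrayUtil.growExact(buf, 10);           // length == 10

        // grow: over-allocates via oversize(), and is a no-op when the
        // array is already at least minSize long.
        buf = ArrayUtil.grow(buf, 20);                // length >= 20
        int[] same = ArrayUtil.grow(buf, 5);          // same instance returned

        // copyOfSubArray: like Arrays.copyOfRange, 'to' is exclusive.
        int[] firstThree = ArrayUtil.copyOfSubArray(buf, 0, 3);

        System.out.println(buf.length + " " + (same == buf) + " " + firstThree.length);
      }
    }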
diff --git a/lucene/core/src/java/org/apache/lucene/util/BytesRef.java b/lucene/core/src/java/org/apache/lucene/util/BytesRef.java
index 42c4e87..a9a05e6 100644
--- a/lucene/core/src/java/org/apache/lucene/util/BytesRef.java
+++ b/lucene/core/src/java/org/apache/lucene/util/BytesRef.java
@@ -16,9 +16,6 @@
  */
 package org.apache.lucene.util;
 
-
-import java.util.Arrays;
-
 /** Represents byte[], as a slice (offset + length) into an
  *  existing byte[].  The {@link #bytes} member should never be null;
  *  use {@link #EMPTY_BYTES} if necessary.
@@ -172,11 +169,7 @@
    * and an offset of zero.
    */
   public static BytesRef deepCopyOf(BytesRef other) {
-    BytesRef copy = new BytesRef();
-    copy.bytes = Arrays.copyOfRange(other.bytes, other.offset, other.offset + other.length);
-    copy.offset = 0;
-    copy.length = other.length;
-    return copy;
+    return new BytesRef(ArrayUtil.copyOfSubArray(other.bytes, other.offset, other.offset + other.length), 0, other.length);
   }
   
   /** 
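The rewritten deepCopyOf keeps the documented contract: the copy has an offset of zero and its backing array holds only the referenced slice. A small sketch of that behavior (illustrative only):

    // Illustrative only: deepCopyOf yields an offset-0, length-trimmed copy.
    import java.nio.charset.StandardCharsets;

    import org.apache.lucene.util.BytesRef;

    public class DeepCopySketch {
      public static void main(String[] args) {
        byte[] backing = "xxhelloxx".getBytes(StandardCharsets.UTF_8);
        BytesRef slice = new BytesRef(backing, 2, 5);   // refers to "hello"
        BytesRef copy = BytesRef.deepCopyOf(slice);

        System.out.println(copy.offset);          // 0
        System.out.println(copy.bytes.length);    // 5: only the slice was copied
        System.out.println(copy.utf8ToString());  // hello
      }
    }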
diff --git a/lucene/core/src/java/org/apache/lucene/util/BytesRefBuilder.java b/lucene/core/src/java/org/apache/lucene/util/BytesRefBuilder.java
index 08fda91..6abd866 100644
--- a/lucene/core/src/java/org/apache/lucene/util/BytesRefBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/BytesRefBuilder.java
@@ -17,8 +17,6 @@
 package org.apache.lucene.util;
 
 
-import java.util.Arrays;
-
 /**
  * A builder for {@link BytesRef} instances.
  * @lucene.internal
@@ -170,7 +168,7 @@
    * Build a new {@link BytesRef} that has the same content as this buffer.
    */
   public BytesRef toBytesRef() {
-    return new BytesRef(Arrays.copyOf(ref.bytes, ref.length));
+    return new BytesRef(ArrayUtil.copyOfSubArray(ref.bytes, 0, ref.length));
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/util/CharsRef.java b/lucene/core/src/java/org/apache/lucene/util/CharsRef.java
index eb839a8..7cb8ce9 100644
--- a/lucene/core/src/java/org/apache/lucene/util/CharsRef.java
+++ b/lucene/core/src/java/org/apache/lucene/util/CharsRef.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.util;
 
 
-import java.util.Arrays;
 import java.util.Comparator;
 
 /**
@@ -202,7 +201,7 @@
    * and an offset of zero.
    */
   public static CharsRef deepCopyOf(CharsRef other) {
-    return new CharsRef(Arrays.copyOfRange(other.chars, other.offset, other.offset + other.length), 0, other.length);
+    return new CharsRef(ArrayUtil.copyOfSubArray(other.chars, other.offset, other.offset + other.length), 0, other.length);
   }
   
   /** 
diff --git a/lucene/core/src/java/org/apache/lucene/util/CharsRefBuilder.java b/lucene/core/src/java/org/apache/lucene/util/CharsRefBuilder.java
index 09830e6..fdc16f4 100644
--- a/lucene/core/src/java/org/apache/lucene/util/CharsRefBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/CharsRefBuilder.java
@@ -17,8 +17,6 @@
 package org.apache.lucene.util;
 
 
-import java.util.Arrays;
-
 /**
  * A builder for {@link CharsRef} instances.
  * @lucene.internal
@@ -153,7 +151,7 @@
 
   /** Build a new {@link CharsRef} that has the same content as this builder. */
   public CharsRef toCharsRef() {
-    return new CharsRef(Arrays.copyOf(ref.chars, ref.length), 0, ref.length);
+    return new CharsRef(ArrayUtil.copyOfSubArray(ref.chars, 0, ref.length), 0, ref.length);
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
index 954614b..e57292b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
@@ -18,7 +18,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.index.PointValues;
@@ -226,7 +225,7 @@
   }
 
   private void growBuffer(Buffer buffer, int additionalCapacity) {
-    buffer.array = Arrays.copyOf(buffer.array, buffer.array.length + additionalCapacity);
+    buffer.array = ArrayUtil.growExact(buffer.array, buffer.array.length + additionalCapacity);
     totalAllocated += additionalCapacity;
   }
 
@@ -297,7 +296,7 @@
     }
     int[] docs = largestBuffer.array;
     if (docs.length < totalLength + 1) {
-      docs = Arrays.copyOf(docs, totalLength + 1);
+      docs = ArrayUtil.growExact(docs, totalLength + 1);
     }
     totalLength = largestBuffer.length;
     for (Buffer buffer : buffers) {
diff --git a/lucene/core/src/java/org/apache/lucene/util/IntsRef.java b/lucene/core/src/java/org/apache/lucene/util/IntsRef.java
index aa7bbce..0c6cfa5 100644
--- a/lucene/core/src/java/org/apache/lucene/util/IntsRef.java
+++ b/lucene/core/src/java/org/apache/lucene/util/IntsRef.java
@@ -16,8 +16,6 @@
  */
 package org.apache.lucene.util;
 
-import java.util.Arrays;
-
 
 /** Represents int[], as a slice (offset + length) into an
  *  existing int[].  The {@link #ints} member should never be null; use
@@ -127,7 +125,7 @@
    * and an offset of zero.
    */
   public static IntsRef deepCopyOf(IntsRef other) {
-    return new IntsRef(Arrays.copyOfRange(other.ints, other.offset, other.offset + other.length), 0, other.length);
+    return new IntsRef(ArrayUtil.copyOfSubArray(other.ints, other.offset, other.offset + other.length), 0, other.length);
   }
   
   /** 
diff --git a/lucene/core/src/java/org/apache/lucene/util/LongsRef.java b/lucene/core/src/java/org/apache/lucene/util/LongsRef.java
index 952d189..e4ca3ef 100644
--- a/lucene/core/src/java/org/apache/lucene/util/LongsRef.java
+++ b/lucene/core/src/java/org/apache/lucene/util/LongsRef.java
@@ -16,8 +16,6 @@
  */
 package org.apache.lucene.util;
 
-import java.util.Arrays;
-
 
 /** Represents long[], as a slice (offset + length) into an
  *  existing long[].  The {@link #longs} member should never be null; use
@@ -126,7 +124,7 @@
    * and an offset of zero.
    */
   public static LongsRef deepCopyOf(LongsRef other) {
-    return new LongsRef(Arrays.copyOfRange(other.longs, other.offset, other.offset + other.length), 0, other.length);
+    return new LongsRef(ArrayUtil.copyOfSubArray(other.longs, other.offset, other.offset + other.length), 0, other.length);
   }
   
   /** 
diff --git a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
index ad26f85..e07046c 100644
--- a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
+++ b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
@@ -18,7 +18,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.DataOutput;
@@ -62,7 +61,7 @@
     private final long bytesUsedPerBlock;
 
     private Reader(PagedBytes pagedBytes) {
-      blocks = Arrays.copyOf(pagedBytes.blocks, pagedBytes.numBlocks);
+      blocks = ArrayUtil.copyOfSubArray(pagedBytes.blocks, 0, pagedBytes.numBlocks);
       blockBits = pagedBytes.blockBits;
       blockMask = pagedBytes.blockMask;
       blockSize = pagedBytes.blockSize;
@@ -154,9 +153,7 @@
   }
 
   private void addBlock(byte[] block) {
-    if (blocks.length == numBlocks) {
-      blocks = Arrays.copyOf(blocks, ArrayUtil.oversize(numBlocks, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
-    }
+    blocks = ArrayUtil.grow(blocks, numBlocks + 1);
     blocks[numBlocks++] = block;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java
index 2cb066b..697e3bb 100644
--- a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java
@@ -346,9 +346,9 @@
 
   /**
    * Creates a span query from the tokenstream.  In the case of a single token, a simple <code>SpanTermQuery</code> is
-   * returned.  When multiple tokens, an ordered <code>SpanNearQuery</code> with slop of 0 is returned.
+   * returned.  When there are multiple tokens, an ordered <code>SpanNearQuery</code> with slop 0 is returned.
    */
-  protected final SpanQuery createSpanQuery(TokenStream in, String field) throws IOException {
+  protected SpanQuery createSpanQuery(TokenStream in, String field) throws IOException {
     TermToBytesRefAttribute termAtt = in.getAttribute(TermToBytesRefAttribute.class);
     if (termAtt == null) {
       return null;
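The javadoc above describes the shape of query that createSpanQuery produces; the sketch below builds the equivalent queries by hand to illustrate it (the field name and terms are made up):

    // Illustrative only: the query shapes described by createSpanQuery's javadoc.
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.spans.SpanNearQuery;
    import org.apache.lucene.search.spans.SpanQuery;
    import org.apache.lucene.search.spans.SpanTermQuery;

    public class SpanShapeSketch {
      public static void main(String[] args) {
        // Single token: a simple SpanTermQuery.
        SpanQuery single = new SpanTermQuery(new Term("body", "lucene"));

        // Multiple tokens: an ordered SpanNearQuery with slop 0.
        SpanQuery multi = new SpanNearQuery(new SpanQuery[] {
            new SpanTermQuery(new Term("body", "apache")),
            new SpanTermQuery(new Term("body", "lucene"))
        }, 0, true);

        System.out.println(single);
        System.out.println(multi);
      }
    }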
diff --git a/lucene/core/src/java/org/apache/lucene/util/RoaringDocIdSet.java b/lucene/core/src/java/org/apache/lucene/util/RoaringDocIdSet.java
index 9709c30..5f704bb 100644
--- a/lucene/core/src/java/org/apache/lucene/util/RoaringDocIdSet.java
+++ b/lucene/core/src/java/org/apache/lucene/util/RoaringDocIdSet.java
@@ -18,7 +18,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -73,7 +72,7 @@
         // Use sparse encoding
         assert denseBuffer == null;
         if (currentBlockCardinality > 0) {
-          sets[currentBlock] = new ShortArrayDocIdSet(Arrays.copyOf(buffer, currentBlockCardinality));
+          sets[currentBlock] = new ShortArrayDocIdSet(ArrayUtil.copyOfSubArray(buffer, 0, currentBlockCardinality));
         }
       } else {
         assert denseBuffer != null;
diff --git a/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java b/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
index 0324291..4fcbbef 100644
--- a/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
+++ b/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
@@ -18,7 +18,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.search.DocIdSetIterator;
 
@@ -372,7 +371,7 @@
       // fast path: if we currently have nothing in the block, just copy the data
       // this especially happens all the time if you call OR on an empty set
       indices[i4096] = index;
-      this.bits[i4096] = Arrays.copyOf(bits, nonZeroLongCount);
+      this.bits[i4096] = ArrayUtil.copyOfSubArray(bits, 0, nonZeroLongCount);
       this.nonZeroLongCount += nonZeroLongCount;
       return;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/util/StringHelper.java b/lucene/core/src/java/org/apache/lucene/util/StringHelper.java
index 4c6d4fa..bdab07b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/StringHelper.java
+++ b/lucene/core/src/java/org/apache/lucene/util/StringHelper.java
@@ -307,7 +307,7 @@
     if (bits.length > ID_LENGTH) {
       assert bits.length == ID_LENGTH + 1;
       assert bits[0] == 0;
-      return Arrays.copyOfRange(bits, 1, bits.length);
+      return ArrayUtil.copyOfSubArray(bits, 1, bits.length);
     } else {
       byte[] result = new byte[ID_LENGTH];
       System.arraycopy(bits, 0, result, result.length - bits.length, bits.length);
diff --git a/lucene/core/src/java/org/apache/lucene/util/Version.java b/lucene/core/src/java/org/apache/lucene/util/Version.java
index 1067f93..80368da 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Version.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Version.java
@@ -90,6 +90,13 @@
   public static final Version LUCENE_7_4_0 = new Version(7, 4, 0);
 
   /**
+   * Match settings and bugs in Lucene's 7.5.0 release.
+   * @deprecated Use latest
+   */
+  @Deprecated
+  public static final Version LUCENE_7_5_0 = new Version(7, 5, 0);
+
+  /**
    * Match settings and bugs in Lucene's 8.0.0 release.
    * <p>
    * Use this to get the latest &amp; greatest settings, bug
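The @deprecated note points callers at the latest version; a specific constant is only needed when an index-time version must be matched. A short sketch of working with Version constants (illustrative only):

    // Illustrative only: resolving and comparing Version constants.
    import org.apache.lucene.util.Version;

    public class VersionSketch {
      public static void main(String[] args) throws Exception {
        System.out.println(Version.LATEST);                    // current release
        Version v = Version.parse("7.5.0");                    // matches LUCENE_7_5_0
        System.out.println(v.onOrAfter(Version.LUCENE_7_4_0)); // true
      }
    }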
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java b/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java
index 704a6c4..60ec865 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java
@@ -137,8 +137,8 @@
       assert Arrays.binarySearch(labels, label) < 0 : "State already has transition labeled: "
           + label;
       
-      labels = Arrays.copyOf(labels, labels.length + 1);
-      states = Arrays.copyOf(states, states.length + 1);
+      labels = ArrayUtil.growExact(labels, labels.length + 1);
+      states = ArrayUtil.growExact(states, states.length + 1);
 
       labels[labels.length - 1] = label;
       return states[states.length - 1] = new State();
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index 7f55895..fb7e1ce 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -656,7 +656,7 @@
 
       if (leafBlockFPs.size() > 0) {
         // Save the first (minimum) value in each leaf block except the first, to build the split value index in the end:
-        leafBlockStartValues.add(Arrays.copyOf(leafValues, packedBytesLength));
+        leafBlockStartValues.add(ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength));
       }
       leafBlockFPs.add(out.getFilePointer());
       checkMaxLeafNodeCount(leafBlockFPs.size());
@@ -687,8 +687,8 @@
           return scratchBytesRef1;
         }
       };
-      assert valuesInOrderAndBounds(leafCount, 0, Arrays.copyOf(leafValues, packedBytesLength),
-          Arrays.copyOfRange(leafValues, (leafCount - 1) * packedBytesLength, leafCount * packedBytesLength),
+      assert valuesInOrderAndBounds(leafCount, 0, ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength),
+          ArrayUtil.copyOfSubArray(leafValues, (leafCount - 1) * packedBytesLength, leafCount * packedBytesLength),
           packedValues, leafDocs, 0);
       writeLeafBlockPackedValues(scratchOut, commonPrefixLengths, leafCount, 0, packedValues);
       out.writeBytes(scratchOut.getBytes(), 0, scratchOut.getPosition());
@@ -1591,8 +1591,8 @@
       reader.getValue(mid, scratchBytesRef1);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
 
-      byte[] minSplitPackedValue = Arrays.copyOf(minPackedValue, packedBytesLength);
-      byte[] maxSplitPackedValue = Arrays.copyOf(maxPackedValue, packedBytesLength);
+      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedBytesLength);
+      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedBytesLength);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
           minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/HeapPointWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/HeapPointWriter.java
index e102651..eb1d48b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/HeapPointWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/HeapPointWriter.java
@@ -18,7 +18,6 @@
 
 import java.io.Closeable;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.util.ArrayUtil;
@@ -116,12 +115,12 @@
     if (docIDs.length == nextWrite) {
       int nextSize = Math.min(maxSize, ArrayUtil.oversize(nextWrite+1, Integer.BYTES));
       assert nextSize > nextWrite: "nextSize=" + nextSize + " vs nextWrite=" + nextWrite;
-      docIDs = Arrays.copyOf(docIDs, nextSize);
+      docIDs = ArrayUtil.growExact(docIDs, nextSize);
       if (singleValuePerDoc == false) {
         if (ordsLong != null) {
-          ordsLong = Arrays.copyOf(ordsLong, nextSize);
+          ordsLong = ArrayUtil.growExact(ordsLong, nextSize);
         } else {
-          ords = Arrays.copyOf(ords, nextSize);
+          ords = ArrayUtil.growExact(ords, nextSize);
         }
       }
     }
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/DeltaPackedLongValues.java b/lucene/core/src/java/org/apache/lucene/util/packed/DeltaPackedLongValues.java
index 80534da..6aabb07 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/DeltaPackedLongValues.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/DeltaPackedLongValues.java
@@ -17,8 +17,7 @@
 package org.apache.lucene.util.packed;
 
 
-import java.util.Arrays;
-
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.packed.PackedInts.Reader;
 
@@ -70,8 +69,8 @@
     public DeltaPackedLongValues build() {
       finish();
       pending = null;
-      final PackedInts.Reader[] values = Arrays.copyOf(this.values, valuesOff);
-      final long[] mins = Arrays.copyOf(this.mins, valuesOff);
+      final PackedInts.Reader[] values = ArrayUtil.copyOfSubArray(this.values, 0, valuesOff);
+      final long[] mins = ArrayUtil.copyOfSubArray(this.mins, 0, valuesOff);
       final long ramBytesUsed = DeltaPackedLongValues.BASE_RAM_BYTES_USED
           + RamUsageEstimator.sizeOf(values) + RamUsageEstimator.sizeOf(mins);
       return new DeltaPackedLongValues(pageShift, pageMask, values, mins, size, ramBytesUsed);
@@ -94,7 +93,7 @@
     void grow(int newBlockCount) {
       super.grow(newBlockCount);
       ramBytesUsed -= RamUsageEstimator.sizeOf(mins);
-      mins = Arrays.copyOf(mins, newBlockCount);
+      mins = ArrayUtil.growExact(mins, newBlockCount);
       ramBytesUsed += RamUsageEstimator.sizeOf(mins);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicLongValues.java b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicLongValues.java
index 09b3ecd..89ad5ab 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicLongValues.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicLongValues.java
@@ -17,10 +17,9 @@
 package org.apache.lucene.util.packed;
 
 
-import java.util.Arrays;
-
 import static org.apache.lucene.util.packed.MonotonicBlockPackedReader.expected;
 
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.packed.PackedInts.Reader;
 
@@ -72,9 +71,9 @@
     public MonotonicLongValues build() {
       finish();
       pending = null;
-      final PackedInts.Reader[] values = Arrays.copyOf(this.values, valuesOff);
-      final long[] mins = Arrays.copyOf(this.mins, valuesOff);
-      final float[] averages = Arrays.copyOf(this.averages, valuesOff);
+      final PackedInts.Reader[] values = ArrayUtil.copyOfSubArray(this.values, 0, valuesOff);
+      final long[] mins = ArrayUtil.copyOfSubArray(this.mins, 0, valuesOff);
+      final float[] averages = ArrayUtil.copyOfSubArray(this.averages, 0, valuesOff);
       final long ramBytesUsed = MonotonicLongValues.BASE_RAM_BYTES_USED
           + RamUsageEstimator.sizeOf(values) + RamUsageEstimator.sizeOf(mins)
           + RamUsageEstimator.sizeOf(averages);
@@ -95,7 +94,7 @@
     void grow(int newBlockCount) {
       super.grow(newBlockCount);
       ramBytesUsed -= RamUsageEstimator.sizeOf(averages);
-      averages = Arrays.copyOf(averages, newBlockCount);
+      averages = ArrayUtil.growExact(averages, newBlockCount);
       ramBytesUsed += RamUsageEstimator.sizeOf(averages);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/PackedLongValues.java b/lucene/core/src/java/org/apache/lucene/util/packed/PackedLongValues.java
index 0daf062..19788b7 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/PackedLongValues.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/PackedLongValues.java
@@ -19,8 +19,6 @@
 
 import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
 
-import java.util.Arrays;
-
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LongValues;
@@ -194,7 +192,7 @@
     public PackedLongValues build() {
       finish();
       pending = null;
-      final PackedInts.Reader[] values = Arrays.copyOf(this.values, valuesOff);
+      final PackedInts.Reader[] values = ArrayUtil.copyOfSubArray(this.values, 0, valuesOff);
       final long ramBytesUsed = PackedLongValues.BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values);
       return new PackedLongValues(pageShift, pageMask, values, size, ramBytesUsed);
     }
@@ -273,7 +271,7 @@
 
     void grow(int newBlockCount) {
       ramBytesUsed -= RamUsageEstimator.shallowSizeOf(values);
-      values = Arrays.copyOf(values, newBlockCount);
+      values = ArrayUtil.growExact(values, newBlockCount);
       ramBytesUsed += RamUsageEstimator.shallowSizeOf(values);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCharacterUtils.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCharacterUtils.java
index 53b3f56..438e5e3 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCharacterUtils.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCharacterUtils.java
@@ -20,9 +20,9 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.Arrays;
 
 import org.apache.lucene.analysis.CharacterUtils.CharacterBuffer;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
@@ -42,7 +42,7 @@
     final int codePointCount = CharacterUtils.toCodePoints(orig, o1, orig.length - o1, buf, o2);
     final int charCount = CharacterUtils.toChars(buf, o2, codePointCount, restored, o3);
     assertEquals(orig.length - o1, charCount);
-    assertArrayEquals(Arrays.copyOfRange(orig, o1, o1 + charCount), Arrays.copyOfRange(restored, o3, o3 + charCount));
+    assertArrayEquals(ArrayUtil.copyOfSubArray(orig, o1, o1 + charCount), ArrayUtil.copyOfSubArray(restored, o3, o3 + charCount));
   }
 
   @Test
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java b/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java
index 045b19a..62d06d8 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/compressing/AbstractTestCompressionMode.java
@@ -22,6 +22,7 @@
 
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -60,7 +61,7 @@
     ByteArrayDataOutput out = new ByteArrayDataOutput(compressed);
     compressor.compress(decompressed, off, len, out);
     final int compressedLen = out.getPosition();
-    return Arrays.copyOf(compressed, compressedLen);
+    return ArrayUtil.copyOfSubArray(compressed, 0, compressedLen);
   }
 
   byte[] decompress(byte[] compressed, int originalLength) throws IOException {
@@ -71,14 +72,14 @@
   static byte[] decompress(Decompressor decompressor, byte[] compressed, int originalLength) throws IOException {
     final BytesRef bytes = new BytesRef();
     decompressor.decompress(new ByteArrayDataInput(compressed), originalLength, 0, originalLength, bytes);
-    return Arrays.copyOfRange(bytes.bytes, bytes.offset, bytes.offset + bytes.length);
+    return BytesRef.deepCopyOf(bytes).bytes;
   }
 
   byte[] decompress(byte[] compressed, int originalLength, int offset, int length) throws IOException {
     Decompressor decompressor = mode.newDecompressor();
     final BytesRef bytes = new BytesRef();
     decompressor.decompress(new ByteArrayDataInput(compressed), originalLength, offset, length, bytes);
-    return Arrays.copyOfRange(bytes.bytes, bytes.offset, bytes.offset + bytes.length);
+    return BytesRef.deepCopyOf(bytes).bytes;
   }
 
   public void testDecompress() throws IOException {
@@ -89,7 +90,7 @@
       final int len = random().nextBoolean() ? decompressed.length - off : TestUtil.nextInt(random(), 0, decompressed.length - off);
       final byte[] compressed = compress(decompressed, off, len);
       final byte[] restored = decompress(compressed, len);
-      assertArrayEquals(Arrays.copyOfRange(decompressed, off, off+len), restored);
+      assertArrayEquals(ArrayUtil.copyOfSubArray(decompressed, off, off+len), restored);
     }
   }
 
@@ -106,7 +107,7 @@
         length = random().nextInt(decompressed.length - offset);
       }
       final byte[] restored = decompress(compressed, decompressed.length, offset, length);
-      assertArrayEquals(Arrays.copyOfRange(decompressed, offset, offset + length), restored);
+      assertArrayEquals(ArrayUtil.copyOfSubArray(decompressed, offset, offset + length), restored);
     }
   }
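For context, the two decompress helpers above swap a manual Arrays.copyOfRange for BytesRef.deepCopyOf. That substitution works because deepCopyOf allocates a fresh array holding exactly the valid [offset, offset + length) window with offset reset to 0, so its bytes field is precisely the decompressed payload. A minimal sketch of the behaviour being relied on (illustrative scratch code, not part of the patch):

    import org.apache.lucene.util.BytesRef;

    public class DeepCopySketch {
      public static void main(String[] args) {
        byte[] backing = {9, 9, 1, 2, 3, 9};        // only {1, 2, 3} is the valid window
        BytesRef ref = new BytesRef(backing, 2, 3); // offset = 2, length = 3

        // deepCopyOf copies just the valid window into a new array with offset 0,
        // so its bytes field stands in for copyOfRange(bytes, offset, offset + length).
        byte[] slice = BytesRef.deepCopyOf(ref).bytes;
        System.out.println(slice.length == 3 && slice[0] == 1 && slice[2] == 3); // true
      }
    }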
 
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestForUtil.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestForUtil.java
index 3fe003e..e13645f 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestForUtil.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestForUtil.java
@@ -22,13 +22,13 @@
 import static org.apache.lucene.codecs.lucene50.ForUtil.MAX_ENCODED_SIZE;
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.packed.PackedInts;
 
@@ -39,7 +39,7 @@
   public void testEncodeDecode() throws IOException {
     final int iterations = RandomNumbers.randomIntBetween(random(), 1, 1000);
     final float acceptableOverheadRatio = random().nextFloat();
-    final int[] values = new int[(iterations - 1) * BLOCK_SIZE + ForUtil.MAX_DATA_SIZE];
+    final int[] values = new int[iterations * BLOCK_SIZE];
     for (int i = 0; i < iterations; ++i) {
       final int bpv = random().nextInt(32);
       if (bpv == 0) {
@@ -64,9 +64,9 @@
       final ForUtil forUtil = new ForUtil(acceptableOverheadRatio, out);
       
       for (int i = 0; i < iterations; ++i) {
-        forUtil.writeBlock(
-            Arrays.copyOfRange(values, i * BLOCK_SIZE, values.length),
-            new byte[MAX_ENCODED_SIZE], out);
+        // Although values after BLOCK_SIZE are garbage, we need to allocate extra bytes to avoid AIOOBE.
+        int[] block = ArrayUtil.grow(ArrayUtil.copyOfSubArray(values, i*BLOCK_SIZE, (i+1)*BLOCK_SIZE), MAX_DATA_SIZE);
+        forUtil.writeBlock(block, new byte[MAX_ENCODED_SIZE], out);
       }
       endPointer = out.getFilePointer();
       out.close();
@@ -83,8 +83,8 @@
         }
         final int[] restored = new int[MAX_DATA_SIZE];
         forUtil.readBlock(in, new byte[MAX_ENCODED_SIZE], restored);
-        assertArrayEquals(Arrays.copyOfRange(values, i * BLOCK_SIZE, (i + 1) * BLOCK_SIZE),
-            Arrays.copyOf(restored, BLOCK_SIZE));
+        assertArrayEquals(ArrayUtil.copyOfSubArray(values, i*BLOCK_SIZE, (i+1)*BLOCK_SIZE),
+            ArrayUtil.copyOfSubArray(restored, 0, BLOCK_SIZE));
       }
       assertEquals(endPointer, in.getFilePointer());
       in.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 4da5059..6e2bd13 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -18,7 +18,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Map;
 import java.util.Random;
 
@@ -238,7 +237,7 @@
       docs = ArrayUtil.grow(docs, numDocs + 1);
       docs[numDocs + 1] = docID;
     }
-    return Arrays.copyOf(docs, numDocs);
+    return ArrayUtil.copyOfSubArray(docs, 0, numDocs);
   }
 
   public static class RangeMergePolicy extends MergePolicy {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
index a9e2891..66b895a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -19,7 +19,6 @@
 
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Random;
 
@@ -38,6 +37,7 @@
 import org.apache.lucene.search.similarities.ClassicSimilarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -227,7 +227,7 @@
 
     // adjust the expected doc numbers according to our filler docs
     if (0 < NUM_FILLER_DOCS) {
-      expDocNrs = Arrays.copyOf(expDocNrs, expDocNrs.length);
+      expDocNrs = ArrayUtil.copyOfSubArray(expDocNrs, 0, expDocNrs.length);
       for (int i=0; i < expDocNrs.length; i++) {
         expDocNrs[i] = PRE_FILLER_DOCS + ((NUM_FILLER_DOCS + 1) * expDocNrs[i]);
       }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java b/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
index 5a5a3ae..49b0e11 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDoubleValuesSource.java
@@ -31,6 +31,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -122,7 +123,7 @@
     };
     Collections.shuffle(Arrays.asList(fields), random());
     int numSorts = TestUtil.nextInt(random(), 1, fields.length);
-    return new Sort(Arrays.copyOfRange(fields, 0, numSorts));
+    return new Sort(ArrayUtil.copyOfSubArray(fields, 0, numSorts));
   }
 
   // Take a Sort, and replace any field sorts with Sortables
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java b/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
index 8b20be5..4c77e7e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
@@ -27,6 +27,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -103,7 +104,7 @@
     };
     Collections.shuffle(Arrays.asList(fields), random());
     int numSorts = TestUtil.nextInt(random(), 1, fields.length);
-    return new Sort(Arrays.copyOfRange(fields, 0, numSorts));
+    return new Sort(ArrayUtil.copyOfSubArray(fields, 0, numSorts));
   }
 
   // Take a Sort, and replace any field sorts with Sortables
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
index eb31128..7bd235e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -45,6 +45,7 @@
 import org.apache.lucene.search.similarities.BM25Similarity;
 import org.apache.lucene.search.similarities.ClassicSimilarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.junit.AfterClass;
@@ -731,7 +732,7 @@
   public void testTopPhrases() throws IOException {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
-    String[] docs = Arrays.copyOf(DOCS, DOCS.length);
+    String[] docs = ArrayUtil.copyOfSubArray(DOCS, 0, DOCS.length);
     Collections.shuffle(Arrays.asList(docs), random());
     for (String value : DOCS) {
       Document doc = new Document();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanationsWithFillerDocs.java b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanationsWithFillerDocs.java
index 9f50668..7e8a7614 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanationsWithFillerDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanationsWithFillerDocs.java
@@ -16,12 +16,11 @@
  */
 package org.apache.lucene.search;
 
-import java.util.Arrays;
-
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 
@@ -101,7 +100,7 @@
   @Override
   public void qtest(Query q, int[] expDocNrs) throws Exception {
 
-    expDocNrs = Arrays.copyOf(expDocNrs, expDocNrs.length);
+    expDocNrs = ArrayUtil.copyOfSubArray(expDocNrs, 0, expDocNrs.length);
     for (int i=0; i < expDocNrs.length; i++) {
       expDocNrs[i] = PRE_FILLER_DOCS + ((NUM_FILLER_DOCS + 1) * expDocNrs[i]);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/util/BaseSortTestCase.java b/lucene/core/src/test/org/apache/lucene/util/BaseSortTestCase.java
index 2db901b..5053223 100644
--- a/lucene/core/src/test/org/apache/lucene/util/BaseSortTestCase.java
+++ b/lucene/core/src/test/org/apache/lucene/util/BaseSortTestCase.java
@@ -48,7 +48,7 @@
 
   public void assertSorted(Entry[] original, Entry[] sorted) {
     assertEquals(original.length, sorted.length);
-    Entry[] actuallySorted = Arrays.copyOf(original, original.length);
+    Entry[] actuallySorted = ArrayUtil.copyOfSubArray(original, 0, original.length);
     Arrays.sort(actuallySorted);
     for (int i = 0; i < original.length; ++i) {
       assertEquals(actuallySorted[i].value, sorted[i].value);
@@ -64,7 +64,7 @@
     System.arraycopy(arr, 0, toSort, o, arr.length);
     final Sorter sorter = newSorter(toSort);
     sorter.sort(o, o + arr.length);
-    assertSorted(arr, Arrays.copyOfRange(toSort, o, o + arr.length));
+    assertSorted(arr, ArrayUtil.copyOfSubArray(toSort, o, o + arr.length));
   }
 
   enum Strategy {
diff --git a/lucene/core/src/test/org/apache/lucene/util/StressRamUsageEstimator.java b/lucene/core/src/test/org/apache/lucene/util/StressRamUsageEstimator.java
index 7a2712f..cb743f1 100644
--- a/lucene/core/src/test/org/apache/lucene/util/StressRamUsageEstimator.java
+++ b/lucene/core/src/test/org/apache/lucene/util/StressRamUsageEstimator.java
@@ -16,9 +16,6 @@
  */
 package org.apache.lucene.util;
 
-
-import java.util.Arrays;
-
 /**
  * Estimates how {@link RamUsageEstimator} estimates physical memory consumption
  * of Java objects. 
@@ -88,7 +85,7 @@
 
         // Make another batch of objects.
         Object[] seg =  new Object[10000];
-        all = Arrays.copyOf(all, all.length + 1);
+        all = ArrayUtil.growExact(all, all.length + 1);
         all[all.length - 1] = seg;
         for (int i = 0; i < seg.length; i++) {
           seg[i] = new byte[random().nextInt(7)];
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestArrayUtil.java b/lucene/core/src/test/org/apache/lucene/util/TestArrayUtil.java
index 0cda337..285df74 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestArrayUtil.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestArrayUtil.java
@@ -21,6 +21,9 @@
 import java.util.Comparator;
 import java.util.Random;
 
+import static org.apache.lucene.util.ArrayUtil.copyOfSubArray;
+import static org.apache.lucene.util.ArrayUtil.growExact;
+
 public class TestArrayUtil extends LuceneTestCase {
 
   // Ensure ArrayUtil.getNextSize gives linear amortized cost of realloc/copy
@@ -294,4 +297,88 @@
       }
     }
   }
+
+  public void testGrowExact() {
+    assertArrayEquals(new short[]{1, 2, 3, 0}, growExact(new short[]{1, 2, 3}, 4));
+    assertArrayEquals(new short[]{1, 2, 3, 0, 0}, growExact(new short[]{1, 2, 3}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new short[]{1, 2, 3}, random().nextInt(3)));
+
+    assertArrayEquals(new int[]{1, 2, 3, 0}, growExact(new int[]{1, 2, 3}, 4));
+    assertArrayEquals(new int[]{1, 2, 3, 0, 0}, growExact(new int[]{1, 2, 3}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new int[]{1, 2, 3}, random().nextInt(3)));
+
+    assertArrayEquals(new long[]{1, 2, 3, 0}, growExact(new long[]{1, 2, 3}, 4));
+    assertArrayEquals(new long[]{1, 2, 3, 0, 0}, growExact(new long[]{1, 2, 3}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new long[]{1, 2, 3}, random().nextInt(3)));
+
+    assertArrayEquals(new float[]{0.1f, 0.2f, 0.3f, 0}, growExact(new float[]{0.1f, 0.2f, 0.3f}, 4), 0.001f);
+    assertArrayEquals(new float[]{0.1f, 0.2f, 0.3f, 0, 0}, growExact(new float[]{0.1f, 0.2f, 0.3f}, 5), 0.001f);
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new float[]{0.1f, 0.2f, 0.3f}, random().nextInt(3)));
+
+    assertArrayEquals(new double[]{0.1, 0.2, 0.3, 0.0}, growExact(new double[]{0.1, 0.2, 0.3}, 4), 0.001);
+    assertArrayEquals(new double[]{0.1, 0.2, 0.3, 0.0, 0.0}, growExact(new double[]{0.1, 0.2, 0.3}, 5), 0.001);
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new double[]{0.1, 0.2, 0.3}, random().nextInt(3)));
+
+    assertArrayEquals(new byte[]{1, 2, 3, 0}, growExact(new byte[]{1, 2, 3}, 4));
+    assertArrayEquals(new byte[]{1, 2, 3, 0, 0}, growExact(new byte[]{1, 2, 3}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new byte[]{1, 2, 3}, random().nextInt(3)));
+
+    assertArrayEquals(new char[]{'a', 'b', 'c', '\0'}, growExact(new char[]{'a', 'b', 'c'}, 4));
+    assertArrayEquals(new char[]{'a', 'b', 'c', '\0', '\0'}, growExact(new char[]{'a', 'b', 'c'}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new char[]{'a', 'b', 'c'}, random().nextInt(3)));
+
+    assertArrayEquals(new String[]{"a1", "b2", "c3", null}, growExact(new String[]{"a1", "b2", "c3"}, 4));
+    assertArrayEquals(new String[]{"a1", "b2", "c3", null, null}, growExact(new String[]{"a1", "b2", "c3"}, 5));
+    expectThrows(IndexOutOfBoundsException.class, () -> growExact(new String[]{"a", "b", "c"}, random().nextInt(3)));
+  }
+
+  public void testCopyOfSubArray() {
+    short[] shortArray = {1, 2, 3};
+    assertArrayEquals(new short[]{1}, copyOfSubArray(shortArray, 0, 1));
+    assertArrayEquals(new short[]{1, 2, 3}, copyOfSubArray(shortArray, 0, 3));
+    assertEquals(0, copyOfSubArray(shortArray, 0, 0).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(shortArray, 0, 4 + random().nextInt(10)));
+
+    int[] intArray = {1, 2, 3};
+    assertArrayEquals(new int[]{1, 2}, copyOfSubArray(intArray, 0, 2));
+    assertArrayEquals(new int[]{1, 2, 3}, copyOfSubArray(intArray, 0, 3));
+    assertEquals(0, copyOfSubArray(intArray, 1, 1).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(intArray, 1, 4 + random().nextInt(10)));
+
+    long[] longArray = {1, 2, 3};
+    assertArrayEquals(new long[]{2}, copyOfSubArray(longArray, 1, 2));
+    assertArrayEquals(new long[]{1, 2, 3}, copyOfSubArray(longArray, 0, 3));
+    assertEquals(0, copyOfSubArray(longArray, 2, 2).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(longArray, 2, 4 + random().nextInt(10)));
+
+    float[] floatArray = {0.1f, 0.2f, 0.3f};
+    assertArrayEquals(new float[]{0.2f, 0.3f}, copyOfSubArray(floatArray, 1, 3), 0.001f);
+    assertArrayEquals(new float[]{0.1f, 0.2f, 0.3f}, copyOfSubArray(floatArray, 0, 3), 0.001f);
+    assertEquals(0, copyOfSubArray(floatArray, 0, 0).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(floatArray, 3, 4 + random().nextInt(10)));
+
+    double[] doubleArray = {0.1, 0.2, 0.3};
+    assertArrayEquals(new double[]{0.3}, copyOfSubArray(doubleArray, 2, 3), 0.001);
+    assertArrayEquals(new double[]{0.1, 0.2, 0.3}, copyOfSubArray(doubleArray, 0, 3), 0.001);
+    assertEquals(0, copyOfSubArray(doubleArray, 1, 1).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(doubleArray, 0, 4 + random().nextInt(10)));
+
+    byte[] byteArray = {1, 2, 3};
+    assertArrayEquals(new byte[]{1}, copyOfSubArray(byteArray, 0, 1));
+    assertArrayEquals(new byte[]{1, 2, 3}, copyOfSubArray(byteArray, 0, 3));
+    assertEquals(0, copyOfSubArray(byteArray, 1, 1).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(byteArray, 1, 4 + random().nextInt(10)));
+
+    char[] charArray = {'a', 'b', 'c'};
+    assertArrayEquals(new char[]{'a', 'b'}, copyOfSubArray(charArray, 0, 2));
+    assertArrayEquals(new char[]{'a', 'b', 'c'}, copyOfSubArray(charArray, 0, 3));
+    assertEquals(0, copyOfSubArray(charArray, 1, 1).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(charArray, 3, 4));
+
+    String[] objectArray = {"a1", "b2", "c3"};
+    assertArrayEquals(new String[]{"a1"}, copyOfSubArray(objectArray, 0, 1));
+    assertArrayEquals(new String[]{"a1", "b2", "c3"}, copyOfSubArray(objectArray, 0, 3));
+    assertEquals(0, copyOfSubArray(objectArray, 1, 1).length);
+    expectThrows(IndexOutOfBoundsException.class, () -> copyOfSubArray(objectArray, 2, 5));
+  }
 }
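The new tests expect IndexOutOfBoundsException whenever growExact is asked for a smaller target or copyOfSubArray is asked for a window that runs past the array. That behaviour follows if the helpers are thin wrappers around System.arraycopy, which bounds-checks rather than padding or truncating. A sketch of the assumed shape of the int[] overloads (an assumption for illustration, not the verbatim ArrayUtil source; the real class repeats the pattern for every primitive type and Object[]):

    final class ArrayUtilSketch {
      static int[] growExact(int[] array, int newLength) {
        int[] copy = new int[newLength];
        // arraycopy bounds-checks: newLength < array.length throws IndexOutOfBoundsException,
        // unlike Arrays.copyOf, which would silently truncate.
        System.arraycopy(array, 0, copy, 0, array.length);
        return copy;
      }

      static int[] copyOfSubArray(int[] array, int from, int to) {
        int[] copy = new int[to - from];
        // to > array.length or from < 0 throws IndexOutOfBoundsException, never zero padding.
        System.arraycopy(array, from, copy, 0, to - from);
        return copy;
      }
    }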
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
index 3a5bb53..2a869ad 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java
@@ -48,4 +48,12 @@
     // only for 4.x
     assertEquals("\uFFFF", new BytesRef("\uFFFF").utf8ToString());
   }
+
+  public void testInvalidDeepCopy() {
+    BytesRef from = new BytesRef(new byte[] { 1, 2 });
+    from.offset += 1; // now invalid
+    expectThrows(IndexOutOfBoundsException.class, () -> {
+      BytesRef.deepCopyOf(from);
+    });
+  }
 }
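The testInvalidDeepCopy additions here and in the Ref tests below all lean on the same point: deepCopyOf now routes through the bounds-checking copy, so a ref whose offset and length run past its backing array fails fast instead of yielding a silently zero-padded copy. A hedged sketch of the assumed implementation (not the verbatim source):

    public static BytesRef deepCopyOf(BytesRef other) {
      // copyOfSubArray validates offset/length against the backing array and throws
      // IndexOutOfBoundsException for an invalid ref.
      return new BytesRef(ArrayUtil.copyOfSubArray(other.bytes, other.offset, other.offset + other.length), 0, other.length);
    }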
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
index 0a4c884..079b3b7 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java
@@ -125,4 +125,12 @@
       c.subSequence(2, 1);
     });
   }
+  
+  public void testInvalidDeepCopy() {
+    CharsRef from = new CharsRef(new char[] { 'a', 'b' }, 0, 2);
+    from.offset += 1; // now invalid
+    expectThrows(IndexOutOfBoundsException.class, () -> {
+      CharsRef.deepCopyOf(from);
+    });
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
index b997659..654e77d 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestIntsRef.java
@@ -37,4 +37,12 @@
     
     assertFalse(i.equals(i2));
   }
+  
+  public void testInvalidDeepCopy() {
+    IntsRef from = new IntsRef(new int[] { 1, 2 }, 0, 2);
+    from.offset += 1; // now invalid
+    expectThrows(IndexOutOfBoundsException.class, () -> {
+      IntsRef.deepCopyOf(from);
+    });
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestLSBRadixSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestLSBRadixSorter.java
index ba8bd02..b7696c2 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestLSBRadixSorter.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestLSBRadixSorter.java
@@ -37,7 +37,7 @@
   }
 
   public void test(LSBRadixSorter sorter, int[] arr, int len) {
-    final int[] expected = Arrays.copyOf(arr, len);
+    final int[] expected = ArrayUtil.copyOfSubArray(arr, 0, len);
     Arrays.sort(expected);
 
     int numBits = 0;
@@ -50,7 +50,7 @@
     }
 
     sorter.sort(numBits, arr, len);
-    final int[] actual = Arrays.copyOf(arr, len);
+    final int[] actual = ArrayUtil.copyOfSubArray(arr, 0, len);
     assertArrayEquals(expected, actual);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java b/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java
new file mode 100644
index 0000000..ec4575f
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/util/TestLongsRef.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.util;
+
+public class TestLongsRef extends LuceneTestCase {
+  public void testEmpty() {
+    LongsRef i = new LongsRef();
+    assertEquals(LongsRef.EMPTY_LONGS, i.longs);
+    assertEquals(0, i.offset);
+    assertEquals(0, i.length);
+  }
+  
+  public void testFromLongs() {
+    long longs[] = new long[] { 1, 2, 3, 4 };
+    LongsRef i = new LongsRef(longs, 0, 4);
+    assertEquals(longs, i.longs);
+    assertEquals(0, i.offset);
+    assertEquals(4, i.length);
+    
+    LongsRef i2 = new LongsRef(longs, 1, 3);
+    assertEquals(new LongsRef(new long[] { 2, 3, 4 }, 0, 3), i2);
+    
+    assertFalse(i.equals(i2));
+  }
+  
+  public void testInvalidDeepCopy() {
+    LongsRef from = new LongsRef(new long[] { 1, 2 }, 0, 2);
+    from.offset += 1; // now invalid
+    expectThrows(IndexOutOfBoundsException.class, () -> {
+      LongsRef.deepCopyOf(from);
+    });
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestMSBRadixSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestMSBRadixSorter.java
index 52eb494..efd1f03 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestMSBRadixSorter.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestMSBRadixSorter.java
@@ -23,7 +23,7 @@
 public class TestMSBRadixSorter extends LuceneTestCase {
 
   private void test(BytesRef[] refs, int len) {
-    BytesRef[] expected = Arrays.copyOf(refs, len);
+    BytesRef[] expected = ArrayUtil.copyOfSubArray(refs, 0, len);
     Arrays.sort(expected);
 
     int maxLength = 0;
@@ -63,7 +63,7 @@
         refs[j] = tmp;
       }
     }.sort(0, len);
-    BytesRef[] actual = Arrays.copyOf(refs, len);
+    BytesRef[] actual = ArrayUtil.copyOfSubArray(refs, 0, len);
     assertArrayEquals(expected, actual);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestStringMSBRadixSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestStringMSBRadixSorter.java
index c83ff67..c4ee68b 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestStringMSBRadixSorter.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestStringMSBRadixSorter.java
@@ -21,7 +21,7 @@
 public class TestStringMSBRadixSorter extends LuceneTestCase {
 
   private void test(BytesRef[] refs, int len) {
-    BytesRef[] expected = Arrays.copyOf(refs, len);
+    BytesRef[] expected = ArrayUtil.copyOfSubArray(refs, 0, len);
     Arrays.sort(expected);
 
     new StringMSBRadixSorter() {
@@ -38,7 +38,7 @@
         refs[j] = tmp;
       }
     }.sort(0, len);
-    BytesRef[] actual = Arrays.copyOf(refs, len);
+    BytesRef[] actual = ArrayUtil.copyOfSubArray(refs, 0, len);
     assertArrayEquals(expected, actual);
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
index a675e0b..69c1b3f 100644
--- a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
+++ b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.LongValues;
 import org.apache.lucene.util.LongsRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -903,8 +904,8 @@
         // 3. re-encode
         final long[] blocks2 = new long[blocksOffset2 + blocksLen];
         encoder.encode(values, valuesOffset, blocks2, blocksOffset2, longIterations);
-        assertArrayEquals(msg, Arrays.copyOfRange(blocks, blocksOffset, blocks.length),
-            Arrays.copyOfRange(blocks2, blocksOffset2, blocks2.length));
+        assertArrayEquals(msg, ArrayUtil.copyOfSubArray(blocks, blocksOffset, blocks.length),
+            ArrayUtil.copyOfSubArray(blocks2, blocksOffset2, blocks2.length));
         // test encoding from int[]
         if (bpv <= 32) {
           final long[] blocks3 = new long[blocks2.length];
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
index cb24f6f..fe5317e 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSorts.java
@@ -40,6 +40,7 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -109,7 +110,7 @@
       };
       Collections.shuffle(Arrays.asList(fields), random());
       int numSorts = TestUtil.nextInt(random(), 1, fields.length);
-      assertQuery(query, new Sort(Arrays.copyOfRange(fields, 0, numSorts)));
+      assertQuery(query, new Sort(ArrayUtil.copyOfSubArray(fields, 0, numSorts)));
     }
   }
 
diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties
index 981bbe6..49a1190 100644
--- a/lucene/ivy-versions.properties
+++ b/lucene/ivy-versions.properties
@@ -60,7 +60,7 @@
 /dom4j/dom4j = 1.6.1
 /info.ganglia.gmetric4j/gmetric4j = 1.0.7
 
-io.dropwizard.metrics.version = 3.2.2
+io.dropwizard.metrics.version = 3.2.6
 /io.dropwizard.metrics/metrics-core = ${io.dropwizard.metrics.version}
 /io.dropwizard.metrics/metrics-ganglia = ${io.dropwizard.metrics.version}
 /io.dropwizard.metrics/metrics-graphite = ${io.dropwizard.metrics.version}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalQuery.java
index 306c059..c1125c2 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalQuery.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.search.intervals;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Objects;
 import java.util.Set;
@@ -36,6 +35,7 @@
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.ArrayUtil;
 
 /**
  * A query that retrieves documents containing intervals returned from an
@@ -89,7 +89,7 @@
       return null;
     }
     CollectionStatistics collectionStats = searcher.collectionStatistics(field);
-    return searcher.getSimilarity().scorer(boost, collectionStats, Arrays.copyOf(termStats, termUpTo));
+    return searcher.getSimilarity().scorer(boost, collectionStats, ArrayUtil.copyOfSubArray(termStats, 0, termUpTo));
   }
 
   @Override
diff --git a/lucene/sandbox/src/test/org/apache/lucene/document/TestHalfFloatPoint.java b/lucene/sandbox/src/test/org/apache/lucene/document/TestHalfFloatPoint.java
index 0bcb3f8..9f70808 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/document/TestHalfFloatPoint.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/document/TestHalfFloatPoint.java
@@ -89,7 +89,7 @@
         values[o++] = v;
       }
     }
-    values = Arrays.copyOf(values, o);
+    values = ArrayUtil.copyOfSubArray(values, 0, o);
 
     int iters = atLeast(1000000);
     for (int iter = 0; iter < iters; ++iter) {
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
index 3242e7e..9bc947f 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java
@@ -19,7 +19,6 @@
 import java.io.PrintStream;
 import java.text.NumberFormat;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
@@ -245,7 +244,8 @@
 
     protected BytesRef concat(BytesRef source, byte b) {
       //+2 for new char + potential leaf
-      final byte[] buffer = Arrays.copyOfRange(source.bytes, source.offset, source.offset + source.length + 2);
+      final byte[] buffer = new byte[source.length + 2];
+      System.arraycopy(source.bytes, source.offset, buffer, 0, source.length);
       BytesRef target = new BytesRef(buffer);
       target.length = source.length;
       target.bytes[target.length++] = b;
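The concat rewrite above is the one spot where the old call could not be swapped one-for-one: the target is intentionally two bytes longer than the source cell (the appended byte plus a possible leaf marker). Arrays.copyOfRange delivered that by silently zero-padding past the valid window, which is exactly the behaviour banned further below, and ArrayUtil.copyOfSubArray would throw for the out-of-range bound; hence the explicit allocation plus System.arraycopy. A small illustration, assuming a three-byte backing array (fragment, not part of the patch):

    byte[] backing = {7, 8, 9};
    BytesRef source = new BytesRef(backing, 1, 2);               // valid window: {8, 9}
    // Arrays.copyOfRange(backing, 1, 1 + 2 + 2)      -> {8, 9, 0, 0}  (silent zero padding)
    // ArrayUtil.copyOfSubArray(backing, 1, 1 + 2 + 2) -> IndexOutOfBoundsException
    byte[] buffer = new byte[source.length + 2];                 // so allocate the slack explicitly
    System.arraycopy(source.bytes, source.offset, buffer, 0, source.length);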
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
index a6147df..ccbd6df 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
@@ -311,7 +311,7 @@
    */
   private class Geo3dPolygonBuilder extends Geo3dPointBuilder<PolygonBuilder> implements PolygonBuilder {
 
-    List<GeoPolygon> polyHoles;
+    List<GeoPolygonFactory.PolygonDescription> polyHoles = new ArrayList<>();
 
     @Override
     public HoleBuilder hole() {
@@ -321,10 +321,7 @@
     class Geo3dHoleBuilder extends Geo3dPointBuilder<PolygonBuilder.HoleBuilder> implements PolygonBuilder.HoleBuilder {
       @Override
       public PolygonBuilder endHole() {
-        if (polyHoles == null) {
-          polyHoles = new ArrayList<>();
-        }
-        polyHoles.add(GeoPolygonFactory.makeGeoPolygon(planetModel, points));
+        polyHoles.add(new GeoPolygonFactory.PolygonDescription(points));
         return Geo3dPolygonBuilder.this;
       }
     }
@@ -332,7 +329,8 @@
     @SuppressWarnings("unchecked")
     @Override
     public Shape build() {
-      GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, points, polyHoles);
+      GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points, polyHoles);
+      GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, description);
       return new Geo3dShape<>(polygon, context);
     }
 
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
index 3e3b2e2..989252e 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
@@ -18,7 +18,6 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
@@ -31,6 +30,7 @@
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.spatial.serialized.SerializedDVStrategy;
 import org.apache.lucene.spatial.vector.PointVectorStrategy;
+import org.apache.lucene.util.ArrayUtil;
 import org.junit.Test;
 import org.locationtech.spatial4j.context.SpatialContext;
 import org.locationtech.spatial4j.shape.Point;
@@ -107,7 +107,7 @@
 
   void checkDistValueSource(Point pt, float... distances) throws IOException {
     float multiplier = random().nextFloat() * 100f;
-    float[] dists2 = Arrays.copyOf(distances, distances.length);
+    float[] dists2 = ArrayUtil.copyOfSubArray(distances, 0, distances.length);
     for (int i = 0; i < dists2.length; i++) {
       dists2[i] *= multiplier;
     }
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
index d3b144f..eb6ed5b 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
@@ -18,9 +18,11 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import org.apache.lucene.spatial.SpatialTestData;
 import org.apache.lucene.spatial.composite.CompositeSpatialStrategy;
 import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
@@ -96,7 +98,7 @@
     points.add(new GeoPoint(planetModel, 14 * DEGREES_TO_RADIANS, -180 * DEGREES_TO_RADIANS));
     points.add(new GeoPoint(planetModel, -15 * DEGREES_TO_RADIANS, 153 * DEGREES_TO_RADIANS));
 
-    final Shape triangle = new Geo3dShape(GeoPolygonFactory.makeGeoPolygon(planetModel, points),ctx);
+    final Shape triangle = new Geo3dShape<>(GeoPolygonFactory.makeGeoPolygon(planetModel, points),ctx);
     final Rectangle rect = ctx.makeRectangle(-49, -45, 73, 86);
     testOperation(rect,SpatialOperation.Intersects,triangle, false);
   }
@@ -116,7 +118,7 @@
         new GeoPoint(planetModel, 54.0 * DEGREES_TO_RADIANS, 165.0 * DEGREES_TO_RADIANS),
         new GeoPoint(planetModel, -90.0 * DEGREES_TO_RADIANS, 0.0)};
     final GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 29 * DEGREES_TO_RADIANS, pathPoints);
-    final Shape shape = new Geo3dShape(path,ctx);
+    final Shape shape = new Geo3dShape<>(path,ctx);
     final Rectangle rect = ctx.makeRectangle(131, 143, 39, 54);
     testOperation(rect,SpatialOperation.Intersects,shape,true);
   }
@@ -146,6 +148,23 @@
     return new Geo3dShape<>(areaShape, ctx);
   }
 
+  @Test
+  public void testOperationsFromFile() throws IOException {
+    setupStrategy();
+    final Iterator<SpatialTestData> indexedSpatialData = getSampleData( "states-poly.txt");
+    final List<Shape> indexedShapes = new ArrayList<>();
+    while(indexedSpatialData.hasNext()) {
+      indexedShapes.add(indexedSpatialData.next().shape);
+    }
+    final Iterator<SpatialTestData> querySpatialData = getSampleData( "states-bbox.txt");
+    final List<Shape> queryShapes = new ArrayList<>();
+    while(querySpatialData.hasNext()) {
+      queryShapes.add(querySpatialData.next().shape);
+      queryShapes.add(randomQueryShape());
+    }
+    testOperation(SpatialOperation.Intersects, indexedShapes, queryShapes, random().nextBoolean());
+  }
+
   //TODO move to a new test class?
   @Test
   public void testWKT() throws Exception {
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java
index 2c5dcd8..c25b44d 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestContextQuery.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.search.suggest.document;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
@@ -521,7 +520,7 @@
           query.addContext(contexts.get(i), i + 1);
         }
         TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false);
-        assertSuggestions(suggest, Arrays.copyOfRange(expectedResults, 0, 4));
+        assertSuggestions(suggest, ArrayUtil.copyOfSubArray(expectedResults, 0, 4));
       }
     }
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index 60e2cca..82d8adb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -559,7 +559,7 @@
       for (int j = 0; j < data[docId].length; ++j) {
         final byte[] arr = data[docId][j];
         final BytesRef arr2Ref = doc.getBinaryValue("bytes" + j);
-        final byte[] arr2 = Arrays.copyOfRange(arr2Ref.bytes, arr2Ref.offset, arr2Ref.offset + arr2Ref.length);
+        final byte[] arr2 = BytesRef.deepCopyOf(arr2Ref).bytes;
         assertArrayEquals(arr, arr2);
       }
     }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/BlockScoreQueryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/search/BlockScoreQueryWrapper.java
index 3b9a740..b15fa28 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/BlockScoreQueryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/BlockScoreQueryWrapper.java
@@ -97,10 +97,8 @@
         DocIdSetIterator it = inScorer.iterator();
         int i = 1;
         for (int doc = it.nextDoc(); ; doc = it.nextDoc()) {
-          if (i == tmpDocs.length) {
-            tmpDocs = ArrayUtil.grow(tmpDocs);
-            tmpScores = Arrays.copyOf(tmpScores, tmpDocs.length);
-          }
+          tmpDocs = ArrayUtil.grow(tmpDocs, i + 1);
+          tmpScores = ArrayUtil.grow(tmpScores, i + 1);
           tmpDocs[i] = doc;
           if (doc == DocIdSetIterator.NO_MORE_DOCS) {
             i++;
@@ -109,8 +107,8 @@
           tmpScores[i] = inScorer.score();
           i++;
         }
-        final int[] docs = Arrays.copyOf(tmpDocs, i);
-        final float[] scores = Arrays.copyOf(tmpScores, i);
+        final int[] docs = ArrayUtil.copyOfSubArray(tmpDocs, 0, i);
+        final float[] scores = ArrayUtil.copyOfSubArray(tmpScores, 0, i);
 
         return new Scorer(inWeight) {
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java
index 8ef4feb..e3f26e4 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.util.automaton;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -253,8 +252,7 @@
         codePoints[codepointCount++] = getRandomCodePoint(r, t.min, t.max);
         s = t.dest;
       }
-
-      return Arrays.copyOf(codePoints, codepointCount);
+      return ArrayUtil.copyOfSubArray(codePoints, 0, codepointCount);
     }
   }
 
diff --git a/lucene/tools/forbiddenApis/lucene.txt b/lucene/tools/forbiddenApis/lucene.txt
index e02bd40..2a1d883 100644
--- a/lucene/tools/forbiddenApis/lucene.txt
+++ b/lucene/tools/forbiddenApis/lucene.txt
@@ -27,3 +27,27 @@
 @defaultMessage Use home-grown methods instead
 java.lang.Math#toRadians(double)
 java.lang.Math#toDegrees(double)
+
+@defaultMessage Prefer using ArrayUtil as Arrays#copyOfRange fills zeros for bad bounds
+java.util.Arrays#copyOfRange(byte[],int,int)
+java.util.Arrays#copyOfRange(char[],int,int)
+java.util.Arrays#copyOfRange(short[],int,int)
+java.util.Arrays#copyOfRange(int[],int,int)
+java.util.Arrays#copyOfRange(long[],int,int)
+java.util.Arrays#copyOfRange(float[],int,int)
+java.util.Arrays#copyOfRange(double[],int,int)
+java.util.Arrays#copyOfRange(boolean[],int,int)
+java.util.Arrays#copyOfRange(java.lang.Object[],int,int)
+java.util.Arrays#copyOfRange(java.lang.Object[],int,int,java.lang.Class)
+
+@defaultMessage Prefer using ArrayUtil as Arrays#copyOf fills zeros for bad bounds
+java.util.Arrays#copyOf(byte[],int)
+java.util.Arrays#copyOf(char[],int)
+java.util.Arrays#copyOf(short[],int)
+java.util.Arrays#copyOf(int[],int)
+java.util.Arrays#copyOf(long[],int)
+java.util.Arrays#copyOf(float[],int)
+java.util.Arrays#copyOf(double[],int)
+java.util.Arrays#copyOf(boolean[],int)
+java.util.Arrays#copyOf(java.lang.Object[],int)
+java.util.Arrays#copyOf(java.lang.Object[],int,java.lang.Class)
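These forbidden-API entries state the rationale behind the mechanical replacements throughout this change: Arrays.copyOf and Arrays.copyOfRange silently zero-pad when the requested size runs past the source, which can mask bugs, while the ArrayUtil replacements fail fast. A short illustration of the difference (scratch code, not part of the patch):

    import java.util.Arrays;
    import org.apache.lucene.util.ArrayUtil;

    public class CopyBoundsSketch {
      public static void main(String[] args) {
        int[] src = {1, 2, 3};

        // Arrays.copyOf quietly pads the missing tail with zeros.
        System.out.println(Arrays.toString(Arrays.copyOf(src, 5))); // [1, 2, 3, 0, 0]

        // ArrayUtil rejects the out-of-range bound instead.
        try {
          ArrayUtil.copyOfSubArray(src, 0, 5);
        } catch (IndexOutOfBoundsException expected) {
          System.out.println("out-of-range copy rejected");
        }
      }
    }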
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6dd4889..3838213 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -45,6 +45,23 @@
   MemoryDocValues). If you used postingsFormat="Memory" or docValuesFormat="Memory"
   switch to "Direct" instead. (Dawid Weiss)
 
+==================  7.5.0 ==================
+
+Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
+
+Versions of Major Components
+---------------------
+Apache Tika 1.17
+Carrot2 3.16.0
+Velocity 1.7 and Velocity Tools 2.0
+Apache UIMA 2.3.1
+Apache ZooKeeper 3.4.11
+Jetty 9.4.10.v20180503
+
+
+(No Changes)
+
+
 ==================  7.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@@ -78,7 +95,7 @@
 
 * SOLR-12396: Upgrade Carrot2 to 3.16.0, HPPC to 0.8.1, morfologik to 2.1.5. (Dawid Weiss)
 
-* SOLR-11200: A new CMS config option 'ioThrottle' to manually enable/disable 
+* SOLR-11200: A new CMS config option 'ioThrottle' to manually enable/disable
   ConcurrentMergeSchedule.doAutoIOThrottle. (Amrit Sarkar, Nawab Zada Asad iqbal via Dawid Weiss)
 
 * SOLR-11670: Implement a periodic house-keeping task. This uses a scheduled autoscaling trigger and
@@ -92,7 +109,7 @@
 
 * SOLR-12139: The "eq" (equals) function query now works with string fields, string literals, and perhaps anything.
   (Andrey Kudryavtsev, David Smiley)
-  
+
 * SOLR-10783: Add support for Hadoop Credential Provider as SSL/TLS store password source.
   (Mano Kovacs via Mark Miller)
 
@@ -126,15 +143,15 @@
 
 * SOLR-12378: Support missing versionField on indexed docs in DocBasedVersionConstraintsURP.
   (Oliver Bates, Michael Braun via Mark Miller)
-  
+
 * SOLR-12388: Enable a strict ZooKeeper-connected search request mode, in which search
-  requests will fail when the coordinating node can't communicate with ZooKeeper, 
+  requests will fail when the coordinating node can't communicate with ZooKeeper,
   by setting the "shards.tolerant" param to "requireZkConnected".  (Steve Rowe)
 
 * SOLR-9685: #Tagging queries in JSON Query DSL, equivalent to LocalParams based query/filter
   tagging.  Multiple tags are comma separated.
   LocalParams Example     : {!tag=colorfilt}color:blue
-  Equivalent JSON Example : { "#colorfilt" : "color:blue" } 
+  Equivalent JSON Example : { "#colorfilt" : "color:blue" }
   (Dmitry Tikhonov, Mikhail Khludnev, yonik)
 
 * SOLR-12328: JSON Facet API: Domain change with graph query.
@@ -145,7 +162,7 @@
 
 * SOLR-12401: Add getValue() and setValue() Stream Evaluators (Joel Bernstein, janhoy)
 
-* SOLR-11779: Basic long-term collection of aggregated metrics. Historical data is
+* SOLR-11779, SOLR-12438: Basic long-term collection of aggregated metrics. Historical data is
   maintained as multi-resolution time series using round-robin databases in the '.system'
   collection. New /admin/metrics/history API allows retrieval of this data in numeric
   or graph formats. (ab)
@@ -157,11 +174,25 @@
 * SOLR-12376: Added the TaggerRequestHandler (AKA SolrTextTagger) for tagging text.  It's used as a component of
   NER/ERD systems including query-understanding.  See the ref guide for more info.  (David Smiley)
 
+* SOLR-12266: Add discrete Fourier transform Stream Evaluators (Joel Bernstein)
+
+* SOLR-12158: Allow the monteCarlo Stream Evaluator to support variables (Joel Bernstein)
+
+* SOLR-11734: Add ones and zeros Stream Evaluators (Joel Bernstein)
+
+* SOLR-12273: Create Stream Evaluators for distance measures (Joel Bernstein)
+
+* SOLR-12159: Add memset Stream Evaluator (Joel Bernstein)
+
+* SOLR-12221: Add valueAt Stream Evaluator (Joel Bernstein)
+
+* SOLR-12175: Add random field type and dynamic field to the default managed-schema (Joel Bernstein)
+
 Bug Fixes
 ----------------------
 
 * SOLR-5351: Fixed More Like This Handler to use all fields provided in mlt.fl when used with
-  content stream. The similarity is calculated between the content stream's value and all 
+  content stream. The similarity is calculated between the content stream's value and all
   fields listed in mlt.fl. (Dawid Weiss)
 
 * SOLR-12103: Raise CryptoKeys.DEFAULT_KEYPAIR_LENGTH from 1024 to 2048. (Mark Miller)
@@ -188,10 +219,10 @@
 
 * SOLR-12172: Fixed race condition that could cause an invalid set of collection properties to be kept in
   memory when multiple collection property changes are done in a short period of time. (Tomás Fernández Löbbe)
-  
-* SOLR-11929: UpdateLog metrics are not initialized on core reload.  (ab, Steve Rowe) 
 
-* SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure: 
+* SOLR-11929: UpdateLog metrics are not initialized on core reload.  (ab, Steve Rowe)
+
+* SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure:
   Server refused connection at: http://127.0.0.1:TEST_PORT/solr  (Mikhail Khludnev, Dawid Weiss, Steve Rowe)
 
 * SOLR-12096: Fixed inconsistent results format of subquery transformer for distributed search (multi-shard).
@@ -240,8 +271,8 @@
 
 * SOLR-12275: wrong caching for {!filters} as well as for `filters` local param in {!parent} and {!child}
   (David Smiley, Mikhail Khluldnev)
-  
-* SOLR-12284: WordBreakSolrSpellchecker will no longer add parenthesis in collations when breaking words in 
+
+* SOLR-12284: WordBreakSolrSpellchecker will no longer add parenthesis in collations when breaking words in
   non-boolean queries. (James Dyer)
 
 * SOLR-12290: Do not close any servlet streams and improve our servlet stream closing prevention code for users
@@ -264,9 +295,9 @@
   about "Invalid Date String". (yonik)
 
 * SOLR-12307: exiting OverseerTriggerThread without endless noise in log when Zookeeper session is expired
-  (Mikhail Khludnev) 
+  (Mikhail Khludnev)
 
-* SOLR-12200: abandon OverseerExitThread when ZkController is closed. (Mikhail Khludnev) 
+* SOLR-12200: abandon OverseerExitThread when ZkController is closed. (Mikhail Khludnev)
 
 * SOLR-12355: Fixes hash conflict in HashJoinStream and OuterHashJoinStream (Dennis Gove)
 
@@ -290,11 +321,13 @@
 
 * SOLR-12271: Fixed bug in how Analytics component reads negative values from float and double fields. (Houston Putman)
 
-* SOLR-12433: Recovering flag of a replica is set equals to leader even it failed to receive update 
+* SOLR-12433: Recovering flag of a replica is set equals to leader even it failed to receive update
   on recovering. (Cao Manh Dat)
 
 * SOLR-12354: Register the /admin/info/key end-point at the startup time to avoid 404 (noble)
 
+* SOLR-12445: Upgrade Dropwizard Metrics to version 3.2.6. (ab)
+
 Optimizations
 ----------------------
 
@@ -336,9 +369,13 @@
   references.  This is consistent with other plugin registries and allows a SolrCore to load faster.
   (Jeff Miller, David Smiley)
 
+* SOLR-12198: Stream Evaluators should not copy matrices needlessly (Joel Bernstein)
+
 Other Changes
 ----------------------
 
+* SOLR-12018: Remove comments.apache.org integration for the Ref Guide; the comments system has been down since December 2017 and there is no concrete plan to bring it back. (Cassandra Targett)
+
 * SOLR-12076: Remove unnecessary printLayout usage in CDCR tests (Varun Thacker)
 
 * SOLR-12086: Fix format problem in FastLRUCache description string shown on Cache Statistics page.
@@ -398,6 +435,8 @@
 * SOLR-12374: Added SolrCore.withSearcher(lambda) to make grabbing the searcher easier than the more awkward
   RefCounted API.  (David Smiley)
 
+* SOLR-12183: Refactor Streaming Expression test cases (Joel Bernstein)
+
 ==================  7.3.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
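A minimal sketch of the SOLR-12374 API mentioned in the entry above, assuming withSearcher takes a lambda over the active SolrIndexSearcher and handles the acquire/decref internally; the class and method names in the sketch itself are made up for illustration.

import java.io.IOException;

import org.apache.solr.core.SolrCore;

// Illustrative only: count documents without manually juggling RefCounted<SolrIndexSearcher>.
class WithSearcherExample {
  static long numDocs(SolrCore core) throws IOException {
    // the lambda runs against the current searcher; the reference is released afterwards
    return core.withSearcher(searcher -> (long) searcher.getIndexReader().numDocs());
  }
}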
@@ -418,7 +457,7 @@
 
 * SOLR-12256: Fixed some eventual-consistency issues with collection aliases by using ZooKeeper.sync(). (David Smiley)
 
-* SOLR-12087: Deleting replicas sometimes fails and causes the replicas to exist in the down 
+* SOLR-12087: Deleting replicas sometimes fails and causes the replicas to exist in the down
   state (Cao Manh Dat)
 
 * SOLR-12146: LIR should skip deleted replicas (Cao Manh Dat)
@@ -434,7 +473,7 @@
 * SOLR-12202: Fix errors in solr-exporter.cmd. (Minoru Osuka via koji)
 
 * SOLR-12316: Do not allow to use absolute URIs for including other files in solrconfig.xml
-  and schema parsing (CVE-2018-8010).  (Ananthesh, Ishan Chattopadhyaya, Uwe Schindler)  
+  and schema parsing (CVE-2018-8010).  (Ananthesh, Ishan Chattopadhyaya, Uwe Schindler)
 
 ==================  7.3.0 ==================
 
@@ -598,7 +637,7 @@
 
 * SOLR-12077: Add support for autoAddReplicas in the collection creation dialog in Admin UI. (shalin)
 
-* SOLR-9510: introducing {!filters param=$fq excludeTags=f} query parser. 
+* SOLR-9510: introducing {!filters param=$fq excludeTags=f} query parser.
   Introducing {!.. filters=$fq excludeTags=t,q} in {!parent} and {!child} (Dr. Oleg Savrasov via Mikhail Khludnev)
 
 Bug Fixes
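A rough illustration of the {!filters} parser from the SOLR-9510 entry above; the parameter names, tag, and field values below are invented for the example and are not taken from the patch.

import org.apache.solr.client.solrj.SolrQuery;

// Illustrative only: the main query reuses the $fqs filters, excluding the one tagged "f".
class FiltersParserExample {
  static SolrQuery build() {
    SolrQuery q = new SolrQuery("{!filters param=$fqs excludeTags=f v=$qq}");
    q.set("qq", "*:*");                                 // the wrapped query
    q.add("fqs", "{!tag=f}color:red", "inStock:true");  // filters; the tagged one is excluded
    return q;
  }
}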
diff --git a/solr/core/src/java/org/apache/solr/api/ApiBag.java b/solr/core/src/java/org/apache/solr/api/ApiBag.java
index 0c0d54b..8a3f972 100644
--- a/solr/core/src/java/org/apache/solr/api/ApiBag.java
+++ b/solr/core/src/java/org/apache/solr/api/ApiBag.java
@@ -35,7 +35,9 @@
 import org.apache.solr.common.SpecProvider;
 import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.JsonSchemaValidator;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.PathTrie;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.common.util.ValidatingJsonMap;
 import org.apache.solr.core.PluginBag;
@@ -45,8 +47,6 @@
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.security.AuthorizationContext;
 import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.common.util.JsonSchemaValidator;
-import org.apache.solr.common.util.PathTrie;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -288,7 +288,7 @@
     try {
       parsedCommands = CommandOperation.readCommands(Collections.singleton(stream), new NamedList());
     } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to parse commands");
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to parse commands",e);
     }
 
     if (validators == null || !validate) {    // no validation possible because we do not have a spec
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
index 90eb0d1..81cf374 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@@ -20,7 +20,10 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Path;
+import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
 
 import com.google.common.base.Strings;
 import org.apache.commons.io.output.ByteArrayOutputStream;
@@ -30,12 +33,15 @@
 import org.apache.solr.client.solrj.StreamingResponseCallback;
 import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
 import org.apache.solr.client.solrj.impl.BinaryRequestWriter.BAOS;
+import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.ContentStream;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.apache.solr.common.util.JavaBinCodec;
 import org.apache.solr.common.util.NamedList;
@@ -130,19 +136,8 @@
     SolrRequestHandler handler = coreContainer.getRequestHandler(path);
     if (handler != null) {
       try {
-        SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), Collections.singleton(new ContentStreamBase() {
-          @Override
-          public InputStream getStream() throws IOException {
-            BAOS baos = new BAOS();
-            new BinaryRequestWriter().write(request, baos);
-            return new ByteArrayInputStream(baos.getbuf());
-          }
-          @Override
-          public String getContentType() {
-            return CommonParams.JAVABIN_MIME;
-
-          }
-        }));
+        SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request));
+        req.getContext().put("httpMethod", request.getMethod().name());
         req.getContext().put(PATH, path);
         SolrQueryResponse resp = new SolrQueryResponse();
         handler.handleRequest(req, resp);
@@ -187,7 +182,7 @@
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + path);
       }
 
-      req = _parser.buildRequestFrom(core, params, request.getContentStreams());
+      req = _parser.buildRequestFrom(core, params, getContentStreams(request));
       req.getContext().put(PATH, path);
       req.getContext().put("httpMethod", request.getMethod().name());
       SolrQueryResponse rsp = new SolrQueryResponse();
@@ -242,6 +237,37 @@
     }
   }
 
+  private Set<ContentStream> getContentStreams(SolrRequest request) throws IOException {
+    if (request.getMethod() == SolrRequest.METHOD.GET) return null;
+    if (request instanceof ContentStreamUpdateRequest) {
+      ContentStreamUpdateRequest csur = (ContentStreamUpdateRequest) request;
+      Collection<ContentStream> cs = csur.getContentStreams();
+      if (cs != null) return new HashSet<>(cs);
+    }
+    RequestWriter.ContentWriter contentWriter = request.getContentWriter(CommonParams.JAVABIN_MIME);
+    final String cType = contentWriter == null ? CommonParams.JAVABIN_MIME : contentWriter.getContentType();
+
+    return Collections.singleton(new ContentStreamBase() {
+
+      @Override
+      public InputStream getStream() throws IOException {
+        BAOS baos = new BAOS();
+        if (contentWriter != null) {
+          contentWriter.write(baos);
+        } else {
+          new BinaryRequestWriter().write(request, baos);
+        }
+        return new ByteArrayInputStream(baos.toByteArray());
+      }
+
+      @Override
+      public String getContentType() {
+        return cType;
+
+      }
+    });
+  }
+
   private JavaBinCodec createJavaBinCodec(final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) {
     return new JavaBinCodec(resolver) {
 
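The refactored getContentStreams above reuses the streams already attached to a ContentStreamUpdateRequest and otherwise serializes the request through its ContentWriter, falling back to javabin. A hedged sketch of a request that now reaches the handler without the extra javabin round-trip; the collection name and payload are assumptions.

import java.io.IOException;

import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
import org.apache.solr.common.util.ContentStreamBase;

// Illustrative only: the attached JSON stream is handed straight to the request parser.
class EmbeddedStreamExample {
  static void indexJson(EmbeddedSolrServer server) throws SolrServerException, IOException {
    ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update/json/docs");
    up.addContentStream(new ContentStreamBase.StringStream("{\"id\":\"1\"}", "application/json"));
    server.request(up, "techproducts"); // hypothetical collection name
  }
}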
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index d546dd2..37a660f 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -26,6 +26,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -43,6 +44,9 @@
 import org.apache.http.config.Lookup;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.store.Directory;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
@@ -59,6 +63,7 @@
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.IOUtils;
 import org.apache.solr.common.util.Utils;
@@ -570,21 +575,7 @@
     containerHandlers.put(METRICS_PATH, metricsHandler);
     metricsHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_PATH);
 
-    if (isZooKeeperAware()) {
-      PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
-      Map<String, Object> initArgs;
-      if (plugin != null && plugin.initArgs != null) {
-        initArgs = plugin.initArgs.asMap(5);
-        initArgs.put(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
-      } else {
-        initArgs = Collections.emptyMap();
-      }
-      metricsHistoryHandler = new MetricsHistoryHandler(getZkController().getNodeName(), metricsHandler,
-          new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
-      .withHttpClient(updateShardHandler.getDefaultHttpClient()).build(), getZkController().getSolrCloudManager(), initArgs);
-      containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
-      metricsHistoryHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_HISTORY_PATH);
-    }
+    createMetricsHistoryHandler();
 
     autoscalingHistoryHandler = createHandler(AUTOSCALING_HISTORY_PATH, AutoscalingHistoryHandler.class.getName(), AutoscalingHistoryHandler.class);
     metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
@@ -748,6 +739,49 @@
     status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
   }
 
+  // MetricsHistoryHandler supports both cloud and standalone configs
+  private void createMetricsHistoryHandler() {
+    PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
+    Map<String, Object> initArgs;
+    if (plugin != null && plugin.initArgs != null) {
+      initArgs = plugin.initArgs.asMap(5);
+      initArgs.put(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
+    } else {
+      initArgs = new HashMap<>();
+    }
+    String name;
+    SolrCloudManager cloudManager;
+    SolrClient client;
+    if (isZooKeeperAware()) {
+      name = getZkController().getNodeName();
+      cloudManager = getZkController().getSolrCloudManager();
+      client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
+          .withHttpClient(updateShardHandler.getDefaultHttpClient()).build();
+    } else {
+      name = getNodeConfig().getNodeName();
+      if (name == null || name.isEmpty()) {
+        name = "localhost";
+      }
+      cloudManager = null;
+      client = new EmbeddedSolrServer(this, CollectionAdminParams.SYSTEM_COLL) {
+        @Override
+        public void close() throws IOException {
+          // do nothing - we close the container ourselves
+        }
+      };
+      // enable local metrics unless specifically set otherwise
+      if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_NODES_PROP)) {
+        initArgs.put(MetricsHistoryHandler.ENABLE_NODES_PROP, true);
+      }
+      if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_REPLICAS_PROP)) {
+        initArgs.put(MetricsHistoryHandler.ENABLE_REPLICAS_PROP, true);
+      }
+    }
+    metricsHistoryHandler = new MetricsHistoryHandler(name, metricsHandler,
+        client, cloudManager, initArgs);
+    containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
+    metricsHistoryHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_HISTORY_PATH);
+  }
 
   public void securityNodeChanged() {
     log.info("Security node changed, reloading security.json");
@@ -792,6 +826,12 @@
 
     ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
     replayUpdatesExecutor.shutdownAndAwaitTermination();
+
+    if (metricsHistoryHandler != null) {
+      IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
+      metricsHistoryHandler.close();
+    }
+
     if (metricManager != null) {
       metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node));
       metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm));
@@ -810,10 +850,6 @@
       } catch (Exception e) {
         log.warn("Error removing live node. Continuing to close CoreContainer", e);
       }
-      if (metricsHistoryHandler != null) {
-        IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
-        metricsHistoryHandler.close();
-      }
       if (metricManager != null) {
         metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster));
       }
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
index 3d48680..03b545f 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
@@ -19,10 +19,13 @@
 import javax.imageio.ImageIO;
 import java.awt.Color;
 import java.awt.image.BufferedImage;
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.net.MalformedURLException;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -45,17 +48,21 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.atomic.DoubleAdder;
 import java.util.function.Function;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.solr.api.Api;
 import org.apache.solr.api.ApiBag;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggestion;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -68,9 +75,12 @@
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.Base64;
+import org.apache.solr.common.util.JavaBinCodec;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.rrd.SolrRrdBackendFactory;
@@ -105,30 +115,32 @@
 public class MetricsHistoryHandler extends RequestHandlerBase implements PermissionNameProvider, Closeable {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final List<String> DEFAULT_CORE_COUNTERS = new ArrayList<String>() {{
-    add("QUERY./select.requests");
-    add("UPDATE./update.requests");
-  }};
-  public static final List<String> DEFAULT_CORE_GAUGES = new ArrayList<String>() {{
-    add("INDEX.sizeInBytes");
-  }};
-  public static final List<String> DEFAULT_NODE_GAUGES = new ArrayList<String>() {{
-    add("CONTAINER.fs.coreRoot.usableSpace");
-  }};
-  public static final List<String> DEFAULT_JVM_GAUGES = new ArrayList<String>() {{
-    add("memory.heap.used");
-    add("os.processCpuLoad");
-    add("os.systemLoadAverage");
-  }};
+  public static final List<String> DEFAULT_CORE_COUNTERS = new ArrayList<>();
+  public static final List<String> DEFAULT_CORE_GAUGES = new ArrayList<>();
+  public static final List<String> DEFAULT_NODE_GAUGES = new ArrayList<>();
+  public static final List<String> DEFAULT_JVM_GAUGES = new ArrayList<>();
 
   public static final String NUM_SHARDS_KEY = "numShards";
   public static final String NUM_REPLICAS_KEY = "numReplicas";
   public static final String NUM_NODES_KEY = "numNodes";
 
-  public static final List<String> DEFAULT_COLLECTION_GAUGES = new ArrayList<String>() {{
-    add(NUM_SHARDS_KEY);
-    add(NUM_REPLICAS_KEY);
-  }};
+  public static final List<String> DEFAULT_COLLECTION_GAUGES = new ArrayList<>();
+
+  static {
+    DEFAULT_JVM_GAUGES.add("memory.heap.used");
+    DEFAULT_JVM_GAUGES.add("os.processCpuLoad");
+    DEFAULT_JVM_GAUGES.add("os.systemLoadAverage");
+
+    DEFAULT_NODE_GAUGES.add("CONTAINER.fs.coreRoot.usableSpace");
+
+    DEFAULT_CORE_GAUGES.add("INDEX.sizeInBytes");
+
+    DEFAULT_CORE_COUNTERS.add("QUERY./select.requests");
+    DEFAULT_CORE_COUNTERS.add("UPDATE./update.requests");
+
+    DEFAULT_COLLECTION_GAUGES.add(NUM_SHARDS_KEY);
+    DEFAULT_COLLECTION_GAUGES.add(NUM_REPLICAS_KEY);
+  }
 
   public static final String COLLECT_PERIOD_PROP = "collectPeriod";
   public static final String SYNC_PERIOD_PROP = "syncPeriod";
@@ -148,6 +160,7 @@
   private final int collectPeriod;
   private final Map<String, List<String>> counters = new HashMap<>();
   private final Map<String, List<String>> gauges = new HashMap<>();
+  private final String overseerUrlScheme;
 
   private final Map<String, RrdDb> knownDbs = new ConcurrentHashMap<>();
 
@@ -166,11 +179,17 @@
     if (pluginArgs != null) {
       args.putAll(pluginArgs);
     }
-    // override from ZK
-    Map<String, Object> props = (Map<String, Object>)cloudManager.getClusterStateProvider()
-        .getClusterProperty("metrics", Collections.emptyMap())
-        .getOrDefault("history", Collections.emptyMap());
-    args.putAll(props);
+    // override from ZK if available
+    if (cloudManager != null) {
+      Map<String, Object> props = (Map<String, Object>)cloudManager.getClusterStateProvider()
+          .getClusterProperty("metrics", Collections.emptyMap())
+          .getOrDefault("history", Collections.emptyMap());
+      args.putAll(props);
+
+      overseerUrlScheme = cloudManager.getClusterStateProvider().getClusterProperty("urlScheme", "http");
+    } else {
+      overseerUrlScheme = "http";
+    }
 
     this.nodeName = nodeName;
     this.enable = Boolean.parseBoolean(String.valueOf(args.getOrDefault(ENABLE_PROP, "true")));
@@ -180,12 +199,12 @@
     this.collectPeriod = Integer.parseInt(String.valueOf(args.getOrDefault(COLLECT_PERIOD_PROP, DEFAULT_COLLECT_PERIOD)));
     int syncPeriod = Integer.parseInt(String.valueOf(args.getOrDefault(SYNC_PERIOD_PROP, SolrRrdBackendFactory.DEFAULT_SYNC_PERIOD)));
 
-    factory = new SolrRrdBackendFactory(solrClient, CollectionAdminParams.SYSTEM_COLL,
-            syncPeriod, cloudManager.getTimeSource());
     this.solrClient = solrClient;
     this.metricsHandler = metricsHandler;
     this.cloudManager = cloudManager;
-    this.timeSource = cloudManager.getTimeSource();
+    this.timeSource = cloudManager != null ? cloudManager.getTimeSource() : TimeSource.NANO_TIME;
+    factory = new SolrRrdBackendFactory(solrClient, CollectionAdminParams.SYSTEM_COLL,
+            syncPeriod, this.timeSource);
 
     counters.put(Group.core.toString(), DEFAULT_CORE_COUNTERS);
     counters.put(Group.node.toString(), Collections.emptyList());
@@ -217,43 +236,60 @@
     }
   }
 
+  // check that .system exists
   public void checkSystemCollection() {
-    // check that .system exists
-    try {
-      if (cloudManager.isClosed() || Thread.interrupted()) {
-        factory.setPersistent(false);
-        return;
-      }
-      ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-      DocCollection systemColl = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
-      if (systemColl == null) {
-        if (logMissingCollection) {
-          log.warn("Missing " + CollectionAdminParams.SYSTEM_COLL + ", keeping metrics history in memory");
-          logMissingCollection = false;
-        }
-        factory.setPersistent(false);
-        return;
-      } else {
-        boolean ready = false;
-        for (Replica r : systemColl.getReplicas()) {
-          if (r.isActive(clusterState.getLiveNodes())) {
-            ready = true;
-            break;
-          }
-        }
-        if (!ready) {
-          log.debug(CollectionAdminParams.SYSTEM_COLL + " not ready yet, keeping metrics history in memory");
+    if (cloudManager != null) {
+      try {
+        if (cloudManager.isClosed() || Thread.interrupted()) {
           factory.setPersistent(false);
           return;
         }
+        ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
+        DocCollection systemColl = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
+        if (systemColl == null) {
+          if (logMissingCollection) {
+            log.warn("Missing " + CollectionAdminParams.SYSTEM_COLL + ", keeping metrics history in memory");
+            logMissingCollection = false;
+          }
+          factory.setPersistent(false);
+          return;
+        } else {
+          boolean ready = false;
+          for (Replica r : systemColl.getReplicas()) {
+            if (r.isActive(clusterState.getLiveNodes())) {
+              ready = true;
+              break;
+            }
+          }
+          if (!ready) {
+            log.debug(CollectionAdminParams.SYSTEM_COLL + " not ready yet, keeping metrics history in memory");
+            factory.setPersistent(false);
+            return;
+          }
+        }
+      } catch (Exception e) {
+        if (logMissingCollection) {
+          log.warn("Error getting cluster state, keeping metrics history in memory", e);
+        }
+        logMissingCollection = false;
+        factory.setPersistent(false);
+        return;
       }
-    } catch (Exception e) {
-      log.warn("Error getting cluster state, keeping metrics history in memory", e);
-      factory.setPersistent(false);
-      return;
+      logMissingCollection = true;
+      factory.setPersistent(true);
+    } else {
+      try {
+        solrClient.query(CollectionAdminParams.SYSTEM_COLL, new SolrQuery(CommonParams.Q, "*:*", CommonParams.ROWS, "0"));
+        factory.setPersistent(true);
+        logMissingCollection = true;
+      } catch (Exception e) {
+        if (logMissingCollection) {
+          log.warn("Error querying .system collection, keeping metrics history in memory", e);
+        }
+        logMissingCollection = false;
+        factory.setPersistent(false);
+      }
     }
-    logMissingCollection = true;
-    factory.setPersistent(true);
   }
 
   public SolrClient getSolrClient() {
@@ -271,7 +307,11 @@
     return factory;
   }
 
-  private boolean isOverseerLeader() {
+  private String getOverseerLeader() {
+    // non-ZK node has no Overseer
+    if (cloudManager == null) {
+      return null;
+    }
     ZkNodeProps props = null;
     try {
       VersionedData data = cloudManager.getDistribStateManager().getData(
@@ -281,24 +321,39 @@
       }
     } catch (KeeperException | IOException | NoSuchElementException e) {
       log.warn("Could not obtain overseer's address, skipping.", e);
-      return false;
+      return null;
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      return false;
+      return null;
     }
     if (props == null) {
-      return false;
+      return null;
     }
     String oid = props.getStr(ID);
     if (oid == null) {
-      return false;
+      return null;
     }
     String[] ids = oid.split("-");
     if (ids.length != 3) { // unknown format
       log.warn("Unknown format of leader id, skipping: " + oid);
-      return false;
+      return null;
     }
-    return nodeName.equals(ids[1]);
+    return ids[1];
+  }
+
+  private boolean amIOverseerLeader() {
+    return amIOverseerLeader(null);
+  }
+
+  private boolean amIOverseerLeader(String leader) {
+    if (leader == null) {
+      leader = getOverseerLeader();
+    }
+    if (leader == null) {
+      return false;
+    } else {
+      return nodeName.equals(leader);
+    }
   }
 
   private void collectMetrics() {
@@ -383,7 +438,7 @@
   }
 
   private void collectGlobalMetrics() {
-    if (!isOverseerLeader()) {
+    if (!amIOverseerLeader()) {
       return;
     }
     Set<String> nodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
@@ -640,11 +695,19 @@
     if (cmd == null) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown 'action' param '" + actionStr + "', supported actions: " + Cmd.actions);
     }
-    Object res = null;
+    final SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
+    rsp.add("metrics", res);
     switch (cmd) {
       case LIST:
         int rows = req.getParams().getInt(CommonParams.ROWS, SolrRrdBackendFactory.DEFAULT_MAX_DBS);
-        res = factory.list(rows);
+        List<Pair<String, Long>> lst = factory.list(rows);
+        lst.forEach(p -> {
+          SimpleOrderedMap<Object> data = new SimpleOrderedMap<>();
+          // RrdDb always uses seconds - convert here for compatibility
+          data.add("lastModified", TimeUnit.SECONDS.convert(p.second(), TimeUnit.MILLISECONDS));
+          data.add("node", nodeName);
+          res.add(p.first(), data);
+        });
         break;
       case GET:
         String name = req.getParams().get(CommonParams.NAME);
@@ -657,15 +720,14 @@
         if (format == null) {
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown 'format' param '" + formatStr + "', supported formats: " + Format.formats);
         }
-        if (!factory.exists(name)) {
-          rsp.add("error", "'" + name + "' doesn't exist");
-        } else {
+        if (factory.exists(name)) {
           // get a throwaway copy (safe to close and discard)
           RrdDb db = new RrdDb(URI_PREFIX + name, true, factory);
-          res = new NamedList<>();
-          NamedList<Object> data = new NamedList<>();
+          SimpleOrderedMap<Object> data = new SimpleOrderedMap<>();
           data.add("data", getDbData(db, dsNames, format, req.getParams()));
-          ((NamedList)res).add(name, data);
+          data.add("lastModified", db.getLastUpdateTime());
+          data.add("node", nodeName);
+          res.add(name, data);
           db.close();
         }
         break;
@@ -674,17 +736,14 @@
         if (name == null) {
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'name' is a required param");
         }
-        if (!factory.exists(name)) {
-          rsp.add("error", "'" + name + "' doesn't exist");
-        } else {
+        if (factory.exists(name)) {
           // get a throwaway copy (safe to close and discard)
           RrdDb db = new RrdDb(URI_PREFIX + name, true, factory);
-          NamedList<Object> map = new NamedList<>();
-          NamedList<Object> status = new NamedList<>();
+          SimpleOrderedMap<Object> status = new SimpleOrderedMap<>();
           status.add("status", getDbStatus(db));
-          map.add(name, status);
+          status.add("node", nodeName);
+          res.add(name, status);
           db.close();
-          res = map;
         }
         break;
       case DELETE:
@@ -700,9 +759,61 @@
         rsp.add("success", "ok");
         break;
     }
-    if (res != null) {
-      rsp.add("metrics", res);
+    // when using in-memory DBs non-overseer node has no access to overseer DBs - in this case
+    // forward the request to Overseer leader if available
+    if (!factory.isPersistent()) {
+      String leader = getOverseerLeader();
+      if (leader != null && !amIOverseerLeader(leader)) {
+        // get & merge remote response
+        NamedList<Object> remoteRes = handleRemoteRequest(leader, req);
+        mergeRemoteRes(rsp, remoteRes);
+      }
     }
+    SimpleOrderedMap<Object> apiState = new SimpleOrderedMap<>();
+    apiState.add("enableReplicas", enableReplicas);
+    apiState.add("enableNodes", enableNodes);
+    apiState.add("mode", enable ? (factory.isPersistent() ? "index" : "memory") : "inactive");
+    if (!factory.isPersistent()) {
+      apiState.add("message", "WARNING: metrics history is not being persisted. Create .system collection to start persisting history.");
+    }
+    rsp.add("state", apiState);
+    rsp.getResponseHeader().add("zkConnected", cloudManager != null);
+  }
+
+  private NamedList<Object> handleRemoteRequest(String nodeName, SolrQueryRequest req) {
+    String baseUrl = Utils.getBaseUrlForNodeName(nodeName, overseerUrlScheme);
+    String url;
+    try {
+      URL u = new URL(baseUrl);
+      u = new URL(u.getProtocol(), u.getHost(), u.getPort(), "/api/cluster/metrics/history");
+      url = u.toString();
+    } catch (MalformedURLException e) {
+      log.warn("Invalid Overseer url '" + baseUrl + "', unable to fetch remote metrics history", e);
+      return null;
+    }
+    // always use javabin
+    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
+    params.set(CommonParams.WT, "javabin");
+    url = url + "?" + params.toString();
+    try {
+      byte[] data = cloudManager.httpRequest(url, SolrRequest.METHOD.GET, null, null, HttpClientUtil.DEFAULT_CONNECT_TIMEOUT, true);
+      // response is always a NamedList
+      try (JavaBinCodec codec = new JavaBinCodec()) {
+        return (NamedList<Object>)codec.unmarshal(new ByteArrayInputStream(data));
+      }
+    } catch (IOException e) {
+      log.warn("Exception forwarding request to Overseer at " + url, e);
+      return null;
+    }
+  }
+
+  private void mergeRemoteRes(SolrQueryResponse rsp, NamedList<Object> remoteRes) {
+    if (remoteRes == null || remoteRes.get("metrics") == null) {
+      return;
+    }
+    NamedList<Object> remoteMetrics = (NamedList<Object>)remoteRes.get("metrics");
+    SimpleOrderedMap localMetrics = (SimpleOrderedMap) rsp.getValues().get("metrics");
+    remoteMetrics.forEach((k, v) -> localMetrics.add(k, v));
   }
 
   private NamedList<Object> getDbStatus(RrdDb db) throws IOException {
@@ -750,7 +861,7 @@
     RrdDef def = db.getRrdDef();
     ArcDef[] arcDefs = def.getArcDefs();
     for (ArcDef arcDef : arcDefs) {
-      SimpleOrderedMap map = new SimpleOrderedMap();
+      SimpleOrderedMap<Object> map = new SimpleOrderedMap<>();
       res.add(arcDef.dump(), map);
       Archive a = db.getArchive(arcDef.getConsolFun(), arcDef.getSteps());
       // startTime / endTime, arcStep are in seconds
@@ -761,22 +872,21 @@
       if (format != Format.GRAPH) {
         // add timestamps separately from values
         long[] timestamps = fd.getTimestamps();
-        str.setLength(0);
-        for (int i = 0; i < timestamps.length; i++) {
-          if (format == Format.LIST) {
-            map.add("timestamps", timestamps[i]);
-          } else {
+        if (format == Format.LIST) {
+          // Arrays.asList works only on arrays of Objects
+          map.add("timestamps", Arrays.stream(timestamps).boxed().collect(Collectors.toList()));
+        } else {
+          str.setLength(0);
+          for (int i = 0; i < timestamps.length; i++) {
             if (i > 0) {
               str.append('\n');
             }
             str.append(String.valueOf(timestamps[i]));
           }
-        }
-        if (format == Format.STRING) {
           map.add("timestamps", str.toString());
         }
       }
-      SimpleOrderedMap values = new SimpleOrderedMap();
+      SimpleOrderedMap<Object> values = new SimpleOrderedMap<>();
       map.add("values", values);
       for (String name : dsNames) {
         double[] vals = fd.getValues(name);
@@ -825,9 +935,7 @@
             values.add(name, str.toString());
             break;
           case LIST:
-            for (int i = 0; i < vals.length; i++) {
-              values.add(name, vals[i]);
-            }
+            values.add(name, Arrays.stream(vals).boxed().collect(Collectors.toList()));
             break;
         }
       }
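With the response changes above, each action reports its payload under a "metrics" map keyed by database name (with "lastModified" and "node" per entry), plus a "state" section. A sketch of a client reading the LIST action, assuming the handler is still registered at /admin/metrics/history; variable names are illustrative.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

// Illustrative only: list the known history databases and print when each was last modified.
class MetricsHistoryListExample {
  @SuppressWarnings("unchecked")
  static void list(SolrClient client) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "list");
    NamedList<Object> rsp = client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics/history", params));
    NamedList<Object> metrics = (NamedList<Object>) rsp.get("metrics");
    metrics.forEach((name, data) -> System.out.println(name + " lastModified=" + ((NamedList<Object>) data).get("lastModified")));
  }
}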
diff --git a/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackend.java b/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackend.java
index 956aabb..d0aa3e2 100644
--- a/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackend.java
+++ b/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackend.java
@@ -19,6 +19,7 @@
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.rrd4j.core.RrdByteArrayBackend;
@@ -36,14 +37,27 @@
   private final ReentrantLock lock = new ReentrantLock();
   private volatile boolean dirty = false;
   private volatile boolean closed = false;
+  private volatile long lastModifiedTime;
+
+  public static final class SyncData {
+    public byte[] data;
+    public long timestamp;
+
+    public SyncData(byte[] data, long timestamp) {
+      this.data = data;
+      this.timestamp = timestamp;
+    }
+  }
 
   public SolrRrdBackend(String path, boolean readOnly, SolrRrdBackendFactory factory) {
     super(path);
     this.factory = factory;
+    this.lastModifiedTime = TimeUnit.MILLISECONDS.convert(factory.getTimeSource().getEpochTimeNs(), TimeUnit.NANOSECONDS);
     try {
-      byte[] data = factory.getData(path);
-      if (data != null) {
-        this.buffer = data;
+      SyncData syncData = factory.getData(path);
+      if (syncData != null) {
+        this.buffer = syncData.data;
+        this.lastModifiedTime = syncData.timestamp;
       }
     } catch (IOException e) {
       log.warn("Exception retrieving data from " + path + ", store will be readOnly", e);
@@ -60,6 +74,7 @@
     super(other.getPath());
     readOnly = true;
     factory = null;
+    this.lastModifiedTime = other.lastModifiedTime;
     byte[] otherBuffer = other.buffer;
     buffer = new byte[otherBuffer.length];
     System.arraycopy(otherBuffer, 0, buffer, 0, otherBuffer.length);
@@ -69,6 +84,10 @@
     return readOnly;
   }
 
+  public long getLastModifiedTime() {
+    return lastModifiedTime;
+  }
+
   @Override
   protected void write(long offset, byte[] bytes) throws IOException {
     if (readOnly || closed) {
@@ -77,13 +96,14 @@
     lock.lock();
     try {
       super.write(offset, bytes);
+      lastModifiedTime = TimeUnit.MILLISECONDS.convert(factory.getTimeSource().getEpochTimeNs(), TimeUnit.NANOSECONDS);
       dirty = true;
     } finally {
       lock.unlock();
     }
   }
 
-  public byte[] getSyncData() {
+  public SyncData getSyncData() {
     if (readOnly || closed) {
       return null;
     }
@@ -95,7 +115,7 @@
     try {
       byte[] bufferCopy = new byte[buffer.length];
       System.arraycopy(buffer, 0, bufferCopy, 0, buffer.length);
-      return bufferCopy;
+      return new SyncData(bufferCopy, lastModifiedTime);
     } finally {
       lock.unlock();
     }
diff --git a/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackendFactory.java b/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackendFactory.java
index 06ab5fe..a3c6f64 100644
--- a/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackendFactory.java
+++ b/solr/core/src/java/org/apache/solr/metrics/rrd/SolrRrdBackendFactory.java
@@ -22,14 +22,12 @@
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Date;
+import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -47,6 +45,7 @@
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.IOUtils;
+import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.rrd4j.core.RrdBackend;
@@ -114,6 +113,10 @@
         TimeUnit.MILLISECONDS);
   }
 
+  public TimeSource getTimeSource() {
+    return timeSource;
+  }
+
   private void ensureOpen() throws IOException {
     if (closed) {
       throw new IOException("Factory already closed");
@@ -181,7 +184,7 @@
     }
   }
 
-  byte[] getData(String path) throws IOException {
+  SolrRrdBackend.SyncData getData(String path) throws IOException {
     if (!persistent) {
       return null;
     }
@@ -203,7 +206,8 @@
         return null;
       }
       if (o instanceof byte[]) {
-        return (byte[])o;
+        Long time = (Long)doc.getFieldValue("timestamp_l");
+        return new SolrRrdBackend.SyncData((byte[])o, time);
       } else {
         throw new SolrServerException("Unexpected value of '" + DATA_FIELD + "' field: " + o.getClass().getName() + ": " + o);
       }
@@ -216,34 +220,58 @@
     backends.remove(path);
   }
 
+  private static final class DbComparator implements Comparator<Pair<String, Long>> {
+    static final DbComparator INSTANCE = new DbComparator();
+
+    @Override
+    public int compare(Pair<String, Long> o1, Pair<String, Long> o2) {
+      return o1.first().compareTo(o2.first());
+    }
+  }
+
   /**
    * List all available databases created by this node name
    * @param maxLength maximum number of results to return
-   * @return list of database names, or empty
+   * @return list of database names and their last update times, or empty
    * @throws IOException on server errors
    */
-  public List<String> list(int maxLength) throws IOException {
-    Set<String> names = new HashSet<>();
+  public List<Pair<String, Long>> list(int maxLength) throws IOException {
+    Map<String, Pair<String, Long>> byName = new HashMap<>();
     if (persistent) {
       try {
         ModifiableSolrParams params = new ModifiableSolrParams();
         params.add(CommonParams.Q, "*:*");
         params.add(CommonParams.FQ, CommonParams.TYPE + ":" + DOC_TYPE);
-        params.add(CommonParams.FL, "id");
+        params.add(CommonParams.FL, "id,timestamp_l");
         params.add(CommonParams.ROWS, String.valueOf(maxLength));
         QueryResponse rsp = solrClient.query(collection, params);
         SolrDocumentList docs = rsp.getResults();
         if (docs != null) {
-          docs.forEach(d -> names.add(((String)d.getFieldValue("id")).substring(idPrefixLength)));
+          docs.forEach(d -> {
+            Long time = (Long)d.getFieldValue("timestamp_l");
+            Pair<String, Long> p = new Pair<>(((String)d.getFieldValue("id")).substring(idPrefixLength), time);
+            byName.put(p.first(), p);
+          });
         }
       } catch (SolrServerException e) {
         log.warn("Error retrieving RRD list", e);
       }
     }
-    // add in-memory backends not yet stored
-    names.addAll(backends.keySet());
-    ArrayList<String> list = new ArrayList<>(names);
-    Collections.sort(list);
+    // add in-memory backends not yet stored, or replace with more recent versions
+    backends.forEach((name, db) -> {
+      long lastModifiedTime = db.getLastModifiedTime();
+      Pair<String, Long> stored = byName.get(name);
+      Pair<String, Long> inMemory = new Pair<>(name, lastModifiedTime);
+      if (stored != null) {
+        if (stored.second() < lastModifiedTime) {
+          byName.put(name, inMemory);
+        }
+      } else {
+        byName.put(name, inMemory);
+      }
+    });
+    ArrayList<Pair<String, Long>> list = new ArrayList<>(byName.values());
+    Collections.sort(list, DbComparator.INSTANCE);
     return list;
   }
 
@@ -301,25 +329,25 @@
       return;
     }
     log.debug("-- maybe sync backends: " + backends.keySet());
-    Map<String, byte[]> syncData = new HashMap<>();
+    Map<String, SolrRrdBackend.SyncData> syncDatas = new HashMap<>();
     backends.forEach((path, backend) -> {
-      byte[] data = backend.getSyncData();
-      if (data != null) {
-        syncData.put(backend.getPath(), data);
+      SolrRrdBackend.SyncData syncData = backend.getSyncData();
+      if (syncData != null) {
+        syncDatas.put(backend.getPath(), syncData);
       }
     });
-    if (syncData.isEmpty()) {
+    if (syncDatas.isEmpty()) {
       return;
     }
-    log.debug("-- syncing " + syncData.keySet());
+    log.debug("-- syncing " + syncDatas.keySet());
     // write updates
     try {
-      syncData.forEach((path, data) -> {
+      syncDatas.forEach((path, syncData) -> {
         SolrInputDocument doc = new SolrInputDocument();
         doc.setField("id", ID_PREFIX + ID_SEP + path);
         doc.addField(CommonParams.TYPE, DOC_TYPE);
-        doc.addField(DATA_FIELD, data);
-        doc.setField("timestamp", new Date(TimeUnit.MILLISECONDS.convert(timeSource.getEpochTimeNs(), TimeUnit.NANOSECONDS)));
+        doc.addField(DATA_FIELD, syncData.data);
+        doc.setField("timestamp_l", syncData.timestamp);
         try {
           solrClient.add(collection, doc);
         } catch (SolrServerException | IOException e) {
@@ -334,7 +362,7 @@
       } catch (SolrServerException e) {
         log.warn("Error committing RRD data updates", e);
       }
-      syncData.forEach((path, data) -> {
+      syncDatas.forEach((path, data) -> {
         SolrRrdBackend backend = backends.get(path);
         if (backend != null) {
           backend.markClean();
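Since list() now returns name/last-modified pairs rather than bare names, callers iterate Pair objects; a minimal sketch, assuming an existing SolrRrdBackendFactory instance (timestamps are epoch millis, per SyncData above).

import java.util.List;

import org.apache.solr.common.util.Pair;
import org.apache.solr.metrics.rrd.SolrRrdBackendFactory;

// Illustrative only: each pair carries the db name and its last update time.
class RrdListExample {
  static void dump(SolrRrdBackendFactory factory) throws Exception {
    List<Pair<String, Long>> dbs = factory.list(100);
    for (Pair<String, Long> db : dbs) {
      System.out.println(db.first() + " lastModified=" + db.second());
    }
  }
}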
diff --git a/solr/core/src/test/org/apache/solr/cloud/MetricsHistoryIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/MetricsHistoryIntegrationTest.java
index 2012a1a..b3a1fb6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MetricsHistoryIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MetricsHistoryIntegrationTest.java
@@ -35,6 +35,7 @@
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.Base64;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.LogLevel;
 import org.junit.AfterClass;
@@ -94,12 +95,12 @@
     NamedList<Object> rsp = solrClient.request(createHistoryRequest(params(CommonParams.ACTION, "list")));
     assertNotNull(rsp);
     // expected solr.jvm, solr.node and solr.collection..system
-    List<String> lst = (List<String>)rsp.get("metrics");
+    SimpleOrderedMap<Object> lst = (SimpleOrderedMap<Object>) rsp.get("metrics");
     assertNotNull(lst);
     assertEquals(lst.toString(), 3, lst.size());
-    assertTrue(lst.toString(), lst.contains("solr.jvm"));
-    assertTrue(lst.toString(), lst.contains("solr.node"));
-    assertTrue(lst.toString(), lst.contains("solr.collection..system"));
+    assertNotNull(lst.toString(), lst.get("solr.jvm"));
+    assertNotNull(lst.toString(), lst.get("solr.node"));
+    assertNotNull(lst.toString(), lst.get("solr.collection..system"));
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index 234eaea..900fc76 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -665,6 +665,7 @@
         }
         queryRequest.getContext().put("httpMethod", req.getMethod().toString());
         SolrQueryResponse queryResponse = new SolrQueryResponse();
+        queryResponse.addResponseHeader(new SimpleOrderedMap<>());
         if (autoscaling) {
           autoScalingHandler.handleRequest(queryRequest, queryResponse);
         } else {
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
index e83f72f..270e7e7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
@@ -80,40 +80,38 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    if (cluster != null) {
-      // clear any persisted configuration
-      cluster.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), -1);
-      cluster.getDistribStateManager().setData(ZkStateReader.ROLES, Utils.toJSON(new HashMap<>()), -1);
-      cluster.getSimClusterStateProvider().simDeleteAllCollections();
-      cluster.simClearSystemCollection();
-      cluster.getSimNodeStateProvider().simRemoveDeadNodes();
-      cluster.getSimClusterStateProvider().simRemoveDeadNodes();
-      // restore the expected number of nodes
-      int currentSize = cluster.getLiveNodesSet().size();
-      if (currentSize < clusterNodeCount) {
-        int addCnt = clusterNodeCount - currentSize;
-        while (addCnt-- > 0) {
-          cluster.simAddNode();
-        }
-      } else if (currentSize > clusterNodeCount) {
-        cluster.simRemoveRandomNodes(currentSize - clusterNodeCount, true, random());
-      }
-      // clean any persisted trigger state or events
-      removeChildren(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH);
-      removeChildren(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH);
-      removeChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
-      removeChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
-      cluster.getSimClusterStateProvider().simResetLeaderThrottles();
-      cluster.simRestartOverseer(null);
-      cluster.getTimeSource().sleep(5000);
-      cluster.simResetOpCounts();
-    }
   }
 
   @Before
-  public void checkClusterConfiguration() {
+  public void checkClusterConfiguration() throws Exception {
     if (cluster == null)
       throw new RuntimeException("SimCloudManager not configured - have you called configureCluster()?");
+    // clear any persisted configuration
+    cluster.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), -1);
+    cluster.getDistribStateManager().setData(ZkStateReader.ROLES, Utils.toJSON(new HashMap<>()), -1);
+    cluster.getSimClusterStateProvider().simDeleteAllCollections();
+    cluster.simClearSystemCollection();
+    cluster.getSimNodeStateProvider().simRemoveDeadNodes();
+    cluster.getSimClusterStateProvider().simRemoveDeadNodes();
+    // restore the expected number of nodes
+    int currentSize = cluster.getLiveNodesSet().size();
+    if (currentSize < clusterNodeCount) {
+      int addCnt = clusterNodeCount - currentSize;
+      while (addCnt-- > 0) {
+        cluster.simAddNode();
+      }
+    } else if (currentSize > clusterNodeCount) {
+      cluster.simRemoveRandomNodes(currentSize - clusterNodeCount, true, random());
+    }
+    // clean any persisted trigger state or events
+    removeChildren(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH);
+    removeChildren(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH);
+    removeChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
+    removeChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
+    cluster.getSimClusterStateProvider().simResetLeaderThrottles();
+    cluster.simRestartOverseer(null);
+    cluster.getTimeSource().sleep(5000);
+    cluster.simResetOpCounts();
   }
 
   protected void removeChildren(String path) throws Exception {
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
index e1e230f..7c84c16 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
@@ -30,6 +30,7 @@
 import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.metrics.SolrMetricManager;
@@ -57,7 +58,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    simulated = random().nextBoolean();
+    simulated = random().nextBoolean() || true;
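+    // note: "|| true" forces the simulated code path regardless of the random value (presumably a temporary override)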
     Map<String, Object> args = new HashMap<>();
     args.put(MetricsHistoryHandler.SYNC_PERIOD_PROP, 1);
     args.put(MetricsHistoryHandler.COLLECT_PERIOD_PROP, 1);
@@ -111,11 +112,11 @@
   @Test
   public void testBasic() throws Exception {
     timeSource.sleep(10000);
-    List<String> list = handler.getFactory().list(100);
+    List<Pair<String, Long>> list = handler.getFactory().list(100);
     // solr.jvm, solr.node, solr.collection..system
     assertEquals(list.toString(), 3, list.size());
-    for (String path : list) {
-      RrdDb db = new RrdDb(MetricsHistoryHandler.URI_PREFIX + path, true, handler.getFactory());
+    for (Pair<String, Long> p : list) {
+      RrdDb db = new RrdDb(MetricsHistoryHandler.URI_PREFIX + p.first(), true, handler.getFactory());
       int dsCount = db.getDsCount();
       int arcCount = db.getArcCount();
       assertTrue("dsCount should be > 0, was " + dsCount, dsCount > 0);
diff --git a/solr/core/src/test/org/apache/solr/handler/tagger/EmbeddedSolrNoSerializeTest.java b/solr/core/src/test/org/apache/solr/handler/tagger/EmbeddedSolrNoSerializeTest.java
index 8d31ad0..37b8207 100644
--- a/solr/core/src/test/org/apache/solr/handler/tagger/EmbeddedSolrNoSerializeTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/tagger/EmbeddedSolrNoSerializeTest.java
@@ -34,6 +34,7 @@
 import org.apache.solr.client.solrj.StreamingResponseCallback;
 import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
 import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
@@ -99,11 +100,11 @@
       return Collections.singleton(new ContentStreamBase.StringStream(input));
     }
 
-    // As of 7.2.  But won't work until: https://issues.apache.org/jira/browse/SOLR-12142
-//    @Override
-//    public RequestWriter.ContentWriter getContentWriter(String expectedType) {
-//      return new RequestWriter.StringPayloadContentWriter(input, "text/plain; charset=UTF8");
-//    }
+    // As of 7.2.  Works here now that EmbeddedSolrServer honors getContentWriter: https://issues.apache.org/jira/browse/SOLR-12142
+    @Override
+    public RequestWriter.ContentWriter getContentWriter(String expectedType) {
+      return new RequestWriter.StringPayloadContentWriter(input, "text/plain; charset=UTF8");
+    }
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/handler/tagger/Tagger2Test.java b/solr/core/src/test/org/apache/solr/handler/tagger/Tagger2Test.java
index c7580e1..cafda46 100644
--- a/solr/core/src/test/org/apache/solr/handler/tagger/Tagger2Test.java
+++ b/solr/core/src/test/org/apache/solr/handler/tagger/Tagger2Test.java
@@ -86,6 +86,7 @@
 
   /** Support for stopwords (posInc &gt; 1);
    * discussion: https://github.com/OpenSextant/SolrTextTagger/issues/13 */
+  @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8344")
   @Test
   public void testStopWords() throws Exception {
     baseParams.set("field", "name_tagStop");//stop filter (pos inc enabled) index & query
diff --git a/solr/core/src/test/org/apache/solr/metrics/rrd/SolrRrdBackendFactoryTest.java b/solr/core/src/test/org/apache/solr/metrics/rrd/SolrRrdBackendFactoryTest.java
index 2f5fa13..18c72ec 100644
--- a/solr/core/src/test/org/apache/solr/metrics/rrd/SolrRrdBackendFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/rrd/SolrRrdBackendFactoryTest.java
@@ -17,13 +17,13 @@
 
 package org.apache.solr.metrics.rrd;
 
-import java.util.Date;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.MockSearchableSolrClient;
 import org.junit.After;
@@ -78,15 +78,15 @@
   @Test
   public void testBasic() throws Exception {
     RrdDb db = new RrdDb(createDef(), factory);
-    List<String> list = factory.list(100);
+    List<Pair<String, Long>> list = factory.list(100);
     assertEquals(list.toString(), 1, list.size());
-    assertEquals(list.toString(), "foo", list.get(0));
+    assertEquals(list.toString(), "foo", list.get(0).first());
     timeSource.sleep(2000);
     // there should be one sync data
     assertEquals(solrClient.docs.toString(), 1, solrClient.docs.size());
     String id = SolrRrdBackendFactory.ID_PREFIX + SolrRrdBackendFactory.ID_SEP + "foo";
     SolrInputDocument doc = solrClient.docs.get(CollectionAdminParams.SYSTEM_COLL).get(id);
-    long timestamp = ((Date)doc.getFieldValue("timestamp")).getTime();
+    long timestamp = (Long)doc.getFieldValue("timestamp_l");
     timeSource.sleep(2000);
     SolrInputDocument newDoc = solrClient.docs.get(CollectionAdminParams.SYSTEM_COLL).get(id);
     assertEquals(newDoc.toString(), newDoc, doc);
@@ -104,7 +104,7 @@
     timeSource.sleep(3000);
     newDoc = solrClient.docs.get(CollectionAdminParams.SYSTEM_COLL).get(id);
     assertFalse(newDoc.toString(), newDoc.equals(doc));
-    long newTimestamp = ((Date)newDoc.getFieldValue("timestamp")).getTime();
+    long newTimestamp = (Long)newDoc.getFieldValue("timestamp_l");
     assertNotSame(newTimestamp, timestamp);
     FetchRequest fr = db.createFetchRequest(ConsolFun.AVERAGE, firstTimestamp + 60, lastTimestamp - 60, 60);
     FetchData fd = fr.fetchData();
@@ -126,7 +126,7 @@
     // should still be listed
     list = factory.list(100);
     assertEquals(list.toString(), 1, list.size());
-    assertEquals(list.toString(), "foo", list.get(0));
+    assertEquals(list.toString(), "foo", list.get(0).first());
 
     // re-open read-write
     db = new RrdDb("solr:foo", factory);
@@ -141,7 +141,7 @@
     doc = newDoc;
     newDoc = solrClient.docs.get(CollectionAdminParams.SYSTEM_COLL).get(id);
     assertFalse(newDoc.toString(), newDoc.equals(doc));
-    newTimestamp = ((Date)newDoc.getFieldValue("timestamp")).getTime();
+    newTimestamp = (Long)newDoc.getFieldValue("timestamp_l");
     assertNotSame(newTimestamp, timestamp);
     fr = db.createFetchRequest(ConsolFun.AVERAGE, firstTimestamp + 60, lastTimestamp, 60);
     fd = fr.fetchData();
@@ -174,7 +174,7 @@
     timestamp = newTimestamp;
     newDoc = solrClient.docs.get(CollectionAdminParams.SYSTEM_COLL).get(id);
     assertTrue(newDoc.toString(), newDoc.equals(doc));
-    newTimestamp = ((Date)newDoc.getFieldValue("timestamp")).getTime();
+    newTimestamp = (Long)newDoc.getFieldValue("timestamp_l");
     assertEquals(newTimestamp, timestamp);
     readOnly.close();
   }
diff --git a/solr/core/src/test/org/apache/solr/update/TransactionLogTest.java b/solr/core/src/test/org/apache/solr/update/TransactionLogTest.java
index d2b4b26..66ecbc6 100644
--- a/solr/core/src/test/org/apache/solr/update/TransactionLogTest.java
+++ b/solr/core/src/test/org/apache/solr/update/TransactionLogTest.java
@@ -17,8 +17,8 @@
 
 package org.apache.solr.update;
 
-import java.io.IOException;
-import java.nio.file.Files;
+import java.io.File;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Locale;
 
@@ -29,9 +29,11 @@
 public class TransactionLogTest extends LuceneTestCase {
 
   @Test
-  public void testBigLastAddSize() throws IOException {
-    String tlogFileName = String.format(Locale.ROOT, UpdateLog.LOG_FILENAME_PATTERN, UpdateLog.TLOG_NAME, 0);
-    try (TransactionLog transactionLog = new TransactionLog(Files.createTempFile(tlogFileName, "").toFile(), new ArrayList<>())) {
+  public void testBigLastAddSize() {
+    String tlogFileName = String.format(Locale.ROOT, UpdateLog.LOG_FILENAME_PATTERN, UpdateLog.TLOG_NAME, Long.MAX_VALUE);
+    Path path = createTempDir();
+    File logFile = new File(path.toFile(), tlogFileName);
+    try (TransactionLog transactionLog = new TransactionLog(logFile, new ArrayList<>())) {
       transactionLog.lastAddSize = 2000000000;
       AddUpdateCommand updateCommand = new AddUpdateCommand(null);
       updateCommand.solrDoc = new SolrInputDocument();
diff --git a/solr/licenses/metrics-core-3.2.2.jar.sha1 b/solr/licenses/metrics-core-3.2.2.jar.sha1
deleted file mode 100644
index d14a04e..0000000
--- a/solr/licenses/metrics-core-3.2.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cd9886f498ee2ab2d994f0c779e5553b2c450416
diff --git a/solr/licenses/metrics-core-3.2.6.jar.sha1 b/solr/licenses/metrics-core-3.2.6.jar.sha1
new file mode 100644
index 0000000..13fae6d
--- /dev/null
+++ b/solr/licenses/metrics-core-3.2.6.jar.sha1
@@ -0,0 +1 @@
+62fe170cffeded1cef60e9e3402a93b45ce14327
diff --git a/solr/licenses/metrics-ganglia-3.2.2.jar.sha1 b/solr/licenses/metrics-ganglia-3.2.2.jar.sha1
deleted file mode 100644
index e5d8496..0000000
--- a/solr/licenses/metrics-ganglia-3.2.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d5bb1883e9b0daf0e4187e558746f5058f4585c1
diff --git a/solr/licenses/metrics-ganglia-3.2.6.jar.sha1 b/solr/licenses/metrics-ganglia-3.2.6.jar.sha1
new file mode 100644
index 0000000..32c9d30
--- /dev/null
+++ b/solr/licenses/metrics-ganglia-3.2.6.jar.sha1
@@ -0,0 +1 @@
+a44039835eafd2dad8842a9ed16a60c088c5b7ef
diff --git a/solr/licenses/metrics-graphite-3.2.2.jar.sha1 b/solr/licenses/metrics-graphite-3.2.2.jar.sha1
deleted file mode 100644
index 5d11db4..0000000
--- a/solr/licenses/metrics-graphite-3.2.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-908e8cbec1bbdb2f4023334e424c7de2832a95af
diff --git a/solr/licenses/metrics-graphite-3.2.6.jar.sha1 b/solr/licenses/metrics-graphite-3.2.6.jar.sha1
new file mode 100644
index 0000000..26a1bbc
--- /dev/null
+++ b/solr/licenses/metrics-graphite-3.2.6.jar.sha1
@@ -0,0 +1 @@
+ecbc470e9097bb3d7ff0232cca47f3badde2e20b
diff --git a/solr/licenses/metrics-jetty9-3.2.2.jar.sha1 b/solr/licenses/metrics-jetty9-3.2.2.jar.sha1
deleted file mode 100644
index 92d3508..0000000
--- a/solr/licenses/metrics-jetty9-3.2.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3fc94d99f41dc3f5be5483c81828138104df4449
diff --git a/solr/licenses/metrics-jetty9-3.2.6.jar.sha1 b/solr/licenses/metrics-jetty9-3.2.6.jar.sha1
new file mode 100644
index 0000000..5d7bfa4
--- /dev/null
+++ b/solr/licenses/metrics-jetty9-3.2.6.jar.sha1
@@ -0,0 +1 @@
+5dae1c13d8607663fbc7b22cf8c05aacd22f802e
diff --git a/solr/licenses/metrics-jvm-3.2.2.jar.sha1 b/solr/licenses/metrics-jvm-3.2.2.jar.sha1
deleted file mode 100644
index 0c02f93..0000000
--- a/solr/licenses/metrics-jvm-3.2.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9cbf2030242f7ffb97fae23f8a81421eb8d4ad45
diff --git a/solr/licenses/metrics-jvm-3.2.6.jar.sha1 b/solr/licenses/metrics-jvm-3.2.6.jar.sha1
new file mode 100644
index 0000000..219d02b
--- /dev/null
+++ b/solr/licenses/metrics-jvm-3.2.6.jar.sha1
@@ -0,0 +1 @@
+a7a475393fe47dfee2042415430da3f01d4fe94e
diff --git a/solr/solr-ref-guide/src/_includes/head.html b/solr/solr-ref-guide/src/_includes/head.html
index ac20a72..60d6e5d 100755
--- a/solr/solr-ref-guide/src/_includes/head.html
+++ b/solr/solr-ref-guide/src/_includes/head.html
@@ -12,7 +12,6 @@
 <link rel="stylesheet" href="{{ "css/customstyles.css" }}">
 <link rel="stylesheet" href="{{ "css/theme-solr.css" }}">
 <link rel="stylesheet" href="{{ "css/ref-guide.css" }}">
-<link rel="stylesheet" href="{{ "css/comments.css" }}">
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-cookie/1.4.1/jquery.cookie.min.js"></script>
diff --git a/solr/solr-ref-guide/src/_layouts/page.html b/solr/solr-ref-guide/src/_layouts/page.html
index 22f88e6..d9f56c9f 100755
--- a/solr/solr-ref-guide/src/_layouts/page.html
+++ b/solr/solr-ref-guide/src/_layouts/page.html
@@ -68,16 +68,4 @@
 
 </div>
 
-<!-- Adds comments from Apache's Comment system -->
-
-<div id="comments_thread">
-<div style="font-size: 1.6em; color: #d9411e; padding-top: 30px;">Comments on this Page</div>
-<div class="paragraph"><p>We welcome feedback on Solr documentation. However, we cannot provide application support via comments. If you need help, please send a message to the <a href="https://lucene.apache.org/solr/resources.html#community">Solr User mailing list</a>.</p></div>
-</div>
-<script type="text/javascript" src="https://comments.apache.org/show_comments.lua?site=solr-refguide&style=css/comments.css&page={{ page_id }}" async="true">
-</script>
-<noscript>
-<iframe width="100%" height="500" src="https://comments.apache.org/iframe.lua?site=solr-refguide&style=css/comments.css&page={{ page_id }}"></iframe>
-</noscript>
-
 {% include footer.html %}
diff --git a/solr/solr-ref-guide/src/css/comments.css b/solr/solr-ref-guide/src/css/comments.css
deleted file mode 100644
index f59796a..0000000
--- a/solr/solr-ref-guide/src/css/comments.css
+++ /dev/null
@@ -1,160 +0,0 @@
-/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * comments.css
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
-
-#comments_thread a:link {
-    color: #5A88B5;
-    background-color: inherit;
-}
-
-#comments_thread a:visited {
-    color: #5A88B5;
-    background-color: inherit;
-}
-
-#comments_thread a:link:hover,
-#comments_thread a:link:active,
-#comments_thread a:visited:hover,
-#comments_thread a:visited:active {
-    color: #0073c7;
-    background-color: #f0f0f0;
-}
-
-
-/* in general */
-
-#comments_thread h4 {
-   font-size: 14px;
-}
-
-.apaste_menu {
-        float: right;
-        margin-right: 10px;
-        width: 80px;
-}
-
-.apaste_comment {
-  background: #FEFEFE;
-  border: 1px solid #AAA;
-  border-radius: 2px;
-  display: block;
-  white-space: pre-wrap;
-  font-weight: normal;
-  padding-left: 20px;
-  padding-right: 20px;
-  padding-bottom: 16px;
-  padding-top: 5px;
-  margin: 15px;
-  font-size: 13px
-}
-.comment_header {
-    color: #000000;
-    border-radius: 3px;
-    border: 1px solid #999;
-    min-height: 24px;
-    text-indent: 5px;
-    font-size: 12pt;
-    background: #ffe9a3; /* Old browsers */
-    background: -moz-linear-gradient(top, #ffe9a3 0%, #ffd08a 32%, #ff9d57 69%, #ff833d 100%); /* FF3.6-15 */
-    background: -webkit-linear-gradient(top, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* Chrome10-25,Safari5.1-6 */
-    background: linear-gradient(to bottom, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
-}
-
-.comment_header_verified {
-    color: #000000;
-    border-radius: 3px;
-    border: 1px solid #999;
-    min-height: 24px;
-    text-indent: 5px;
-    font-size: 12pt;
-    background: #ffe9a3; /* Old browsers */
-    background: -moz-linear-gradient(top, #ffe9a3 0%, #ffd08a 32%, #ff9d57 69%, #ff833d 100%); /* FF3.6-15 */
-    background: -webkit-linear-gradient(top, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* Chrome10-25,Safari5.1-6 */
-    background: linear-gradient(to bottom, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
-}
-
-.comment_header_sticky {
-    color: #000000;
-    border-radius: 3px;
-    border: 1px solid #999;
-    min-height: 24px;
-    text-indent: 5px;
-    font-size: 12pt;
-    background: #ffe9a3; /* Old browsers */
-    background: -moz-linear-gradient(top, #ffe9a3 0%, #ffd08a 32%, #ff9d57 69%, #ff833d 100%); /* FF3.6-15 */
-    background: -webkit-linear-gradient(top, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* Chrome10-25,Safari5.1-6 */
-    background: linear-gradient(to bottom, #ffe9a3 0%,#ffd08a 32%,#ff9d57 69%,#ff833d 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
-}
-
-.comment_header img {
-    padding-top: 3px;
-    padding-bottom: 2px;
-}
-
-.comment_header_verified img {
-    padding-top: 3px;
-    padding-bottom: 2px;
-}
-
-.comment_header_sticky img {
-    padding-top: 3px;
-    padding-bottom: 2px;
-}
-
-.apaste_comment img {
-/*    border-radius: 5px;*/
-    border: none;
-}
-
-.apaste_comment_selected {background: #F8F4E9;}
-.apaste_comment_notapproved {background: #F8E0E0;}
-.apaste_comment_resolved {background: #FAFCFA;}
-.apaste_comment_sticky {background: #FFFFF6;}
-.apaste_comment_verified {background: #FAFBFA;}
-
-.apaste_comment_invalid {
-  color: #999;
-  background: #F8F8F8;
-}
-
-
-.apaste_comment textarea {
-  width: 480px;
-  height: 180px;
-}
-
-#apaste {
-  margin: 5px;
-  font-weight: normal;
-  font-size: 14px;
-  color: #024;
-
-}
-#apaste .section {
-  padding: 20px;
-  padding-left: 80px;
-}
-
-.notapproved {
-  background-color: #FEE;
-  padding: 5px;
-}
-
-#comments_thread textarea{
-    background-color: #ffffff;
-    width: auto;
-    border: 1px solid #1c1c1c;
-    border-radius: 3px;
-    box-shadow: 0pt 1px 3px rgba(0, 0, 0, 0.16) inset;
-    position: relative;
-}
-
-.apaste_honeypot {
-  display: none;
-}
-
-//* Remove external link icons when they appear in comments *//
-a[href^="http://"]:after,
-a[href^="https://"]:after {
-   content: none !important;
-}
diff --git a/solr/solr-ref-guide/src/metrics-history.adoc b/solr/solr-ref-guide/src/metrics-history.adoc
index 5dc1c3c..e39f66e 100644
--- a/solr/solr-ref-guide/src/metrics-history.adoc
+++ b/solr/solr-ref-guide/src/metrics-history.adoc
@@ -18,15 +18,16 @@
 
 == Design
 === Round-robin databases
-When Solr runs in "cloud" mode it collects long-term history of certain key metrics. This information
-can be used for very simple monitoring and troubleshooting, but also some Solr Cloud components
-(eg. autoscaling) can use this data for making informed decisions based on long-term
-trends of selected metrics.
+Solr collects long-term history of certain key metrics both in SolrCloud and in standalone mode.
+This information can be used for very simple monitoring and troubleshooting, but some
+SolrCloud components (e.g., autoscaling) can also use this data to make informed decisions based on
+long-term trends of selected metrics.
 
 [IMPORTANT]
 ====
-Metrics history is available ONLY in SolrCloud mode, it's not supported in standalone Solr. Also,
-the `.system` collection must exist if metrics history should be persisted.
+The `.system` collection must exist for metrics history to be persisted. If this collection
+is absent then metrics history will still be collected and kept in memory, but it will be lost
+on node restart.
 ====
 
 This data is maintained as multi-resolution time series, with a fixed total number of data points
@@ -61,14 +62,16 @@
 detailed data from each database, including retrieval of all individual datapoints.
 
 Databases are identified primarily by their corresponding metric registry name, so for databases that
-keep track of aggregated metrics this will be eg. `solr.jvm`, `solr.node`, `solr.collection.gettingstarted`,
-and for databases with non-aggregated metrics this will be eg. `solr.jvm.localhost:8983_solr`,
-`solr.node.localhost:7574_solr`, `solr.core.gettingstarted.shard1.replica_n1`.
+keep track of aggregated metrics this will be, e.g., `solr.jvm`, `solr.node`, `solr.collection.gettingstarted`.
+For databases with non-aggregated metrics the name consists of the registry name, optionally with a node name
+to identify databases with the same name coming from different nodes. For example, per-node databases are
+named like this: `solr.jvm.localhost:8983_solr`, `solr.node.localhost:7574_solr`, but per-replica names are
+already unique across the cluster so they are named like this: `solr.core.gettingstarted.shard1.replica_n1`.
 
 === Collected metrics
 Currently the following selected metrics are tracked:
 
-* `solr.core` and `solr.collection` metrics:
+* Non-aggregated `solr.core` and aggregated `solr.collection` metrics:
 ** `QUERY./select.requests`
 ** `UPDATE./update.requests`
 ** `INDEX.sizeInBytes`
@@ -78,6 +81,7 @@
 * `solr.node` metrics:
 ** `CONTAINER.fs.coreRoot.usableSpace`
 ** `numNodes` (aggregated, number of live nodes)
+
 * `solr.jvm` metrics:
 ** `memory.heap.used`
 ** `os.processCpuLoad`
@@ -86,6 +90,10 @@
 Separate databases are created for each of these groups, and each database keeps data for
 all metrics listed in that group.
 
+NOTE: Currently this list is not configurable. Also, if you change this list in the code then
+all existing databases must first be removed from the `.system` collection, because RRD4j doesn't allow
+adding new datasources once the database is created.
+
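
For context, a hedged sketch of how such a fixed set of datasources and archives is declared with RRD4j; the names and archive settings below mirror the `status` output shown later on this page, but the exact values Solr uses are defined in the code, not here:

[source,java]
----
import java.io.IOException;

import org.rrd4j.ConsolFun;
import org.rrd4j.DsType;
import org.rrd4j.core.RrdDb;
import org.rrd4j.core.RrdDef;

// Illustrative only: datasources and archives are fixed when the RrdDb is created,
// which is why existing databases must be removed if the tracked metrics change.
RrdDb createExampleDb() throws IOException {
  RrdDef def = new RrdDef("solr:solr.collection.gettingstarted", 60);                 // 60 s step
  def.addDatasource("numShards", DsType.GAUGE, 120, Double.NaN, Double.NaN);          // DS:numShards:GAUGE:120:U:U
  def.addDatasource("QUERY./select.requests", DsType.COUNTER, 120, Double.NaN, Double.NaN);
  def.addArchive(ConsolFun.AVERAGE, 0.5, 1, 240);    // RRA:AVERAGE:0.5:1:240  (60 s resolution, 240 rows)
  def.addArchive(ConsolFun.AVERAGE, 0.5, 10, 288);   // RRA:AVERAGE:0.5:10:288 (10 min resolution, 288 rows)
  return new RrdDb(def);                             // no new datasources can be added after this point
}
----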
 === SolrRrdBackendFactory
 This component is responsible for managing in-memory databases and periodically saving them
 to the `.system` collection. If the `.system` collection is not available the updates to the
@@ -101,7 +109,8 @@
 This handler also performs aggregation of metrics on per-collection level, and on a cluster level.
 By default only these aggregated metrics are tracked - historic data from each node and each replica
 in each collection is not collected separately. Aggregated databases are managed on the Overseer leader
-node.
+node but they are still accessible from other nodes even if they are not persisted - the handler redirects
+the call from the originating node to the current Overseer leader.
 
 The handler assumes that a simple aggregation (sum of partial metric values from each resource) is
 sufficient. This happens to make sense for the default built-in sets of metrics. Future extensions will
@@ -135,6 +144,7 @@
 
 `collectPeriod`:: integer, in seconds, default is 60. Metrics values will be collected and respective
 databases updated every `collectPeriod` seconds.
+
 [IMPORTANT]
 ====
 Value of `collectPeriod` must be at least 1, and if it's changed then all previously existing databases
@@ -142,9 +152,9 @@
 ====
 
 `syncPeriod`:: integer, in seconds, default is 60. Data from modified databases will be saved to Solr
-every `syncPeriod` seconds. When accessing the databases via REST API the visibility of most recent
-data depends on this period, because requests accessing the data from other nodes see only the
-version of the data that is stored in the `.system` collection.
+every `syncPeriod` seconds. When accessing the databases via the REST API in `index` mode the visibility of
+most recent data depends on this period, because requests accessing the data from other nodes see only
+the version of the data that is stored in the `.system` collection.
 
 === Example configuration
 Example `/clusterprops.json` file with metrics history configuration that turns on the collection of
@@ -154,6 +164,7 @@
 [source,json]
 ----
 {
+...
   "metrics" : {
     "history" : {
       "enable" : true,
@@ -161,42 +172,86 @@
       "syncPeriod" : 300
     }
   }
+...
 }
 ----
 
 == Metrics History API
-Main entry point for accessing metrics history is `/admin/metrics/history` (or `/api/cluster/metrics/history` for
-v2 API).
+Main entry point for accessing metrics history is `/admin/metrics/history` (or `/api/cluster/metrics/history`
+for v2 API).
 
 The following sections describe actions available in this API. All calls have at least one
 required parameter `action`.
 
+All responses contain a section named `state`, which reports the current internal state of the API:
+
+`enableReplicas`:: boolean, corresponds to the `enableReplicas` configuration setting.
+`enableNodes`:: boolean, corresponds to the `enableNodes` configuration setting.
+`mode`:: one of the following values:
+* `inactive` - when metrics collection is disabled (but access to existing metrics history is still available).
+* `memory` - when metrics history is kept only in memory because the `.system` collection doesn't exist. In this mode
+clients can access metrics history available on the node that received the request and on the Overseer leader.
+* `index` - when metrics history is periodically stored in the `.system` collection. Data available in memory on
+the node that accepted the request is retrieved from memory; any other data is retrieved from the
+`.system` collection (so it's at least `syncPeriod` old).
+
+Also, the response header section (`responseHeader`) contains a `zkConnected` boolean property that indicates
+whether the current node is part of a SolrCloud cluster.
+
 === List databases (`action=list`)
 This call produces a list of available databases. It supports the following parameters:
 
-`rows`:: optional integer, default is 500. Maximum number of results to return
+`rows`:: optional integer, default is 500. Maximum number of results to return.
 
 Example:
+In this SolrCloud example the API is in `memory` mode, and the request was made to a node that is
+not the Overseer leader. The API transparently forwarded the request to the Overseer leader.
 [source,bash]
 ----
-curl http://localhost:8983/solr/admin/metrics/history?action=list&rows=10
+curl http://localhost:7574/solr/admin/metrics/history?action=list&rows=10
 ----
 [source,json]
 ----
 {
-    "responseHeader": {
-        "status": 0,
-        "QTime": 16
+  "responseHeader": {
+    "zkConnected": true,
+    "status": 0,
+    "QTime": 9
+  },
+  "metrics": {
+    "solr.collection..system": {
+      "lastModified": 1528360138,
+      "node": "127.0.0.1:8983_solr"
     },
-    "metrics": [
-        "solr.collection..system",
-        "solr.collection.gettingstarted",
-        "solr.jvm",
-        "solr.node"
-    ]
+    "solr.collection.gettingstarted": {
+      "lastModified": 1528360138,
+      "node": "127.0.0.1:8983_solr"
+    },
+    "solr.jvm": {
+      "lastModified": 1528360138,
+      "node": "127.0.0.1:8983_solr"
+    },
+    "solr.node": {
+      "lastModified": 1528360138,
+      "node": "127.0.0.1:8983_solr"
+    }
+  },
+  "state": {
+    "enableReplicas": false,
+    "enableNodes": false,
+    "mode": "memory"
+  }
 }
 ----
 
+Note the presence of the `node` element in each section, which shows where the information is coming
+from - when the API is in `memory` mode this indicates which results are local and which ones are retrieved
+from the Overseer leader node. When the API is in `index` mode this element always shows the node name that
+received the request (because the data is retrieved from the `.system` collection anyway).
+
+Each section also contains a `lastModified` element, which contains the time when the
+database was last updated. All timestamps returned from this API correspond to Unix epoch time in seconds.
+
 === Database status (`action=status`)
 This call provides detailed status of the selected database.
 
@@ -207,66 +262,71 @@
 Example:
 [source,bash]
 ----
-curl http://localhost:8983/solr/admin/metrics/history?action=status&name=solr.collection.gettingstarted
+curl http://localhost:7574/solr/admin/metrics/history?action=status&name=solr.collection.gettingstarted
 ----
 [source,json]
 ----
 {
-    "responseHeader": {
-        "status": 0,
-        "QTime": 38
-    },
-    "metrics": [
-        "solr.collection.gettingstarted",
-        [
-            "status",
-            {
-                "lastModified": 1527268438,
-                "step": 60,
-                "datasourceCount": 5,
-                "archiveCount": 5,
-                "datasourceNames": [
-                    "numShards",
-                    "numReplicas",
-                    "QUERY./select.requests",
-                    "UPDATE./update.requests",
-                    "INDEX.sizeInBytes"
-                ],
-                "datasources": [
-                    {
-                        "datasource": "DS:numShards:GAUGE:120:U:U",
-                        "lastValue": 2
-                    },
-                    {
-                        "datasource": "DS:QUERY./select.requests:COUNTER:120:U:U",
-                        "lastValue": 8786
-                    },
-                    ...
-                ],
-                "archives": [
-                    {
-                        "archive": "RRA:AVERAGE:0.5:1:240",
-                        "steps": 1,
-                        "consolFun": "AVERAGE",
-                        "xff": 0.5,
-                        "startTime": 1527254040,
-                        "endTime": 1527268380,
-                        "rows": 240
-                    },
-                    {
-                        "archive": "RRA:AVERAGE:0.5:10:288",
-                        "steps": 10,
-                        "consolFun": "AVERAGE",
-                        "xff": 0.5,
-                        "startTime": 1527096000,
-                        "endTime": 1527268200,
-                        "rows": 288
-                    },
-                    ...
-                ]
-            }
+  "responseHeader": {
+    "zkConnected": true,
+    "status": 0,
+    "QTime": 46
+  },
+  "metrics": {
+    "solr.collection.gettingstarted": {
+      "status": {
+        "lastModified": 1528318361,
+        "step": 60,
+        "datasourceCount": 5,
+        "archiveCount": 5,
+        "datasourceNames": [
+          "numShards",
+          "numReplicas",
+          "QUERY./select.requests",
+          "UPDATE./update.requests",
+          "INDEX.sizeInBytes"
+        ],
+        "datasources": [
+          {
+            "datasource": "DS:numShards:GAUGE:120:U:U",
+            "lastValue": 2
+          },
+          {
+            "datasource": "DS:numReplicas:GAUGE:120:U:U",
+            "lastValue": 4
+          },
+          ...
+        ],
+        "archives": [
+          {
+            "archive": "RRA:AVERAGE:0.5:1:240",
+            "steps": 1,
+            "consolFun": "AVERAGE",
+            "xff": 0.5,
+            "startTime": 1528303980,
+            "endTime": 1528318320,
+            "rows": 240
+          },
+          {
+            "archive": "RRA:AVERAGE:0.5:10:288",
+            "steps": 10,
+            "consolFun": "AVERAGE",
+            "xff": 0.5,
+            "startTime": 1528146000,
+            "endTime": 1528318200,
+            "rows": 288
+          },
+          ...
         ]
-    ]
+      },
+      "node": "127.0.0.1:7574_solr"
+    }
+  },
+  "state": {
+    "enableReplicas": false,
+    "enableNodes": false,
+    "mode": "index"
+  }
 }
 ----
 
@@ -286,7 +346,7 @@
 * `graph` - data is returned as PNG images, Base64-encoded, containing graphs of each time series values over time.
 
 In each case the response is structured in a similar way: archive identifiers are keys in a JSON map,
-and timestamps / datapoints / graphs are values.
+all data is placed in a `data` element, with timestamps / datapoints / graphs as values in lists or maps.
 
 ==== Examples
 This is the output using the default `list` format:
@@ -297,37 +357,49 @@
 [source,json]
 ----
 {
-    "responseHeader": {
-        "status": 0,
-        "QTime": 36
-    },
-    "metrics": [
-        "solr.collection.gettingstarted",
-        [
-            "data",
-            {
-                "RRA:AVERAGE:0.5:1:240": {
-                    "timestamps":1527254460,
-                    "timestamps":1527254520,
-                    "timestamps":1527254580,
-                    ...
-                    "values": {
-                        "numShards": "NaN",
-                        "numShards": 2.0,
-                        "numShards": 2.0,
-                        ...
-                        "numReplicas": "NaN",
-                        "numReplicas": 4.0,
-                        "numReplicas": 4.0,
-                        ...
-                        "QUERY./select.requests": "NaN",
-                        "QUERY./select.requests": 123,
-                        "QUERY./select.requests": 456,
-                        ...
-                    }
-                },
-                "RRA:AVERAGE:0.5:10:288": {
-...
+  "responseHeader": {
+    "zkConnected": true,
+    "status": 0,
+    "QTime": 4
+  },
+  "metrics": {
+    "solr.collection.gettingstarted": {
+      "data": {
+        "RRA:AVERAGE:0.5:1:240": {
+          "timestamps": [
+            1528304160,
+            1528304220,
+            ...
+          ],
+          "values": {
+            "numShards": [
+              "NaN",
+              2.0,
+              ...
+            ],
+            "numReplicas": [
+              "NaN",
+              4.0,
+              ...
+            ],
+            ...
+          }
+        },
+        "RRA:AVERAGE:0.5:10:288": {
+          "timestamps": [
+            1528145400,
+            1528146000,
+          ...
+      "lastModified": 1528318606,
+      "node": "127.0.0.1:8983_solr"
+    }
+  },
+  "state": {
+    "enableReplicas": false,
+    "enableNodes": false,
+    "mode": "index"
+  }
+}
 ----
 
 This is the output when using the `string` format:
@@ -338,25 +410,24 @@
 [source,json]
 ----
 {
-    "responseHeader": {
-        "status": 0,
-        "QTime": 11
-    },
-    "metrics": [
-        "solr.collection.gettingstarted",
-        [
-            "data",
-            {
-                "RRA:AVERAGE:0.5:1:240": {
-                    "timestamps": "1527254820\n1527254880\n1527254940\n...",
-                    "values": {
-                        "numShards": "NaN\n2.0\n2.0\n2.0\n2.0\n2.0\n2.0\n...",
-                        "numReplicas": "NaN\n4.0\n4.0\n4.0\n4.0\n4.0\n4.0\n...",
-                        "QUERY./select.requests": "NaN\n123\n456\n789\n...",
-                        ...
-                    }
-                },
-                "RRA:AVERAGE:0.5:10:288": {
+  "responseHeader": {
+    "zkConnected": true,
+    "status": 0,
+    "QTime": 2
+  },
+  "metrics": {
+    "solr.collection.gettingstarted": {
+      "data": {
+        "RRA:AVERAGE:0.5:1:240": {
+          "timestamps": "1527254820\n1527254880\n1527254940\n...",
+          "values": {
+            "numShards": "NaN\n2.0\n2.0\n2.0\n2.0\n2.0\n2.0\n...",
+            "numReplicas": "NaN\n4.0\n4.0\n4.0\n4.0\n4.0\n4.0\n...",
+            "QUERY./select.requests": "NaN\n123\n456\n789\n...",
+            ...
+          }
+        },
+        "RRA:AVERAGE:0.5:10:288": {
                 ...
 ----
 
@@ -368,29 +439,28 @@
 [source,json]
 ----
 {
-    "responseHeader": {
-        "status": 0,
-        "QTime": 2275
-    },
-    "metrics": [
-        "solr.collection.gettingstarted",
-        [
-            "data",
-            {
-                "RRA:AVERAGE:0.5:1:240": {
-                    "values": {
-                        "numShards": "iVBORw0KGgoAAAANSUhEUgAAAkQAAA...",
-                        "numReplicas": "iVBORw0KGgoAAAANSUhEUgAAAkQA...",
-                        "QUERY./select.requests": "iVBORw0KGgoAAAANS...",
-                        ...
-                    }
-                },
-                "RRA:AVERAGE:0.5:10:288": {
-                    "values": {
-                        "numShards": "iVBORw0KGgoAAAANSUhEUgAAAkQAAA...",
-                        ...
-                },
-                ...
+  "responseHeader": {
+    "zkConnected": true,
+    "status": 0,
+    "QTime": 2
+  },
+  "metrics": {
+    "solr.collection.gettingstarted": {
+      "data": {
+        "RRA:AVERAGE:0.5:1:240": {
+          "values": {
+            "numShards": "iVBORw0KGgoAAAANSUhEUgAAAkQAAA...",
+            "numReplicas": "iVBORw0KGgoAAAANSUhEUgAAAkQA...",
+            "QUERY./select.requests": "iVBORw0KGgoAAAANS...",
+            ...
+          }
+        },
+        "RRA:AVERAGE:0.5:10:288": {
+          "values": {
+            "numShards": "iVBORw0KGgoAAAANSUhEUgAAAkQAAA...",
+            ...
+          },
+        ...
 ----
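
Each value in the `graph` format is a Base64-encoded PNG, so a client only needs to decode it to obtain the image. A minimal sketch, assuming the encoded string has already been extracted from the parsed JSON response:

[source,java]
----
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Base64;

// Illustrative only: decode one Base64-encoded PNG value from a format=graph response.
void savePng(String base64Graph) throws IOException {
  byte[] png = Base64.getDecoder().decode(base64Graph);
  Files.write(Paths.get("select-requests.png"), png);
}
----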
 
 .Example 60 sec resolution history graph for `QUERY./select.requests` metric
diff --git a/solr/solr-ref-guide/src/the-tagger-handler.adoc b/solr/solr-ref-guide/src/the-tagger-handler.adoc
index 14ba8ed..83439af 100644
--- a/solr/solr-ref-guide/src/the-tagger-handler.adoc
+++ b/solr/solr-ref-guide/src/the-tagger-handler.adoc
@@ -1,118 +1,115 @@
-[[the-tagger-handler]]
 = The Tagger Handler
 
 The "Tagger" Request Handler, AKA the "SolrTextTagger" is a "text tagger".
+
 Given a dictionary (a Solr index) with a name-like field,
-  you post text to this request handler and it will return every occurrence of one of those names with offsets and other document metadata desired.
+  you can post text to this request handler and it will return every occurrence of one of those names, with offsets and any other desired document metadata.
 It's used for named entity recognition (NER).
-It doesn't do any NLP (outside of Lucene text analysis) so it's said to be a "naive tagger",
+
+The tagger doesn't do any natural language processing (NLP) outside of Lucene text analysis, so it's considered a "naive tagger",
   but it's definitely useful as-is and a more complete NER or ERD (entity recognition and disambiguation)
   system can be built with this as a key component.
 The SolrTextTagger might be used on queries for query-understanding or large documents as well.
 
-To get a sense of how to use it, jump to the tutorial below.
+To get a sense of how to use it, jump to the <<tutorial-with-geonames,tutorial>> below.
 
 The tagger does not yet support a sharded index.
 Tens, perhaps hundreds of millions of names (documents) are supported, mostly limited by memory.
 
-[[tagger-configuration]]
-== Configuration
+== Tagger Configuration
 
-The Solr schema needs 2 things:
+To configure the tagger, your Solr schema needs 2 fields:
 
-* A unique key field (see `<uniqueKey>`).
-  Recommended field settings: set `docValues=true`
-* A tag field, a TextField, with `ConcatenateGraphFilterFactory` at the end of the index chain (not the query chain):
+* A unique key field (see <<other-schema-elements.adoc#unique-key,Unique Key>> for how to define a unique key in your schema).
+  Recommended field settings: set `docValues=true`.
+* A tag field, which must be a `TextField`, with `ConcatenateGraphFilterFactory` at the end of the index chain (not the query chain):
   Set `preservePositionIncrements=false` on that filter.
-  Recommended field settings: `omitNorms=true`, `omitTermFreqAndPositions=true` and `postingsFormat=FST50`
+  Recommended field settings: `omitNorms=true`, `omitTermFreqAndPositions=true` and `postingsFormat=FST50`.
 
-The text field's _index analysis chain_, aside from needing ConcatenateGraphFilterFactory at the end,
+The text field's _index analysis chain_, aside from needing `ConcatenateGraphFilterFactory` at the end,
   can otherwise have whatever tokenizer and filters suit your matching preferences.
-It can have multi-word synonyms and use WordDelimiterGraphFilterFactory for example.
-However, do _not_ use FlattenGraphFilterFactory as it will interfere with ConcatenateGraphFilterFactory.
-Position gaps (e.g. stop words) get ignored; it's not (yet) supported for the gap to be significant.
+It can have multi-word synonyms and use `WordDelimiterGraphFilterFactory` for example.
+However, do _not_ use `FlattenGraphFilterFactory` as it will interfere with `ConcatenateGraphFilterFactory`.
+Position gaps (e.g., stop words) get ignored; it's not (yet) supported for the gap to be significant.
 
 The text field's _query analysis chain_, on the other hand, is more limited.
 There should not be tokens at the same position, thus no synonym expansion -- do that at index time instead.
 Stop words (or any other filter introducing a position gap) are supported.
 At runtime the tagger can be configured to either treat it as a tag break or to ignore it.
 
-The Solr config needs the `solr.TagRequestHandler` defined, which supports `defaults`, `invariants`, and `appends`
+Your `solrconfig.xml` needs the `solr.TaggerRequestHandler` defined, which supports `defaults`, `invariants`, and `appends`
 sections just like the search handler.
 
-[[tagger-parameters]]
+For configuration examples, jump to the <<tutorial-with-geonames,tutorial>> below.
+
 == Tagger Parameters
 
 The tagger's execution is completely configurable with request parameters.  Only `field` is required.
 
 `field`::
-  The tag field that serves as the dictionary.
-  This is required; you'll probably specify it in the request handler.
+The tag field that serves as the dictionary.
+This is required; you'll probably specify it in the request handler.
 
 `fq`::
-  You can specify some number of _filter queries_ to limit the dictionary used for tagging.
-  This parameter is the same as is used by the `solr.SearchHandler`.
+You can specify some number of _filter queries_ to limit the dictionary used for tagging.
+This parameter is the same one used by the `solr.SearchHandler`.
 
 `rows`::
-  The maximum number of documents to return, but defaulting to 10000 for a tag request.
-  This parameter is the same as is used by the `solr.SearchHandler`.
+The maximum number of documents to return, defaulting to 10000 for a tag request.
+This parameter is the same one used by the `solr.SearchHandler`.
 
 `fl`::
-  Solr's standard param for listing the fields to return.
-  This parameter is the same as is used by the `solr.SearchHandler`.
+Solr's standard parameter for listing the fields to return.
+This parameter is the same one used by the `solr.SearchHandler`.
 
 `overlaps`::
-  Choose the algorithm to determine which tags in an overlapping set should be retained, versus being pruned away.
-  Options are:
+Choose the algorithm to determine which tags in an overlapping set should be retained, versus being pruned away.
+Options are:
 
-  * `ALL`: Emit all tags.
-  * `NO_SUB`: Don't emit a tag that is completely within another tag (i.e. no subtag).
-  * `LONGEST_DOMINANT_RIGHT`: Given a cluster of overlapping tags, emit the longest one (by character length).
-     If there is a tie, pick the right-most.
-     Remove any tags overlapping with this tag then repeat the algorithm to potentially find other tags
-     that can be emitted in the cluster.
+* `ALL`: Emit all tags.
+* `NO_SUB`: Don't emit a tag that is completely within another tag (i.e., no subtag).
+* `LONGEST_DOMINANT_RIGHT`: Given a cluster of overlapping tags, emit the longest one (by character length).
+If there is a tie, pick the right-most.
+Remove any tags overlapping with this tag then repeat the algorithm to potentially find other tags that can be emitted in the cluster.
 
 `matchText`::
-  A boolean indicating whether to return the matched text in the tag response.
-  This will trigger the tagger to fully buffer the input before tagging.
+A boolean indicating whether to return the matched text in the tag response.
+This will trigger the tagger to fully buffer the input before tagging.
 
 `tagsLimit`::
-  The maximum number of tags to return in the response.
-  Tagging effectively stops after this point.
-  By default this is 1000.
+The maximum number of tags to return in the response.
+Tagging effectively stops after this point.
+By default this is `1000`.
 
 `skipAltTokens`::
-  A boolean flag used to suppress errors that can occur if, for example,
+A boolean flag used to suppress errors that can occur if, for example,
   you enable synonym expansion at query time in the analyzer, which you normally shouldn't do.
-  Let this default to false unless you know that such tokens can't be avoided.
+Let this default to false unless you know that such tokens can't be avoided.
 
 `ignoreStopwords`::
-  A boolean flag that causes stopwords (or any condition causing positions to skip like >255 char words)
-  to be ignored as if it wasn't there.
-  Otherwise, the behavior is to treat them as breaks in tagging on the presumption your indexed text-analysis
-  configuration doesn't have a StopWordFilter.
-  By default the indexed analysis chain is checked for the presence of a StopWordFilter and if found
-  then ignoreStopWords is true if unspecified.
-  You probably shouldn't have a StopWordFilter configured and probably won't need to set this param either.
+A boolean flag that causes stopwords (or any condition causing positions to skip like >255 char words)
+to be ignored as if they weren't there.
+Otherwise, the behavior is to treat them as breaks in tagging on the presumption your indexed text-analysis
+  configuration doesn't have a `StopWordFilter` defined.
+By default the indexed analysis chain is checked for the presence of a `StopWordFilter` and if found
+  then `ignoreStopWords` is true if unspecified.
+You probably shouldn't have a `StopWordFilter` configured and probably won't need to set this parameter either.
 
 `xmlOffsetAdjust`::
-  A boolean indicating that the input is XML and furthermore that the offsets of returned tags should be adjusted as
-  necessary to allow for the client to insert an openening and closing element at the tag offset pair.
-  If it isn't possible to do so then the tag will be omitted.
-  You are expected to configure `HTMLStripCharFilterFactory` in the schema when using this option.
-  This will trigger the tagger to fully buffer the input before tagging.
+A boolean indicating that the input is XML and furthermore that the offsets of returned tags should be adjusted as
+  necessary to allow for the client to insert an opening and closing element at the tag offset pair.
+If it isn't possible to do so then the tag will be omitted.
+You are expected to configure `HTMLStripCharFilterFactory` in the schema when using this option.
+This will trigger the tagger to fully buffer the input before tagging.
 
-Solr's parameters for controlling the response format are supported, like:
-  `echoParams`, `wt`, `indent`, etc.
+Solr's parameters for controlling the response format are also supported, such as `echoParams`, `wt`, `indent`, etc.
 
-[[tagger-tutorial-with-geonames]]
 == Tutorial with Geonames
 
 This is a tutorial that demonstrates how to configure and use the text
-tagger with the popular Geonames data set. It's more than a tutorial;
+tagger with the popular http://www.geonames.org/[Geonames] data set. It's more than a tutorial;
 it's a how-to with information that wasn't described above.
 
-[[tagger-create-and-configure-a-solr-collection]]
 === Create and Configure a Solr Collection
 
 Create a Solr collection named "geonames". For the tutorial, we'll
@@ -120,26 +117,27 @@
 experimentation and getting going fast but not for production or being
 optimal.
 
-....
+[source,bash]
 bin/solr create -c geonames
-....
 
-[[tagger-configuring]]
-==== Configuring
+==== Configuring the Tagger
 
 We need to configure the schema first. The "data driven" mode we're
 using allows us to keep this step fairly minimal -- we just need to
-declare a field type, 2 fields, and a copy-field. The critical part
+declare a field type, 2 fields, and a copy-field.
+
+The critical part
 up-front is to define the "tag" field type. There are many many ways to
 configure text analysis; and we're not going to get into those choices
 here. But an important bit is the `ConcatenateGraphFilterFactory` at the
 end of the index analyzer chain. Another important bit for performance
-is postingsFormat=FST50 resulting in a compact FST based in-memory data
+is `postingsFormat=FST50`, resulting in a compact FST-based in-memory data
 structure that is especially beneficial for the text tagger.
 
 Schema configuration:
 
-....
+[source,bash]
+----
 curl -X POST -H 'Content-type:application/json'  http://localhost:8983/solr/geonames/schema -d '{
   "add-field-type":{
     "name":"tag",
@@ -166,25 +164,26 @@
       ]}
     },
 
-  "add-field":{ "name":"name",     "type":"text_general"},
+  "add-field":{"name":"name", "type":"text_general"},
 
-  "add-field":{ "name":"name_tag", "type":"tag",          "stored":false },
+  "add-field":{"name":"name_tag", "type":"tag", "stored":false },
 
-  "add-copy-field":{ "source":"name", "dest":[ "name_tag" ]}
+  "add-copy-field":{"source":"name", "dest":["name_tag"]}
 }'
-....
+----
 
 Configure a custom Solr Request Handler:
 
-....
+[source,bash]
+----
 curl -X POST -H 'Content-type:application/json' http://localhost:8983/solr/geonames/config -d '{
   "add-requesthandler" : {
     "name": "/tag",
     "class":"solr.TaggerRequestHandler",
-    "defaults":{ "field":"name_tag" }
+    "defaults":{"field":"name_tag"}
   }
 }'
-....
+----
 
 [[tagger-load-some-sample-data]]
 === Load Some Sample Data
@@ -197,40 +196,45 @@
 population.
 
 Using bin/post:
-....
+[source,bash]
+----
 bin/post -c geonames -type text/csv \
   -params 'optimize=true&separator=%09&encapsulator=%00&fieldnames=id,name,,alternative_names,latitude,longitude,,,countrycode,,,,,,population,elevation,,timezone,lastupdate' \
   /tmp/cities1000.txt
-....
+----
+
 or using curl:
-....
+
+[source,bash]
+----
 curl -X POST --data-binary @/path/to/cities1000.txt -H 'Content-type:application/csv' \
   'http://localhost:8983/solr/geonames/update?commit=true&optimize=true&separator=%09&encapsulator=%00&fieldnames=id,name,,alternative_names,latitude,longitude,,,countrycode,,,,,,population,elevation,,timezone,lastupdate'
-....
+----
 
 That might take around 35 seconds; it depends. It can be a lot faster if
 the schema were tuned to only have what we truly need (no text search if
 not needed).
 
-In that command we said optimize=true to put the index in a state that
-will make tagging faster. The encapsulator=%00 is a bit of a hack to
+In that command we said `optimize=true` to put the index in a state that
+will make tagging faster. The `encapsulator=%00` is a bit of a hack to
 disable the default double-quote.
 
-[[tagger-tag-time]]
 === Tag Time!
 
 This is a trivial example tagging a small piece of text. For more
 options, see the earlier documentation.
 
-....
+[source,bash]
+----
 curl -X POST \
   'http://localhost:8983/solr/geonames/tag?overlaps=NO_SUB&tagsLimit=5000&fl=id,name,countrycode&wt=json&indent=on' \
   -H 'Content-Type:text/plain' -d 'Hello New York City'
-....
+----
 
 The response should be this (the QTime may vary):
 
-....
+[source,json]
+----
 {
   "responseHeader":{
     "status":0,
@@ -246,10 +250,9 @@
         "name":["New York City"],
         "countrycode":["US"]}]
   }}
-....
+----
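
The same request can also be issued from SolrJ. A hedged sketch: the use of `ContentStreamUpdateRequest` here is just one convenient way to post a raw text body to the `/tag` handler, not something the tagger requires, and the parameters simply mirror the curl example above:

[source,java]
----
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
import org.apache.solr.common.util.ContentStreamBase;

// Illustrative only: post plain text to /tag and print the raw response.
void tagText() throws Exception {
  try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/geonames").build()) {
    ContentStreamUpdateRequest req = new ContentStreamUpdateRequest("/tag");
    ContentStreamBase.StringStream text = new ContentStreamBase.StringStream("Hello New York City");
    text.setContentType("text/plain");
    req.addContentStream(text);
    req.setParam("overlaps", "NO_SUB");
    req.setParam("fl", "id,name,countrycode");
    System.out.println(client.request(req));
  }
}
----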
 
-[[tagger-tips]]
-== Tips
+== Tagger Tips
 
 Performance Tips:
 
@@ -262,4 +265,4 @@
      You'll need to keep track of the character offsets of these so you can subtract them from the results.
 ** For reducing tagging latency even further, consider embedding Solr with `EmbeddedSolrServer`.
    See `EmbeddedSolrNoSerializeTest`.
-** Use more than one thread -- perhaps as many as there are CPU cores available to Solr.
\ No newline at end of file
+** Use more than one thread -- perhaps as many as there are CPU cores available to Solr.
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java
index f0e0b3c..28d7874 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java
@@ -15,15 +15,18 @@
  * limitations under the License.
  */
 package org.apache.solr.client.solrj.request;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.ContentStreamBase;
 
-import java.io.IOException;
 import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.ContentStreamBase;
+
 
 /**
  * Basic functionality to upload a File or {@link org.apache.solr.common.util.ContentStream} to a Solr Cell or some
@@ -51,6 +54,23 @@
     return contentStreams;
   }
 
+  @Override
+  public RequestWriter.ContentWriter getContentWriter(String expectedType) {
+    if (contentStreams == null || contentStreams.isEmpty() || contentStreams.size() > 1) return null;
+    ContentStream stream = contentStreams.get(0);
+    return new RequestWriter.ContentWriter() {
+      @Override
+      public void write(OutputStream os) throws IOException {
+        IOUtils.copy(stream.getStream(), os);
+      }
+
+      @Override
+      public String getContentType() {
+        return stream.getContentType();
+      }
+    };
+  }
+
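
A hedged usage sketch of the path this new method enables: when exactly one content stream is present (for example a single file added for Solr Cell extraction), the client can write the body through the returned `ContentWriter` instead of the multi-stream handling. The handler path and parameters below are illustrative only:

[source,java]
----
import java.io.File;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;

// Illustrative only: with a single file, getContentWriter() returns a writer that copies it directly.
void indexPdf(SolrClient client) throws Exception {
  ContentStreamUpdateRequest req = new ContentStreamUpdateRequest("/update/extract");
  req.addFile(new File("example.pdf"), "application/pdf");
  req.setParam("literal.id", "doc1");
  req.setParam("commit", "true");
  req.process(client);
}
----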
   /**
    * Add a File to the {@link org.apache.solr.common.util.ContentStream}s.
    * @param file The File to add.
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/schema/SchemaRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/schema/SchemaRequest.java
index 29db3bb..6c63bb4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/schema/SchemaRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/schema/SchemaRequest.java
@@ -17,6 +17,8 @@
 package org.apache.solr.client.solrj.request.schema;
 
 import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
@@ -25,7 +27,9 @@
 import java.util.Map;
 
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.request.RequestWriter;
 import org.apache.solr.client.solrj.response.schema.SchemaResponse;
+import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ContentStream;
 import org.apache.solr.common.util.ContentStreamBase;
@@ -706,6 +710,7 @@
     protected abstract NamedList<Object> getRequestParameters();
 
     @Override
+    @Deprecated
     public Collection<ContentStream> getContentStreams() throws IOException {
       CharArr json = new CharArr();
       new SchemaRequestJSONWriter(json).write(getRequestParameters());
@@ -713,6 +718,24 @@
     }
 
     @Override
+    public RequestWriter.ContentWriter getContentWriter(String expectedType) {
+      return new RequestWriter.ContentWriter() {
+        @Override
+        public void write(OutputStream os) throws IOException {
+          //TODO :  find a way to do streaming write
+          CharArr json = new CharArr();
+          new SchemaRequestJSONWriter(json).write(getRequestParameters());
+          os.write(json.toString().getBytes(StandardCharsets.UTF_8));
+        }
+
+        @Override
+        public String getContentType() {
+          return CommonParams.JSON_MIME;
+        }
+      };
+    }
+
+    @Override
     protected SchemaResponse.UpdateResponse createResponse(SolrClient client) {
       return new SchemaResponse.UpdateResponse();
     }
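
A hedged usage sketch: any schema update built through these request classes now serializes its JSON payload via the `ContentWriter` above; the field attributes and core URL below are illustrative only:

[source,java]
----
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;
import org.apache.solr.client.solrj.response.schema.SchemaResponse;

// Illustrative only: the AddField payload is written as UTF-8 JSON by getContentWriter().
void addTitleField() throws Exception {
  try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/gettingstarted").build()) {
    Map<String, Object> field = new LinkedHashMap<>();
    field.put("name", "title");
    field.put("type", "text_general");
    field.put("stored", true);
    SchemaResponse.UpdateResponse rsp = new SchemaRequest.AddField(field).process(client);
    System.out.println(rsp.getResponse());
  }
}
----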