Solr Output Connector: apply multipart POST on all requests

git-svn-id: https://svn.apache.org/repos/asf/manifoldcf/branches/CONNECTORS-1740@1909097 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/build.xml b/build.xml
index bdd185f..92f204a 100644
--- a/build.xml
+++ b/build.xml
@@ -1442,6 +1442,10 @@
             <param name="artifact-name" value="http2-http-client-transport"/>
             <param name="artifact-type" value="jar"/>
         </antcall>
+        <antcall target="download-via-maven"><param name="project-path" value="${jetty-package}"/><param name="artifact-version" value="${jetty.version}"/><param name="target" value="lib"/>
+            <param name="artifact-name" value="jetty-alpn-client"/>
+            <param name="artifact-type" value="jar"/>
+        </antcall>
     	  <antcall target="download-via-maven"><param name="project-path" value="${jetty-package}"/><param name="artifact-version" value="${jetty.version}"/><param name="target" value="lib"/>
             <param name="artifact-name" value="jetty-alpn-java-client"/>
             <param name="artifact-type" value="jar"/>
diff --git a/connectors/solr/build.xml b/connectors/solr/build.xml
index e836034..f44d178 100644
--- a/connectors/solr/build.xml
+++ b/connectors/solr/build.xml
@@ -34,6 +34,11 @@
         <path refid="mcf-connector-build.connector-classpath"/>
         <fileset dir="../../lib">
             <include name="solr-solrj*.jar"/>
+            <include name="http2*.jar"/>
+            <include name="httpmime*.jar"/>
+            <include name="jetty-client*.jar"/>
+            <include name="jetty-http*.jar"/>
+            <include name="jetty-util*.jar"/>
             <include name="commons-math3*.jar"/>
             <include name="jcl-over-slf4j*.jar"/>
         </fileset>
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/HttpPoster.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/HttpPoster.java
index 23f1873..8d9d9ce 100644
--- a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/HttpPoster.java
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/HttpPoster.java
@@ -18,64 +18,61 @@
 */
 package org.apache.manifoldcf.agents.output.solr;
 
-import org.apache.http.client.CredentialsProvider;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.config.SocketConfig;
-import org.apache.http.config.RegistryBuilder;
-import org.apache.http.conn.socket.ConnectionSocketFactory;
-import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
-import org.apache.http.conn.socket.PlainConnectionSocketFactory;
-import org.apache.http.conn.ssl.NoopHostnameVerifier;
-import org.apache.http.impl.client.*;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.protocol.HttpRequestExecutor;
-import org.apache.manifoldcf.core.interfaces.*;
-import org.apache.manifoldcf.connectorcommon.interfaces.*;
-import org.apache.manifoldcf.core.common.DateParser;
-import org.apache.manifoldcf.agents.interfaces.*;
-import org.apache.manifoldcf.agents.system.*;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.InterruptedIOException;
+import java.io.Reader;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
 
-import java.io.*;
-import java.util.*;
-import java.util.regex.*;
-
+import org.apache.commons.lang.StringUtils;
 import org.apache.http.Consts;
-import org.apache.http.auth.AuthScope;
-import org.apache.http.auth.Credentials;
-import org.apache.http.auth.UsernamePasswordCredentials;
-
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.manifoldcf.agents.interfaces.IOutputAddActivity;
+import org.apache.manifoldcf.agents.interfaces.IOutputRemoveActivity;
+import org.apache.manifoldcf.agents.interfaces.RepositoryDocument;
+import org.apache.manifoldcf.agents.interfaces.ServiceInterruption;
+import org.apache.manifoldcf.agents.output.solr.ModifiedHttp2SolrClient.Builder;
+import org.apache.manifoldcf.agents.system.Logging;
+import org.apache.manifoldcf.connectorcommon.interfaces.IKeystoreManager;
+import org.apache.manifoldcf.core.common.DateParser;
+import org.apache.manifoldcf.core.interfaces.ManifoldCFException;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.Http2SolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.Krb5HttpClientBuilder;
+import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
 import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.common.util.ContentStreamBase;
-import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.response.SolrPingResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.common.SolrInputDocument;
-
-import org.apache.commons.lang.StringUtils;
-
-import org.apache.solr.client.solrj.impl.Krb5HttpClientBuilder;
-import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
-import org.apache.solr.client.solrj.impl.Http2SolrClient.Builder;
+import org.apache.solr.common.util.ContentStreamBase;
 
 /**
-* Posts an input stream to SOLR
-*
-* @author James Sablatura, modified by Karl Wright
-*/
-public class HttpPoster
-{
+ * Posts an input stream to SOLR
+ *
+ * @author James Sablatura, modified by Karl Wright
+ */
+public class HttpPoster {
   public static final String _rcsid = "@(#)$Id: HttpPoster.java 991295 2010-08-31 19:12:14Z kwright $";
 
   /** Ingestion buffer size property. */
@@ -118,7 +115,7 @@
 
   // Included and excluded mime types
   private final Set<String> includedMimeTypes;
-  private final Set<String>excludedMimeTypes;
+  private final Set<String> excludedMimeTypes;
 
   // Commit-within flag
   private final String commitWithin;
@@ -132,20 +129,14 @@
   /** How long to wait before retrying a failed ingestion */
   private static final long interruptionRetryTime = 60000L;
 
-  /** Initialize the SolrCloud http poster.
-  */
-  public HttpPoster(List<String> zookeeperHosts, String znodePath, String collection,
-    int socketTimeout, int connectionTimeout,
-    String updatePath, String removePath, String statusPath,
-    String allowAttributeName, String denyAttributeName, String idAttributeName,
-    String originalSizeAttributeName, String modifiedDateAttributeName, String createdDateAttributeName, String indexedDateAttributeName,
-    String fileNameAttributeName, String mimeTypeAttributeName, String contentAttributeName,
-    Long maxDocumentLength,
-    String commitWithin, boolean useExtractUpdateHandler,
-    final Set<String> includedMimeTypes, final Set<String> excludedMimeTypes,
-    boolean allowCompression)
-    throws ManifoldCFException
-  {
+  /**
+   * Initialize the SolrCloud http poster.
+   */
+  public HttpPoster(final List<String> zookeeperHosts, final String znodePath, final String collection, final int socketTimeout, final int connectionTimeout, final String updatePath,
+      final String removePath, final String statusPath, final String allowAttributeName, final String denyAttributeName, final String idAttributeName, final String originalSizeAttributeName,
+      final String modifiedDateAttributeName, final String createdDateAttributeName, final String indexedDateAttributeName, final String fileNameAttributeName, final String mimeTypeAttributeName,
+      final String contentAttributeName, final Long maxDocumentLength, final String commitWithin, final boolean useExtractUpdateHandler, final Set<String> includedMimeTypes,
+      final Set<String> excludedMimeTypes, final boolean allowCompression) throws ManifoldCFException {
     // These are the paths to the handlers in Solr that deal with the actions we need to do
     this.postUpdateAction = updatePath;
     this.postRemoveAction = removePath;
@@ -172,40 +163,34 @@
     initializeKerberos();
 
     Optional<String> chroot = Optional.empty();
-    if(znodePath != null && !znodePath.isEmpty()) {
+    if (znodePath != null && !znodePath.isEmpty()) {
       chroot = Optional.of(znodePath);
     }
-    Builder http2SolrClientBuilder = new Http2SolrClient.Builder();
+    final Builder http2SolrClientBuilder = new ModifiedHttp2SolrClient.Builder();
     http2SolrClientBuilder.connectionTimeout(connectionTimeout);
     http2SolrClientBuilder.idleTimeout(socketTimeout);
-    // CloudSolrClient does not have exceptions in its build method signature because it catches them and encapsulate them as runtime exceptions. 
+    // CloudSolrClient does not have exceptions in its build method signature because it catches them and encapsulates them as runtime exceptions.
     // Thus, we need to set a try catch here to properly raise the exceptions that may happen
     try {
-      CloudSolrClient cloudSolrServer = new CloudSolrClient.Builder(zookeeperHosts, chroot).withInternalClientBuilder(http2SolrClientBuilder).build();
+      final ModifiedCloudSolrClient cloudSolrServer = new ModifiedCloudSolrClient.Builder(zookeeperHosts, chroot).withInternalClientBuilder(http2SolrClientBuilder).build();
       cloudSolrServer.setDefaultCollection(collection);
       // Set the solrj instance we want to use
       solrServer = cloudSolrServer;
-    } catch (Exception e) {
+    } catch (final Exception e) {
       throw new ManifoldCFException(e);
     }
-    
+
   }
 
-  /** Initialize the standard http poster.
-  */
-  public HttpPoster(String protocol, String server, int port, String webapp, String core,
-    int connectionTimeout, int socketTimeout,
-    String updatePath, String removePath, String statusPath,
-    String realm, String userID, String password,
-    String allowAttributeName, String denyAttributeName, String idAttributeName,
-    String originalSizeAttributeName, String modifiedDateAttributeName, String createdDateAttributeName, String indexedDateAttributeName,
-    String fileNameAttributeName, String mimeTypeAttributeName, String contentAttributeName,
-    IKeystoreManager keystoreManager, Long maxDocumentLength,
-    String commitWithin, boolean useExtractUpdateHandler,
-    final Set<String> includedMimeTypes, final Set<String> excludedMimeTypes,
-    boolean allowCompression)
-    throws ManifoldCFException
-  {
+  /**
+   * Initialize the standard http poster.
+   */
+  public HttpPoster(final String protocol, final String server, final int port, final String webapp, final String core, final int connectionTimeout, final int socketTimeout, final String updatePath,
+      final String removePath, final String statusPath, final String realm, final String userID, final String password, final String allowAttributeName, final String denyAttributeName,
+      final String idAttributeName, final String originalSizeAttributeName, final String modifiedDateAttributeName, final String createdDateAttributeName, final String indexedDateAttributeName,
+      final String fileNameAttributeName, final String mimeTypeAttributeName, final String contentAttributeName, final IKeystoreManager keystoreManager, final Long maxDocumentLength,
+      final String commitWithin, final boolean useExtractUpdateHandler, final Set<String> includedMimeTypes, final Set<String> excludedMimeTypes, final boolean allowCompression)
+      throws ManifoldCFException {
     // These are the paths to the handlers in Solr that deal with the actions we need to do
     this.postUpdateAction = updatePath;
     this.postRemoveAction = removePath;
@@ -234,55 +219,48 @@
     String location = "";
     if (webapp != null)
       location = "/" + webapp;
-    if (core != null)
-    {
+    if (core != null) {
       if (webapp == null)
         throw new ManifoldCFException("Webapp must be specified if core is specified.");
       location += "/" + core;
     }
 
     // Initialize standard solr-j.
-    String httpSolrServerUrl = protocol + "://" + server + ":" + port + location;
-    Http2SolrClient.Builder solrClientBuilder = new Http2SolrClient.Builder(httpSolrServerUrl);
+    final String httpSolrServerUrl = protocol + "://" + server + ":" + port + location;
+    final Builder solrClientBuilder = new ModifiedHttp2SolrClient.Builder(httpSolrServerUrl);
     solrClientBuilder.maxConnectionsPerHost(1);
     // Set tiemouts
-    solrClientBuilder.connectionTimeout(connectionTimeout); 
+    solrClientBuilder.connectionTimeout(connectionTimeout);
     solrClientBuilder.idleTimeout(socketTimeout);
     // Set auth credentials if provided
-    if (userID != null && userID.length() > 0 && password != null)
-    {
-     solrClientBuilder.withBasicAuthCredentials(userID, password);
+    if (userID != null && userID.length() > 0 && password != null) {
+      solrClientBuilder.withBasicAuthCredentials(userID, password);
     }
-    solrServer = new Http2SolrClient.Builder(httpSolrServerUrl).build();
+    solrServer = new ModifiedHttp2SolrClient.Builder(httpSolrServerUrl).build();
   }
 
-  private static void initializeKerberos()
-  {
+  private static void initializeKerberos() {
 
     final String loginConfig = System.getProperty("java.security.auth.login.config");
     if (loginConfig != null && loginConfig.trim().length() > 0) {
       if (Logging.ingest.isInfoEnabled()) {
         Logging.ingest.info("Using Kerberos for Solr Authentication");
       }
-      Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder();
-      SolrHttpClientBuilder kb = krbBuild.getBuilder();
+      final Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder();
+      final SolrHttpClientBuilder kb = krbBuild.getBuilder();
       HttpClientUtil.setHttpClientBuilder(kb);
     }
 
   }
 
-  /** Shut down the poster.
-  */
-  public void shutdown()
-  {
-    if (solrServer != null)
-    {
-      try
-      {
+  /**
+   * Shut down the poster.
+   */
+  public void shutdown() {
+    if (solrServer != null) {
+      try {
         solrServer.close();
-      }
-      catch (IOException ioe)
-      {
+      } catch (final IOException ioe) {
         // Eat this exception
       }
       solrServer = null;
@@ -292,125 +270,90 @@
     connectionManager = null;
   }
 
-  /** Cause a commit to happen.
-  */
-  public void commitPost()
-    throws ManifoldCFException, ServiceInterruption
-  {
+  /**
+   * Cause a commit to happen.
+   */
+  public void commitPost() throws ManifoldCFException, ServiceInterruption {
     if (Logging.ingest.isDebugEnabled())
       Logging.ingest.debug("commitPost()");
 
     // Open a socket to ingest, and to the response stream to get the post result
-    try
-    {
-      CommitThread t = new CommitThread();
-      try
-      {
+    try {
+      final CommitThread t = new CommitThread();
+      try {
         t.start();
         t.finishUp();
         return;
-      }
-      catch (InterruptedException e)
-      {
+      } catch (final InterruptedException e) {
         t.interrupt();
-        throw new ManifoldCFException("Interrupted: "+e.getMessage(),ManifoldCFException.INTERRUPTED);
+        throw new ManifoldCFException("Interrupted: " + e.getMessage(), ManifoldCFException.INTERRUPTED);
       }
-    }
-    catch (SolrServerException e)
-    {
+    } catch (final SolrServerException e) {
       handleSolrServerException(e, "commit");
       return;
-    }
-    catch (SolrException e)
-    {
+    } catch (final SolrException e) {
       handleSolrException(e, "commit");
       return;
-    }
-    catch (RuntimeException e)
-    {
+    } catch (final RuntimeException e) {
       handleRuntimeException(e, "commit");
       return;
-    }
-    catch (IOException ioe)
-    {
+    } catch (final IOException ioe) {
       handleIOException(ioe, "commit");
       return;
     }
   }
 
-  /** Handle a RuntimeException.
-  * Unfortunately, SolrCloud 4.6.x throws RuntimeExceptions whenever ZooKeeper is not happy.
-  * We have to catch these too.  I've logged a ticket: SOLR-5678.
-  */
-  protected static void handleRuntimeException(RuntimeException e, String context)
-    throws ManifoldCFException, ServiceInterruption
-  {
-    Throwable childException = e.getCause();
-    if (childException != null && childException instanceof java.util.concurrent.TimeoutException)
-    {
-      Logging.ingest.warn("SolrJ runtime exception during "+context+": "+childException.getMessage(),childException);
-      long currentTime = System.currentTimeMillis();
-      throw new ServiceInterruption(childException.getMessage(),childException,
-        currentTime + interruptionRetryTime,
-        currentTime + 2L * 60L * 60000L,
-        -1,
-        true);
+  /**
+   * Handle a RuntimeException. Unfortunately, SolrCloud 4.6.x throws RuntimeExceptions whenever ZooKeeper is not happy. We have to catch these too. I've logged a ticket: SOLR-5678.
+   */
+  protected static void handleRuntimeException(final RuntimeException e, final String context) throws ManifoldCFException, ServiceInterruption {
+    final Throwable childException = e.getCause();
+    if (childException != null && childException instanceof java.util.concurrent.TimeoutException) {
+      Logging.ingest.warn("SolrJ runtime exception during " + context + ": " + childException.getMessage(), childException);
+      final long currentTime = System.currentTimeMillis();
+      throw new ServiceInterruption(childException.getMessage(), childException, currentTime + interruptionRetryTime, currentTime + 2L * 60L * 60000L, -1, true);
     }
     // Solr was not able to parse the request because it is malformed: skip the document
-    Logging.ingest.warn("Solr was unable to parse request during "+context+": "+e.getMessage(),e);
+    Logging.ingest.warn("Solr was unable to parse request during " + context + ": " + e.getMessage(), e);
     return;
   }
 
-  /** Handle a SolrServerException.
-  * These exceptions seem to be catch-all exceptions having to do with misconfiguration or
-  * underlying IO exceptions, or request parsing exceptions.
-  * If this method doesn't throw an exception, it means that the exception should be interpreted
-  * as meaning that the document or action is illegal and should not be repeated.
-  */
-  protected static void handleSolrServerException(SolrServerException e, String context)
-    throws ManifoldCFException, ServiceInterruption
-  {
-    Throwable childException = e.getCause();
-    if (childException instanceof IOException)
-    {
-      handleIOException((IOException)childException, context);
+  /**
+   * Handle a SolrServerException. These exceptions seem to be catch-all exceptions having to do with misconfiguration or underlying IO exceptions, or request parsing exceptions. If this method
+   * doesn't throw an exception, it means that the exception should be interpreted as meaning that the document or action is illegal and should not be repeated.
+   */
+  protected static void handleSolrServerException(final SolrServerException e, final String context) throws ManifoldCFException, ServiceInterruption {
+    final Throwable childException = e.getCause();
+    if (childException instanceof IOException) {
+      handleIOException((IOException) childException, context);
       return;
     } else if (childException instanceof RuntimeException) {
       handleRuntimeException((RuntimeException) childException, context);
       return;
     }
-    //  Unknown exception, but a "Solr down" related issue does not end up here: log the error and skip this document or action
-    String message = "SolrServerException exception during "+context+": "+e.getMessage();
-    Logging.ingest.warn(message,e);
+    // Unknown exception, but a "Solr down" related issue does not end up here: log the error and skip this document or action
+    final String message = "SolrServerException exception during " + context + ": " + e.getMessage();
+    Logging.ingest.warn(message, e);
     return;
   }
 
-  /** Handle a SolrException.
-  * These exceptions are mainly Http errors having to do with actual responses from Solr.
-  * If this method doesn't throw an exception, it means that the exception should be interpreted
-  * as meaning that the document or action is illegal and should not be repeated.
-  */
-  protected static void handleSolrException(SolrException e, String context)
-    throws ManifoldCFException, ServiceInterruption
-  {
+  /**
+   * Handle a SolrException. These exceptions are mainly Http errors having to do with actual responses from Solr. If this method doesn't throw an exception, it means that the exception should be
+   * interpreted as meaning that the document or action is illegal and should not be repeated.
+   */
+  protected static void handleSolrException(final SolrException e, final String context) throws ManifoldCFException, ServiceInterruption {
     int code = e.code();
-    if (code == 0)
-    {
-      try
-      {
-        // Solrj doesn't always set the code properly.  If it doesn't, we have to parse it out of the exception string.  Ugh.
-        Pattern p = Pattern.compile("non ok status:([0-9]*),");
-        Matcher m = p.matcher(e.getMessage());
+    if (code == 0) {
+      try {
+        // Solrj doesn't always set the code properly. If it doesn't, we have to parse it out of the exception string. Ugh.
+        final Pattern p = Pattern.compile("non ok status:([0-9]*),");
+        final Matcher m = p.matcher(e.getMessage());
         if (m.find())
           code = Integer.parseInt(m.group(1));
-      }
-      catch (PatternSyntaxException e2)
-      {
-        throw new ManifoldCFException("Unexpected error: "+e2.getMessage());
-      }
-      catch (NumberFormatException e2)
-      {
-        throw new ManifoldCFException("Unexpected error: "+e2.getMessage());
+      } catch (final PatternSyntaxException e2) {
+        throw new ManifoldCFException("Unexpected error: " + e2.getMessage());
+      } catch (final NumberFormatException e2) {
+        throw new ManifoldCFException("Unexpected error: " + e2.getMessage());
       }
     }
 
@@ -420,9 +363,8 @@
       return;
 
     // If code is 401, we should abort the job because security credentials are incorrect
-    if (code == 401)
-    {
-      String message = "Solr authorization failure, code "+code+": aborting job";
+    if (code == 401) {
+      final String message = "Solr authorization failure, code " + code + ": aborting job";
       Logging.ingest.error(message);
       throw new ManifoldCFException(message);
     }
@@ -433,351 +375,255 @@
 
     // The only other kind of return code we know how to handle is 50x.
     // For these, we should retry for a while.
-    if (code == 500)
-    {
-      long currentTime = System.currentTimeMillis();
+    if (code == 500) {
+      final long currentTime = System.currentTimeMillis();
 
       // Log the error
-      String message = "Solr exception during "+context+" ("+e.code()+"): "+e.getMessage();
-      Logging.ingest.warn(message,e);
-      throw new ServiceInterruption(message,
-        e,
-        currentTime + interruptionRetryTime,
-        currentTime + 2L * 60L * 60000L,
-        -1,
-        true);
+      final String message = "Solr exception during " + context + " (" + e.code() + "): " + e.getMessage();
+      Logging.ingest.warn(message, e);
+      throw new ServiceInterruption(message, e, currentTime + interruptionRetryTime, currentTime + 2L * 60L * 60000L, -1, true);
     }
 
     // Unknown code: end the job.
-    throw new ManifoldCFException("Unhandled Solr exception during "+context+" ("+e.code()+"): "+e.getMessage());
+    throw new ManifoldCFException("Unhandled Solr exception during " + context + " (" + e.code() + "): " + e.getMessage());
   }
 
-  /** Handle an IOException.
-  * I'm not actually sure where these exceptions come from in SolrJ, but we handle them
-  * as real I/O errors, meaning they should be retried.
-  */
-  protected static void handleIOException(IOException e, String context)
-    throws ManifoldCFException, ServiceInterruption
-  {
+  /**
+   * Handle an IOException. I'm not actually sure where these exceptions come from in SolrJ, but we handle them as real I/O errors, meaning they should be retried.
+   */
+  protected static void handleIOException(final IOException e, final String context) throws ManifoldCFException, ServiceInterruption {
     if ((e instanceof InterruptedIOException) && (!(e instanceof java.net.SocketTimeoutException)))
       throw new ManifoldCFException(e.getMessage(), ManifoldCFException.INTERRUPTED);
 
-    long currentTime = System.currentTimeMillis();
+    final long currentTime = System.currentTimeMillis();
 
-    if (e instanceof java.net.ConnectException)
-    {
-      // Server isn't up at all.  Try for a brief time then give up.
-      String message = "Server could not be contacted during "+context+": "+e.getMessage();
-      Logging.ingest.warn(message,e);
-      throw new ServiceInterruption(message,
-        e,
-        currentTime + interruptionRetryTime,
-        -1L,
-        3,
-        true);
+    if (e instanceof java.net.ConnectException) {
+      // Server isn't up at all. Try for a brief time then give up.
+      final String message = "Server could not be contacted during " + context + ": " + e.getMessage();
+      Logging.ingest.warn(message, e);
+      throw new ServiceInterruption(message, e, currentTime + interruptionRetryTime, -1L, 3, true);
     }
 
-    if (e instanceof java.net.SocketTimeoutException)
-    {
-      String message2 = "Socket timeout exception during "+context+": "+e.getMessage();
-      Logging.ingest.warn(message2,e);
-      throw new ServiceInterruption(message2,
-        e,
-        currentTime + interruptionRetryTime,
-        currentTime + 20L * 60000L,
-        -1,
-        false);
+    if (e instanceof java.net.SocketTimeoutException) {
+      final String message2 = "Socket timeout exception during " + context + ": " + e.getMessage();
+      Logging.ingest.warn(message2, e);
+      throw new ServiceInterruption(message2, e, currentTime + interruptionRetryTime, currentTime + 20L * 60000L, -1, false);
     }
 
-    if (e.getClass().getName().equals("java.net.SocketException"))
-    {
+    if (e.getClass().getName().equals("java.net.SocketException")) {
       // In the past we would have treated this as a straight document rejection, and
-      // treated it in the same manner as a 400.  The reasoning is that the server can
+      // treated it in the same manner as a 400. The reasoning is that the server can
       // perfectly legally send out a 400 and drop the connection immediately thereafter,
       // this a race condition.
       // However, Solr 4.0 (or the Jetty version that the example runs on) seems
       // to have a bug where it drops the connection when two simultaneous documents come in
-      // at the same time.  This is the final version of Solr 4.0 so we need to deal with
+      // at the same time. This is the final version of Solr 4.0 so we need to deal with
       // this.
-      if (e.getMessage().toLowerCase(Locale.ROOT).indexOf("broken pipe") != -1 ||
-        e.getMessage().toLowerCase(Locale.ROOT).indexOf("connection reset") != -1 ||
-        e.getMessage().toLowerCase(Locale.ROOT).indexOf("target server failed to respond") != -1)
-      {
+      if (e.getMessage().toLowerCase(Locale.ROOT).indexOf("broken pipe") != -1 || e.getMessage().toLowerCase(Locale.ROOT).indexOf("connection reset") != -1
+          || e.getMessage().toLowerCase(Locale.ROOT).indexOf("target server failed to respond") != -1) {
         // Treat it as a service interruption, but with a limited number of retries.
         // In that way we won't burden the user with a huge retry interval; it should
         // give up fairly quickly, and yet NOT give up if the error was merely transient
-        String message = "Server dropped connection during "+context+": "+e.getMessage();
-        Logging.ingest.warn(message,e);
-        throw new ServiceInterruption(message,
-          e,
-          currentTime + interruptionRetryTime,
-          -1L,
-          3,
-          false);
+        final String message = "Server dropped connection during " + context + ": " + e.getMessage();
+        Logging.ingest.warn(message, e);
+        throw new ServiceInterruption(message, e, currentTime + interruptionRetryTime, -1L, 3, false);
       }
 
       // Other socket exceptions are service interruptions - but if we keep getting them, it means
-      // that a socket timeout is probably set too low to accept this particular document.  So
+      // that a socket timeout is probably set too low to accept this particular document. So
       // we retry for a while, then skip the document.
-      String message2 = "Socket exception during "+context+": "+e.getMessage();
-      Logging.ingest.warn(message2,e);
-      throw new ServiceInterruption(message2,
-        e,
-        currentTime + interruptionRetryTime,
-        currentTime + 20L * 60000L,
-        -1,
-        false);
+      final String message2 = "Socket exception during " + context + ": " + e.getMessage();
+      Logging.ingest.warn(message2, e);
+      throw new ServiceInterruption(message2, e, currentTime + interruptionRetryTime, currentTime + 20L * 60000L, -1, false);
     }
 
     // Otherwise, no idea what the trouble is, so presume that retries might fix it, skip the document/action otherwise
-    String message3 = "IO exception during "+context+": "+e.getMessage();
-    Logging.ingest.warn(message3,e);
-    throw new ServiceInterruption(message3,
-      e,
-      currentTime + interruptionRetryTime,
-      -1L,
-      3,
-      false);
+    final String message3 = "IO exception during " + context + ": " + e.getMessage();
+    Logging.ingest.warn(message3, e);
+    throw new ServiceInterruption(message3, e, currentTime + interruptionRetryTime, -1L, 3, false);
   }
 
   /**
-  * Post the input stream to ingest
-  *
-   * @param documentURI is the document's uri.
-   * @param document is the document structure to ingest.
-   * @param arguments are the configuration arguments to pass in the post.  Key is argument name, value is a list of the argument values.
+   * Post the input stream to ingest
+   *
+   * @param documentURI         is the document's uri.
+   * @param document            is the document structure to ingest.
+   * @param arguments           are the configuration arguments to pass in the post. Key is argument name, value is a list of the argument values.
    * @param authorityNameString is the name of the governing authority for this document's acls, or null if none.
-   * @param activities is the activities object, so we can report what's happening.   @return true if the ingestion was successful, or false if the ingestion is illegal.
-  * @throws ManifoldCFException
-  * @throws ServiceInterruption
-  */
-  public boolean indexPost(String documentURI,
-    RepositoryDocument document, Map<String,List<String>> arguments,
-    String authorityNameString, IOutputAddActivity activities)
-    throws ManifoldCFException, ServiceInterruption
-  {
+   * @param activities          is the activities object, so we can report what's happening. @return true if the ingestion was successful, or false if the ingestion is illegal.
+   * @throws ManifoldCFException
+   * @throws ServiceInterruption
+   */
+  public boolean indexPost(final String documentURI, final RepositoryDocument document, final Map<String, List<String>> arguments, final String authorityNameString,
+      final IOutputAddActivity activities) throws ManifoldCFException, ServiceInterruption {
     if (Logging.ingest.isDebugEnabled())
       Logging.ingest.debug("indexPost(): '" + documentURI + "'");
 
     // If the document is too long, reject it.
-    if (maxDocumentLength != null && document.getBinaryLength() > maxDocumentLength.longValue()){
-      activities.recordActivity(null,SolrConnector.INGEST_ACTIVITY,null,documentURI,activities.EXCLUDED_LENGTH,"Solr connector rejected document due to its big size: ('"+document.getBinaryLength()+"')");
+    if (maxDocumentLength != null && document.getBinaryLength() > maxDocumentLength.longValue()) {
+      activities.recordActivity(null, SolrConnector.INGEST_ACTIVITY, null, documentURI, activities.EXCLUDED_LENGTH,
+          "Solr connector rejected document due to its big size: ('" + document.getBinaryLength() + "')");
       return false;
     }
 
     // If not the right mime type, reject it.
-    // Note: this code added as part of CONNECTORS-1482 was incorrect!  Document filtering specified in the solr connector is always against the
-    // ORIGINAL mime type (which is what's in the document).  This why the checkMimeTypeIndexable second argument is always "true".
-    if ((includedMimeTypes !=null || excludedMimeTypes != null) && !checkMimeTypeIndexable(document.getMimeType(), true, includedMimeTypes, excludedMimeTypes)) {
-      activities.recordActivity(null,SolrConnector.INGEST_ACTIVITY,null,documentURI,activities.EXCLUDED_MIMETYPE,"Solr connector rejected document due to mime type restrictions: ("+document.getMimeType()+")");
+    // Note: this code added as part of CONNECTORS-1482 was incorrect! Document filtering specified in the solr connector is always against the
+    // ORIGINAL mime type (which is what's in the document). This is why the checkMimeTypeIndexable second argument is always "true".
+    if ((includedMimeTypes != null || excludedMimeTypes != null) && !checkMimeTypeIndexable(document.getMimeType(), true, includedMimeTypes, excludedMimeTypes)) {
+      activities.recordActivity(null, SolrConnector.INGEST_ACTIVITY, null, documentURI, activities.EXCLUDED_MIMETYPE,
+          "Solr connector rejected document due to mime type restrictions: (" + document.getMimeType() + ")");
       return false;
     }
 
     // Convert the incoming acls that we know about to qualified forms, and reject the document if
     // we don't know how to deal with its acls
-    Map<String,String[]> aclsMap = new HashMap<String,String[]>();
-    Map<String,String[]> denyAclsMap = new HashMap<String,String[]>();
+    final Map<String, String[]> aclsMap = new HashMap<String, String[]>();
+    final Map<String, String[]> denyAclsMap = new HashMap<String, String[]>();
 
-    Iterator<String> aclTypes = document.securityTypesIterator();
-    while (aclTypes.hasNext())
-    {
-      String aclType = aclTypes.next();
-      aclsMap.put(aclType,convertACL(document.getSecurityACL(aclType),authorityNameString,activities));
-      denyAclsMap.put(aclType,convertACL(document.getSecurityDenyACL(aclType),authorityNameString,activities));
+    final Iterator<String> aclTypes = document.securityTypesIterator();
+    while (aclTypes.hasNext()) {
+      final String aclType = aclTypes.next();
+      aclsMap.put(aclType, convertACL(document.getSecurityACL(aclType), authorityNameString, activities));
+      denyAclsMap.put(aclType, convertACL(document.getSecurityDenyACL(aclType), authorityNameString, activities));
 
-      // Reject documents that have security we don't know how to deal with in the Solr plugin!!  Only safe thing to do.
-      if (!aclType.equals(RepositoryDocument.SECURITY_TYPE_DOCUMENT) &&
-        !aclType.equals(RepositoryDocument.SECURITY_TYPE_SHARE) &&
-        !aclType.startsWith(RepositoryDocument.SECURITY_TYPE_PARENT)){
-          activities.recordActivity(null,SolrConnector.INGEST_ACTIVITY,null,documentURI,activities.UNKNOWN_SECURITY,"Solr connector rejected document that has security info which Solr does not recognize: '"+aclType + "'");
-          return false;
+      // Reject documents that have security we don't know how to deal with in the Solr plugin!! Only safe thing to do.
+      if (!aclType.equals(RepositoryDocument.SECURITY_TYPE_DOCUMENT) && !aclType.equals(RepositoryDocument.SECURITY_TYPE_SHARE) && !aclType.startsWith(RepositoryDocument.SECURITY_TYPE_PARENT)) {
+        activities.recordActivity(null, SolrConnector.INGEST_ACTIVITY, null, documentURI, activities.UNKNOWN_SECURITY,
+            "Solr connector rejected document that has security info which Solr does not recognize: '" + aclType + "'");
+        return false;
       }
 
     }
 
-    try
-    {
-      IngestThread t = new IngestThread(documentURI,document,arguments,
-                                        aclsMap,denyAclsMap);
-      try
-      {
+    try {
+      final IngestThread t = new IngestThread(documentURI, document, arguments, aclsMap, denyAclsMap);
+      try {
         t.start();
         t.finishUp();
 
         if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.INGEST_ACTIVITY,t.getActivityBytes(),documentURI,t.getActivityCode(),t.getActivityDetails());
+          activities.recordActivity(t.getActivityStart(), SolrConnector.INGEST_ACTIVITY, t.getActivityBytes(), documentURI, t.getActivityCode(), t.getActivityDetails());
 
         return t.getRval();
-      }
-      catch (InterruptedException e)
-      {
+      } catch (final InterruptedException e) {
         t.interrupt();
-        throw new ManifoldCFException("Interrupted: "+e.getMessage(),ManifoldCFException.INTERRUPTED);
-      }
-      catch (SolrServerException e)
-      {
+        throw new ManifoldCFException("Interrupted: " + e.getMessage(), ManifoldCFException.INTERRUPTED);
+      } catch (final SolrServerException e) {
         if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.INGEST_ACTIVITY,t.getActivityBytes(),documentURI,t.getActivityCode(),t.getActivityDetails());
+          activities.recordActivity(t.getActivityStart(), SolrConnector.INGEST_ACTIVITY, t.getActivityBytes(), documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final SolrException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.INGEST_ACTIVITY, t.getActivityBytes(), documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final RuntimeException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.INGEST_ACTIVITY, t.getActivityBytes(), documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final IOException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.INGEST_ACTIVITY, t.getActivityBytes(), documentURI, t.getActivityCode(), t.getActivityDetails());
         throw e;
       }
-      catch (SolrException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.INGEST_ACTIVITY,t.getActivityBytes(),documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-      catch (RuntimeException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.INGEST_ACTIVITY,t.getActivityBytes(),documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-      catch (IOException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.INGEST_ACTIVITY,t.getActivityBytes(),documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-    }
-    catch (SolrServerException e)
-    {
-      handleSolrServerException(e, "indexing "+documentURI);
+    } catch (final SolrServerException e) {
+      handleSolrServerException(e, "indexing " + documentURI);
       return false;
-    }
-    catch (SolrException e)
-    {
-      handleSolrException(e, "indexing "+documentURI);
+    } catch (final SolrException e) {
+      handleSolrException(e, "indexing " + documentURI);
       return false;
-    }
-    catch (RuntimeException e)
-    {
-      handleRuntimeException(e, "indexing "+documentURI);
+    } catch (final RuntimeException e) {
+      handleRuntimeException(e, "indexing " + documentURI);
       return false;
-    }
-    catch (IOException ioe)
-    {
-      handleIOException(ioe, "indexing "+documentURI);
+    } catch (final IOException ioe) {
+      handleIOException(ioe, "indexing " + documentURI);
       return false;
     }
 
   }
 
-  /** Post a check request.
-  */
-  public void checkPost()
-    throws ManifoldCFException, ServiceInterruption
-  {
+  /**
+   * Post a check request.
+   */
+  public void checkPost() throws ManifoldCFException, ServiceInterruption {
     if (Logging.ingest.isDebugEnabled())
       Logging.ingest.debug("checkPost()");
 
     // Open a socket to ingest, and to the response stream to get the post result
-    try
-    {
-      StatusThread t = new StatusThread();
-      try
-      {
+    try {
+      final StatusThread t = new StatusThread();
+      try {
         t.start();
         t.finishUp();
         return;
-      }
-      catch (InterruptedException e)
-      {
+      } catch (final InterruptedException e) {
         t.interrupt();
-        throw new ManifoldCFException("Interrupted: "+e.getMessage(),ManifoldCFException.INTERRUPTED);
+        throw new ManifoldCFException("Interrupted: " + e.getMessage(), ManifoldCFException.INTERRUPTED);
       }
-    }
-    catch (SolrServerException e)
-    {
+    } catch (final SolrServerException e) {
       handleSolrServerException(e, "check");
       return;
-    }
-    catch (SolrException e)
-    {
+    } catch (final SolrException e) {
       handleSolrException(e, "check");
       return;
-    }
-    catch (RuntimeException e)
-    {
+    } catch (final RuntimeException e) {
       handleRuntimeException(e, "check");
       return;
-    }
-    catch (IOException ioe)
-    {
+    } catch (final IOException ioe) {
       handleIOException(ioe, "check");
       return;
     }
 
   }
 
-  /** Post a delete request.
-  *@param documentURI is the document's URI.
-  */
-  public void deletePost(String documentURI, IOutputRemoveActivity activities)
-    throws ManifoldCFException, ServiceInterruption
-  {
+  /**
+   * Post a delete request.
+   *
+   * @param documentURI is the document's URI.
+   */
+  public void deletePost(final String documentURI, final IOutputRemoveActivity activities) throws ManifoldCFException, ServiceInterruption {
     if (Logging.ingest.isDebugEnabled())
       Logging.ingest.debug("deletePost(): '" + documentURI + "'");
 
-    try
-    {
-      DeleteThread t = new DeleteThread(documentURI);
-      try
-      {
+    try {
+      final DeleteThread t = new DeleteThread(documentURI);
+      try {
         t.start();
         t.finishUp();
 
         if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.REMOVE_ACTIVITY,null,documentURI,t.getActivityCode(),t.getActivityDetails());
+          activities.recordActivity(t.getActivityStart(), SolrConnector.REMOVE_ACTIVITY, null, documentURI, t.getActivityCode(), t.getActivityDetails());
 
         return;
-      }
-      catch (InterruptedException e)
-      {
+      } catch (final InterruptedException e) {
         t.interrupt();
-        throw new ManifoldCFException("Interrupted: "+e.getMessage(),ManifoldCFException.INTERRUPTED);
-      }
-      catch (SolrServerException e)
-      {
+        throw new ManifoldCFException("Interrupted: " + e.getMessage(), ManifoldCFException.INTERRUPTED);
+      } catch (final SolrServerException e) {
         if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.REMOVE_ACTIVITY,null,documentURI,t.getActivityCode(),t.getActivityDetails());
+          activities.recordActivity(t.getActivityStart(), SolrConnector.REMOVE_ACTIVITY, null, documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final SolrException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.REMOVE_ACTIVITY, null, documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final RuntimeException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.REMOVE_ACTIVITY, null, documentURI, t.getActivityCode(), t.getActivityDetails());
+        throw e;
+      } catch (final IOException e) {
+        if (t.getActivityCode() != null)
+          activities.recordActivity(t.getActivityStart(), SolrConnector.REMOVE_ACTIVITY, null, documentURI, t.getActivityCode(), t.getActivityDetails());
         throw e;
       }
-      catch (SolrException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.REMOVE_ACTIVITY,null,documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-      catch (RuntimeException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.REMOVE_ACTIVITY,null,documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-      catch (IOException e)
-      {
-        if (t.getActivityCode() != null)
-          activities.recordActivity(t.getActivityStart(),SolrConnector.REMOVE_ACTIVITY,null,documentURI,t.getActivityCode(),t.getActivityDetails());
-        throw e;
-      }
-    }
-    catch (SolrServerException e)
-    {
+    } catch (final SolrServerException e) {
       handleSolrServerException(e, "delete");
       return;
-    }
-    catch (SolrException e)
-    {
+    } catch (final SolrException e) {
       handleSolrException(e, "delete");
       return;
-    }
-    catch (RuntimeException e)
-    {
+    } catch (final RuntimeException e) {
       handleRuntimeException(e, "delete");
       return;
-    }
-    catch (IOException ioe)
-    {
+    } catch (final IOException ioe) {
       handleIOException(ioe, "delete");
       return;
     }
@@ -785,8 +631,7 @@
   }
 
   private final static Set<String> acceptableMimeTypes = new HashSet<String>();
-  static
-  {
+  static {
     acceptableMimeTypes.add("text/plain;charset=utf-8");
     acceptableMimeTypes.add("text/plain;charset=ascii");
     acceptableMimeTypes.add("text/plain;charset=us-ascii");
@@ -796,17 +641,14 @@
     acceptableMimeTypes.add("text/plain");
   }
 
-  public static boolean checkMimeTypeIndexable(final String mimeType, final boolean useExtractUpdateHandler,
-    final Set<String> includedMimeTypes, final Set<String> excludedMimeTypes)
-  {
-    final String lowerMimeType = (mimeType==null)?null:mimeType.toLowerCase(Locale.ROOT);
-    if (useExtractUpdateHandler)
-    {
+  public static boolean checkMimeTypeIndexable(final String mimeType, final boolean useExtractUpdateHandler, final Set<String> includedMimeTypes, final Set<String> excludedMimeTypes) {
+    final String lowerMimeType = (mimeType == null) ? null : mimeType.toLowerCase(Locale.ROOT);
+    if (useExtractUpdateHandler) {
       // Strip the charset off for this check
-      int index = lowerMimeType == null ? -1 : lowerMimeType.indexOf(";");
+      final int index = lowerMimeType == null ? -1 : lowerMimeType.indexOf(";");
       final String checkMimeType;
       if (index != -1) {
-        checkMimeType = lowerMimeType.substring(0,index);
+        checkMimeType = lowerMimeType.substring(0, index);
       } else {
         checkMimeType = lowerMimeType;
       }
@@ -819,22 +661,20 @@
     return acceptableMimeTypes.contains(lowerMimeType);
   }
 
-  /** Convert an unqualified ACL to qualified form.
-  * @param acl is the initial, unqualified ACL.
-  * @param authorityNameString is the name of the governing authority for this document's acls, or null if none.
-  * @param activities is the activities object, so we can report what's happening.
-  * @return the modified ACL.
-  */
-  protected static String[] convertACL(String[] acl, String authorityNameString, IOutputAddActivity activities)
-    throws ManifoldCFException
-  {
-    if (acl != null)
-    {
-      String[] rval = new String[acl.length];
+  /**
+   * Convert an unqualified ACL to qualified form.
+   *
+   * @param acl                 is the initial, unqualified ACL.
+   * @param authorityNameString is the name of the governing authority for this document's acls, or null if none.
+   * @param activities          is the activities object, so we can report what's happening.
+   * @return the modified ACL.
+   */
+  protected static String[] convertACL(final String[] acl, final String authorityNameString, final IOutputAddActivity activities) throws ManifoldCFException {
+    if (acl != null) {
+      final String[] rval = new String[acl.length];
       int i = 0;
-      while (i < rval.length)
-      {
-        rval[i] = activities.qualifyAccessToken(authorityNameString,acl[i]);
+      while (i < rval.length) {
+        rval[i] = activities.qualifyAccessToken(authorityNameString, acl[i]);
         i++;
       }
       return rval;
@@ -843,68 +683,60 @@
   }
 
   /** Write a field */
-  protected static void writeField(ModifiableSolrParams out, String fieldName, String[] fieldValues)
-  {
+  protected static void writeField(final ModifiableSolrParams out, final String fieldName, final String[] fieldValues) {
     out.add(fieldName, fieldValues);
   }
 
   /** Write a field */
-  protected static void writeField(ModifiableSolrParams out, String fieldName, List<String> fieldValues)
-  {
-    String[] values = new String[fieldValues.size()];
+  protected static void writeField(final ModifiableSolrParams out, final String fieldName, final List<String> fieldValues) {
+    final String[] values = new String[fieldValues.size()];
     int i = 0;
-    for (String fieldValue : fieldValues) {
+    for (final String fieldValue : fieldValues) {
       values[i++] = fieldValue;
     }
     writeField(out, fieldName, values);
   }
 
   /** Write a field */
-  protected static void writeField(ModifiableSolrParams out, String fieldName, String fieldValue)
-  {
+  protected static void writeField(final ModifiableSolrParams out, final String fieldName, final String fieldValue) {
     out.add(fieldName, fieldValue);
   }
 
   /** Output an acl level */
-  protected void writeACLs(ModifiableSolrParams out, String aclType, String[] acl, String[] denyAcl)
-  {
-    String metadataACLName = LITERAL + allowAttributeName + aclType;
-    for (int i = 0; i < acl.length; i++)
-    {
-      writeField(out,metadataACLName,acl[i]);
+  protected void writeACLs(final ModifiableSolrParams out, final String aclType, final String[] acl, final String[] denyAcl) {
+    final String metadataACLName = LITERAL + allowAttributeName + aclType;
+    for (int i = 0; i < acl.length; i++) {
+      writeField(out, metadataACLName, acl[i]);
     }
-    String metadataDenyACLName = LITERAL + denyAttributeName + aclType;
-    for (int i = 0; i < denyAcl.length; i++)
-    {
-      writeField(out,metadataDenyACLName,denyAcl[i]);
+    final String metadataDenyACLName = LITERAL + denyAttributeName + aclType;
+    for (int i = 0; i < denyAcl.length; i++) {
+      writeField(out, metadataDenyACLName, denyAcl[i]);
     }
   }
 
   /**
-    * Output an acl level in a SolrInputDocument
-    */
-  protected void writeACLsInSolrDoc( SolrInputDocument inputDoc, String aclType, String[] acl, String[] denyAcl )
-  {
-    String metadataACLName = allowAttributeName + aclType;
-    inputDoc.addField( metadataACLName, acl );
+   * Output an acl level in a SolrInputDocument
+   */
+  protected void writeACLsInSolrDoc(final SolrInputDocument inputDoc, final String aclType, final String[] acl, final String[] denyAcl) {
+    final String metadataACLName = allowAttributeName + aclType;
+    inputDoc.addField(metadataACLName, acl);
 
-    String metadataDenyACLName = denyAttributeName + aclType;
-    inputDoc.addField( metadataDenyACLName, denyAcl );
+    final String metadataDenyACLName = denyAttributeName + aclType;
+    inputDoc.addField(metadataDenyACLName, denyAcl);
   }
 
-  /** Killable thread that does ingestions.
-  * Java 1.5 stopped permitting thread interruptions to abort socket waits.  As a result, it is impossible to get threads to shutdown cleanly that are doing
-  * such waits.  So, the places where this happens are segregated in their own threads so that they can be just abandoned.
-  *
-  * This thread does a single document ingestion.
-  */
-  protected class IngestThread extends java.lang.Thread
-  {
+  /**
+   * Killable thread that does ingestions. Java 1.5 stopped permitting thread interruptions to abort socket waits. As a result, it is impossible to get threads that are doing such waits to shut
+   * down cleanly. So, the places where this happens are segregated in their own threads so that they can be just abandoned.
+   *
+   * This thread does a single document ingestion.
+   */
+  protected class IngestThread extends java.lang.Thread {
     protected final String documentURI;
     protected final RepositoryDocument document;
-    protected final Map<String,List<String>> arguments;
-    protected final Map<String,String[]> aclsMap;
-    protected final Map<String,String[]> denyAclsMap;
+    protected final Map<String, List<String>> arguments;
+    protected final Map<String, String[]> aclsMap;
+    protected final Map<String, String[]> denyAclsMap;
 
     protected Long activityStart = null;
     protected Long activityBytes = null;
@@ -914,10 +746,8 @@
     protected boolean readFromDocumentStreamYet = false;
     protected boolean rval = false;
 
-    public IngestThread(String documentURI, RepositoryDocument document,
-      Map<String, List<String>> arguments,
-      Map<String,String[]> aclsMap, Map<String,String[]> denyAclsMap)
-    {
+    public IngestThread(final String documentURI, final RepositoryDocument document, final Map<String, List<String>> arguments, final Map<String, String[]> aclsMap,
+        final Map<String, String[]> denyAclsMap) {
       super();
       setDaemon(true);
       this.documentURI = documentURI;
@@ -927,50 +757,39 @@
       this.denyAclsMap = denyAclsMap;
     }
 
-    public void run()
-    {
-      long length = document.getBinaryLength();
-      InputStream is = document.getBinaryStream();
-      String contentType = document.getMimeType();
-      String contentName = document.getFileName();
+    @Override
+    public void run() {
+      final long length = document.getBinaryLength();
+      final InputStream is = document.getBinaryStream();
+      final String contentType = document.getMimeType();
+      final String contentName = document.getFileName();
 
-      try
-      {
+      try {
         // Do the operation!
-        long fullStartTime = System.currentTimeMillis();
+        final long fullStartTime = System.currentTimeMillis();
 
         // Open a socket to ingest, and to the response stream to get the post result
-        try
-        {
+        try {
           SolrInputDocument currentSolrDoc = new SolrInputDocument();
-          ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(postUpdateAction);
-          if ( useExtractUpdateHandler )
-          {
-            buildExtractUpdateHandlerRequest( length, is, contentType, (contentName==null || contentName.length()==0)?"docname":contentName,
-              contentStreamUpdateRequest );
-          }
-          else
-          {
-            currentSolrDoc = buildSolrDocument( length, is );
+          final ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(postUpdateAction);
+          if (useExtractUpdateHandler) {
+            buildExtractUpdateHandlerRequest(length, is, contentType, (contentName == null || contentName.length() == 0) ? "docname" : contentName, contentStreamUpdateRequest);
+          } else {
+            currentSolrDoc = buildSolrDocument(length, is);
           }
 
           // Fire off the request.
           // Note: I need to know whether the document has been permanently rejected or not, but we currently have
-          // no means to determine that.  Analysis of SolrServerExceptions that have been thrown is likely needed.
-          try
-          {
+          // no means to determine that. Analysis of SolrServerExceptions that have been thrown is likely needed.
+          try {
             readFromDocumentStreamYet = true;
             UpdateResponse response;
-            if ( useExtractUpdateHandler )
-            {
+            if (useExtractUpdateHandler) {
               response = contentStreamUpdateRequest.process(solrServer);
-            }
-            else
-            {
+            } else {
               final ModifiableSolrParams params = new ModifiableSolrParams();
               // Write the arguments
-              for (final String name : arguments.keySet())
-              {
+              for (final String name : arguments.keySet()) {
                 final List<String> values = arguments.get(name);
                 writeField(params, name, values);
               }
@@ -980,7 +799,7 @@
               if (commitWithin != null) {
                 req.setCommitWithin(Integer.parseInt(commitWithin));
               }
-              response =  req.process(solrServer);
+              response = req.process(solrServer);
             }
 
             // Successful completion
@@ -991,43 +810,33 @@
 
             rval = true;
             return;
-          }
-          catch (SolrServerException e)
-          {
+          } catch (final SolrServerException e) {
             // Log what happened to us
             activityStart = new Long(fullStartTime);
             activityBytes = new Long(length);
-            activityDetails = e.getMessage() +
-              ((e.getCause() != null)?": "+e.getCause().getMessage():"");
+            activityDetails = e.getMessage() + ((e.getCause() != null) ? ": " + e.getCause().getMessage() : "");
 
             // Broken pipe exceptions we log specially because they usually mean
             // Solr has rejected the document, and the user will want to know that.
-            if (e.getCause() != null && e.getCause().getClass().getName().equals("java.net.SocketException") &&
-              (activityDetails.toLowerCase(Locale.ROOT).indexOf("broken pipe") != -1 ||
-                activityDetails.toLowerCase(Locale.ROOT).indexOf("connection reset") != -1 ||
-                activityDetails.toLowerCase(Locale.ROOT).indexOf("target server failed to respond") != -1))
+            if (e.getCause() != null && e.getCause().getClass().getName().equals("java.net.SocketException") && (activityDetails.toLowerCase(Locale.ROOT).indexOf("broken pipe") != -1
+                || activityDetails.toLowerCase(Locale.ROOT).indexOf("connection reset") != -1 || activityDetails.toLowerCase(Locale.ROOT).indexOf("target server failed to respond") != -1))
               activityCode = "SOLRREJECT";
             else
               activityCode = e.getClass().getSimpleName().toUpperCase(Locale.ROOT);
 
             // Rethrow; will interpret at a higher level
             throw e;
-          }
-          catch (SolrException e)
-          {
+          } catch (final SolrException e) {
             // Log what happened to us
             activityStart = new Long(fullStartTime);
             activityBytes = new Long(length);
             activityCode = Integer.toString(e.code());
-            activityDetails = e.getMessage() +
-              ((e.getCause() != null)?": "+e.getCause().getMessage():"");
+            activityDetails = e.getMessage() + ((e.getCause() != null) ? ": " + e.getCause().getMessage() : "");
 
             // Rethrow; we'll interpret at the next level
             throw e;
           }
-        }
-        catch (IOException ioe)
-        {
+        } catch (final IOException ioe) {
           if ((ioe instanceof InterruptedIOException) && (!(ioe instanceof java.net.SocketTimeoutException)))
             return;
 
@@ -1036,307 +845,262 @@
           activityDetails = ioe.getMessage();
 
           // Log the error
-          Logging.ingest.warn("Error indexing into Solr: "+ioe.getMessage(),ioe);
+          Logging.ingest.warn("Error indexing into Solr: " + ioe.getMessage(), ioe);
 
           throw ioe;
         }
-      }
-      catch (Throwable e)
-      {
+      } catch (final Throwable e) {
         this.exception = e;
       }
     }
 
-    private SolrInputDocument buildSolrDocument( long length, InputStream is )
-      throws IOException
-    {
-      SolrInputDocument outputDoc = new SolrInputDocument();
+    private SolrInputDocument buildSolrDocument(final long length, final InputStream is) throws IOException {
+      final SolrInputDocument outputDoc = new SolrInputDocument();
 
       // Write the id field
-      outputDoc.addField( idAttributeName, documentURI );
+      outputDoc.addField(idAttributeName, documentURI);
 
-      if (contentAttributeName != null)
-      {
-        // Copy the content into a string.  This is a bad thing to do, but we have no choice given SolrJ architecture at this time.
+      if (contentAttributeName != null) {
+        // Copy the content into a string. This is a bad thing to do, but we have no choice given SolrJ architecture at this time.
         // We enforce a size limit upstream.
-        Reader r = new InputStreamReader(is, Consts.UTF_8);
-        StringBuilder sb = new StringBuilder((int)length);
-        char[] buffer = new char[65536];
-        while (true)
-        {
-          int amt = r.read(buffer,0,buffer.length);
+        final Reader r = new InputStreamReader(is, Consts.UTF_8);
+        final StringBuilder sb = new StringBuilder((int) length);
+        final char[] buffer = new char[65536];
+        while (true) {
+          final int amt = r.read(buffer, 0, buffer.length);
           if (amt == -1)
             break;
-          sb.append(buffer,0,amt);
+          sb.append(buffer, 0, amt);
         }
-        outputDoc.addField( contentAttributeName, sb.toString() );
+        outputDoc.addField(contentAttributeName, sb.toString());
       }
 
       // Write the rest of the attributes
-      if ( originalSizeAttributeName != null )
-      {
-        Long size = document.getOriginalSize();
-        if ( size != null )
-        {
-          outputDoc.addField( originalSizeAttributeName, size.toString() );
+      if (originalSizeAttributeName != null) {
+        final Long size = document.getOriginalSize();
+        if (size != null) {
+          outputDoc.addField(originalSizeAttributeName, size.toString());
         }
       }
-      if ( modifiedDateAttributeName != null )
-      {
-        Date date = document.getModifiedDate();
-        if ( date != null )
-        {
-          outputDoc.addField( modifiedDateAttributeName, DateParser.formatISO8601Date( date ) );
+      if (modifiedDateAttributeName != null) {
+        final Date date = document.getModifiedDate();
+        if (date != null) {
+          outputDoc.addField(modifiedDateAttributeName, DateParser.formatISO8601Date(date));
         }
       }
-      if ( createdDateAttributeName != null )
-      {
-        Date date = document.getCreatedDate();
-        if ( date != null )
-        {
-          outputDoc.addField( createdDateAttributeName, DateParser.formatISO8601Date( date ) );
+      if (createdDateAttributeName != null) {
+        final Date date = document.getCreatedDate();
+        if (date != null) {
+          outputDoc.addField(createdDateAttributeName, DateParser.formatISO8601Date(date));
         }
 
       }
-      if ( indexedDateAttributeName != null )
-      {
-        Date date = document.getIndexingDate();
-        if ( date != null )
-        {
-          outputDoc.addField( indexedDateAttributeName, DateParser.formatISO8601Date( date ) );
+      if (indexedDateAttributeName != null) {
+        final Date date = document.getIndexingDate();
+        if (date != null) {
+          outputDoc.addField(indexedDateAttributeName, DateParser.formatISO8601Date(date));
         }
       }
-      if ( fileNameAttributeName != null )
-      {
-        String fileName = document.getFileName();
-        if ( !StringUtils.isBlank(fileName) )
-        {
-          outputDoc.addField( fileNameAttributeName, fileName );
+      if (fileNameAttributeName != null) {
+        final String fileName = document.getFileName();
+        if (!StringUtils.isBlank(fileName)) {
+          outputDoc.addField(fileNameAttributeName, fileName);
         }
       }
-      if ( mimeTypeAttributeName != null )
-      {
-        String mimeType = document.getMimeType();
-        if ( !StringUtils.isBlank(mimeType) )
-        {
-          outputDoc.addField( mimeTypeAttributeName, mimeType );
+      if (mimeTypeAttributeName != null) {
+        final String mimeType = document.getMimeType();
+        if (!StringUtils.isBlank(mimeType)) {
+          outputDoc.addField(mimeTypeAttributeName, mimeType);
         }
       }
 
-      Iterator<String> typeIterator = aclsMap.keySet().iterator();
-      while (typeIterator.hasNext())
-      {
-        String aclType = typeIterator.next();
-        writeACLsInSolrDoc(outputDoc,aclType,aclsMap.get(aclType),denyAclsMap.get(aclType));
+      final Iterator<String> typeIterator = aclsMap.keySet().iterator();
+      while (typeIterator.hasNext()) {
+        final String aclType = typeIterator.next();
+        writeACLsInSolrDoc(outputDoc, aclType, aclsMap.get(aclType), denyAclsMap.get(aclType));
       }
 
       // Write the metadata, each in a field by itself
-      buildSolrParamsFromMetadata( outputDoc );
+      buildSolrParamsFromMetadata(outputDoc);
 
       return outputDoc;
     }
 
-    private void buildExtractUpdateHandlerRequest( long length, InputStream is, String contentType,
-      String contentName,
-      ContentStreamUpdateRequest contentStreamUpdateRequest )
-      throws IOException
-    {
-      ModifiableSolrParams out = new ModifiableSolrParams();
-      Logging.ingest.debug("Solr: Writing document '"+documentURI);
+    private void buildExtractUpdateHandlerRequest(final long length, final InputStream is, final String contentType, final String contentName,
+        final ContentStreamUpdateRequest contentStreamUpdateRequest) throws IOException {
+      final ModifiableSolrParams out = new ModifiableSolrParams();
+      Logging.ingest.debug("Solr: Writing document '" + documentURI);
 
       // Write the id field
-      writeField(out,LITERAL+idAttributeName,documentURI);
+      writeField(out, LITERAL + idAttributeName, documentURI);
       // Write the rest of the attributes
-      if (originalSizeAttributeName != null)
-      {
-        Long size = document.getOriginalSize();
+      if (originalSizeAttributeName != null) {
+        final Long size = document.getOriginalSize();
         if (size != null)
           // Write value
-          writeField(out,LITERAL+originalSizeAttributeName,size.toString());
+          writeField(out, LITERAL + originalSizeAttributeName, size.toString());
       }
-      if (modifiedDateAttributeName != null)
-      {
-        Date date = document.getModifiedDate();
+      if (modifiedDateAttributeName != null) {
+        final Date date = document.getModifiedDate();
         if (date != null)
           // Write value
-          writeField(out,LITERAL+modifiedDateAttributeName,DateParser.formatISO8601Date(date));
+          writeField(out, LITERAL + modifiedDateAttributeName, DateParser.formatISO8601Date(date));
       }
-      if (createdDateAttributeName != null)
-      {
-        Date date = document.getCreatedDate();
+      if (createdDateAttributeName != null) {
+        final Date date = document.getCreatedDate();
         if (date != null)
           // Write value
-          writeField(out,LITERAL+createdDateAttributeName,DateParser.formatISO8601Date(date));
+          writeField(out, LITERAL + createdDateAttributeName, DateParser.formatISO8601Date(date));
       }
-      if (indexedDateAttributeName != null)
-      {
-        Date date = document.getIndexingDate();
+      if (indexedDateAttributeName != null) {
+        final Date date = document.getIndexingDate();
         if (date != null)
           // Write value
-          writeField(out,LITERAL+indexedDateAttributeName,DateParser.formatISO8601Date(date));
+          writeField(out, LITERAL + indexedDateAttributeName, DateParser.formatISO8601Date(date));
       }
-      if (fileNameAttributeName != null)
-      {
-        String fileName = document.getFileName();
+      if (fileNameAttributeName != null) {
+        final String fileName = document.getFileName();
         if (!StringUtils.isBlank(fileName))
-          writeField(out,LITERAL+fileNameAttributeName,fileName);
+          writeField(out, LITERAL + fileNameAttributeName, fileName);
       }
-      if (mimeTypeAttributeName != null)
-      {
-        String mimeType = document.getMimeType();
+      if (mimeTypeAttributeName != null) {
+        final String mimeType = document.getMimeType();
         if (!StringUtils.isBlank(mimeType))
-          writeField(out,LITERAL+mimeTypeAttributeName,mimeType);
+          writeField(out, LITERAL + mimeTypeAttributeName, mimeType);
       }
 
       // Write the access token information
       // Both maps have the same keys.
-      Iterator<String> typeIterator = aclsMap.keySet().iterator();
-      while (typeIterator.hasNext())
-      {
-        String aclType = typeIterator.next();
-        writeACLs(out,aclType,aclsMap.get(aclType),denyAclsMap.get(aclType));
+      final Iterator<String> typeIterator = aclsMap.keySet().iterator();
+      while (typeIterator.hasNext()) {
+        final String aclType = typeIterator.next();
+        writeACLs(out, aclType, aclsMap.get(aclType), denyAclsMap.get(aclType));
       }
 
       // Write the arguments
-      for (String name : arguments.keySet())
-      {
-        List<String> values = arguments.get(name);
-        writeField(out,name,values);
+      for (final String name : arguments.keySet()) {
+        final List<String> values = arguments.get(name);
+        writeField(out, name, values);
       }
 
       // Write the metadata, each in a field by itself
       buildSolrParamsFromMetadata(out);
 
       // These are unnecessary now in the case of non-solrcloud setups, because we overrode the SolrJ posting method to use multipart.
-      //writeField(out,LITERAL+"stream_size",String.valueOf(length));
-      //writeField(out,LITERAL+"stream_name",document.getFileName());
+      // writeField(out,LITERAL+"stream_size",String.valueOf(length));
+      // writeField(out,LITERAL+"stream_name",document.getFileName());
 
       // General hint for Tika
       if (!StringUtils.isBlank(document.getFileName()))
-        writeField(out,"resource.name",document.getFileName());
+        writeField(out, "resource.name", document.getFileName());
 
       // Write the commitWithin parameter
       if (commitWithin != null)
-        writeField(out,COMMITWITHIN_METADATA,commitWithin);
+        writeField(out, COMMITWITHIN_METADATA, commitWithin);
 
       contentStreamUpdateRequest.setParams(out);
 
-      contentStreamUpdateRequest.addContentStream(new RepositoryDocumentStream(is,length,contentType,contentName));
+      contentStreamUpdateRequest.addContentStream(new RepositoryDocumentStream(is, length, contentType, contentName));
 
-      Logging.ingest.debug("Solr: Done writing '"+documentURI+"'");
+      Logging.ingest.debug("Solr: Done writing '" + documentURI + "'");
     }
 
     /**
-      * builds the solr parameter maps for the update request.
-      * For each mapping expressed is applied the renaming for the metadata field name.
-      * If we set to keep all the metadata, the metadata non present in the mapping will be kept with their original names.
-      * In the other case ignored
-      * @param out
-      * @throws IOException
-      */
-    private void buildSolrParamsFromMetadata(ModifiableSolrParams out) throws IOException
-    {
-      Iterator<String> iter = document.getFields();
-      while (iter.hasNext())
-      {
-        String originalFieldName = iter.next();
-        String fieldName = makeSafeLuceneField(originalFieldName);
-        Logging.ingest.debug("Solr: Saw field '"+originalFieldName+"'; converted to '"+fieldName+"'");
+     * Builds the solr parameter maps for the update request. For each expressed mapping, the renaming is applied to the metadata field name. If configured to keep all the metadata, metadata not
+     * present in the mapping will be kept with their original names; otherwise they are ignored.
+     *
+     * @param out
+     * @throws IOException
+     */
+    private void buildSolrParamsFromMetadata(final ModifiableSolrParams out) throws IOException {
+      final Iterator<String> iter = document.getFields();
+      while (iter.hasNext()) {
+        final String originalFieldName = iter.next();
+        final String fieldName = makeSafeLuceneField(originalFieldName);
+        Logging.ingest.debug("Solr: Saw field '" + originalFieldName + "'; converted to '" + fieldName + "'");
         applySingleMapping(originalFieldName, out, fieldName);
       }
     }
 
-    private void buildSolrParamsFromMetadata(SolrInputDocument outputDocument) throws IOException
-    {
-      Iterator<String> iter = document.getFields();
-      while (iter.hasNext())
-      {
-        String originalFieldName = iter.next();
-        String fieldName = makeSafeLuceneField(originalFieldName);
+    private void buildSolrParamsFromMetadata(final SolrInputDocument outputDocument) throws IOException {
+      final Iterator<String> iter = document.getFields();
+      while (iter.hasNext()) {
+        final String originalFieldName = iter.next();
+        final String fieldName = makeSafeLuceneField(originalFieldName);
         applySingleMapping(originalFieldName, outputDocument, fieldName);
       }
     }
 
-    private void applySingleMapping(String originalFieldName, ModifiableSolrParams out, String newFieldName) throws IOException {
-      if(newFieldName != null && !newFieldName.isEmpty()) {
+    private void applySingleMapping(final String originalFieldName, final ModifiableSolrParams out, String newFieldName) throws IOException {
+      if (newFieldName != null && !newFieldName.isEmpty()) {
         if (newFieldName.toLowerCase(Locale.ROOT).equals(idAttributeName.toLowerCase(Locale.ROOT))) {
           newFieldName = ID_METADATA;
         }
-        String[] values = document.getFieldAsStrings(originalFieldName);
-        writeField(out,LITERAL+newFieldName,values);
+        final String[] values = document.getFieldAsStrings(originalFieldName);
+        writeField(out, LITERAL + newFieldName, values);
       }
     }
 
-    private void applySingleMapping(String originalFieldName, SolrInputDocument outputDocument, String newFieldName) throws IOException {
-      if(newFieldName != null && !newFieldName.isEmpty()) {
+    private void applySingleMapping(final String originalFieldName, final SolrInputDocument outputDocument, String newFieldName) throws IOException {
+      if (newFieldName != null && !newFieldName.isEmpty()) {
         if (newFieldName.toLowerCase(Locale.ROOT).equals(idAttributeName.toLowerCase(Locale.ROOT))) {
           newFieldName = ID_METADATA;
         }
-        String[] values = document.getFieldAsStrings(originalFieldName);
-        outputDocument.addField( newFieldName, values );
+        final String[] values = document.getFieldAsStrings(originalFieldName);
+        outputDocument.addField(newFieldName, values);
       }
     }
 
-    public void finishUp()
-      throws InterruptedException, SolrServerException, IOException
-    {
+    public void finishUp() throws InterruptedException, SolrServerException, IOException {
       join();
 
-      Throwable thr = exception;
-      if (thr != null)
-      {
+      final Throwable thr = exception;
+      if (thr != null) {
         if (thr instanceof SolrServerException)
-          throw (SolrServerException)thr;
+          throw (SolrServerException) thr;
         if (thr instanceof IOException)
-          throw (IOException)thr;
+          throw (IOException) thr;
         if (thr instanceof RuntimeException)
-          throw (RuntimeException)thr;
+          throw (RuntimeException) thr;
         if (thr instanceof Error)
-          throw (Error)thr;
+          throw (Error) thr;
         else
-          throw new RuntimeException("Unexpected exception type: "+thr.getClass().getName()+": "+thr.getMessage(),thr);
+          throw new RuntimeException("Unexpected exception type: " + thr.getClass().getName() + ": " + thr.getMessage(), thr);
       }
     }
 
-    public Long getActivityStart()
-    {
+    public Long getActivityStart() {
       return activityStart;
     }
 
-    public Long getActivityBytes()
-    {
+    public Long getActivityBytes() {
       return activityBytes;
     }
 
-    public String getActivityCode()
-    {
+    public String getActivityCode() {
       return activityCode;
     }
 
-    public String getActivityDetails()
-    {
+    public String getActivityDetails() {
       return activityDetails;
     }
 
-    public boolean getReadFromDocumentStreamYet()
-    {
+    public boolean getReadFromDocumentStreamYet() {
       return readFromDocumentStreamYet;
     }
 
-    public boolean getRval()
-    {
+    public boolean getRval() {
       return rval;
     }
   }
 
-  /** Killable thread that does deletions.
-  * Java 1.5 stopped permitting thread interruptions to abort socket waits.  As a result, it is impossible to get threads to shutdown cleanly that are doing
-  * such waits.  So, the places where this happens are segregated in their own threads so that they can be just abandoned.
-  *
-  * This thread does a single document deletion.
-  */
-  protected class DeleteThread extends java.lang.Thread
-  {
+  /**
+   * Killable thread that does deletions. Java 1.5 stopped permitting thread interruptions to abort socket waits. As a result, it is impossible to cleanly shut down threads that are doing
+   * such waits. So, the places where this happens are segregated into their own threads so that they can simply be abandoned.
+   *
+   * This thread does a single document deletion.
+   */
+  protected class DeleteThread extends java.lang.Thread {
     protected String documentURI;
 
     protected Long activityStart = null;
@@ -1344,56 +1108,43 @@
     protected String activityDetails = null;
     protected Throwable exception = null;
 
-    public DeleteThread(String documentURI)
-    {
+    public DeleteThread(final String documentURI) {
       super();
       setDaemon(true);
       this.documentURI = documentURI;
     }
 
-    public void run()
-    {
-      try
-      {
+    @Override
+    public void run() {
+      try {
         // Do the operation!
-        long fullStartTime = System.currentTimeMillis();
+        final long fullStartTime = System.currentTimeMillis();
         // Open a socket to ingest, and to the response stream to get the post result
-        try
-        {
-          UpdateResponse response = new UpdateRequest(postRemoveAction).deleteById(documentURI).process(solrServer);
+        try {
+          final UpdateResponse response = new UpdateRequest(postRemoveAction).deleteById(documentURI).process(solrServer);
 
           // Success
           activityStart = new Long(fullStartTime);
           activityCode = "OK";
           activityDetails = null;
           return;
-        }
-        catch (InterruptedIOException ioe)
-        {
+        } catch (final InterruptedIOException ioe) {
           return;
-        }
-        catch (SolrServerException e)
-        {
+        } catch (final SolrServerException e) {
           activityStart = new Long(fullStartTime);
           activityCode = e.getClass().getSimpleName().toUpperCase(Locale.ROOT);
-          activityDetails = e.getMessage() +
-            ((e.getCause() != null)?": "+e.getCause().getMessage():"");
+          activityDetails = e.getMessage() + ((e.getCause() != null) ? ": " + e.getCause().getMessage() : "");
 
           throw e;
-        }
-        catch (SolrException e)
-        {
+        } catch (final SolrException e) {
           activityStart = new Long(fullStartTime);
           activityCode = e.getClass().getSimpleName().toUpperCase(Locale.ROOT);
-          activityDetails = e.getMessage() +
-            ((e.getCause() != null)?": "+e.getCause().getMessage():"");
+          activityDetails = e.getMessage() + ((e.getCause() != null) ? ": " + e.getCause().getMessage() : "");
 
           throw e;
-        }
-        catch (IOException ioe)
-        {
+        } catch (final IOException ioe) {
           // Log the error
-          Logging.ingest.warn("Error deleting document: "+ioe.getMessage(),ioe);
+          Logging.ingest.warn("Error deleting document: " + ioe.getMessage(), ioe);
 
           activityStart = new Long(fullStartTime);
           activityCode = ioe.getClass().getSimpleName().toUpperCase(Locale.ROOT);
@@ -1401,194 +1152,159 @@
 
           throw ioe;
         }
-      }
-      catch (Throwable e)
-      {
+      } catch (final Throwable e) {
         this.exception = e;
       }
     }
 
-    public void finishUp()
-      throws InterruptedException, SolrServerException, IOException
-    {
+    public void finishUp() throws InterruptedException, SolrServerException, IOException {
       join();
 
-      Throwable thr = exception;
-      if (thr != null)
-      {
+      final Throwable thr = exception;
+      if (thr != null) {
         if (thr instanceof SolrServerException)
-          throw (SolrServerException)thr;
+          throw (SolrServerException) thr;
         if (thr instanceof IOException)
-          throw (IOException)thr;
+          throw (IOException) thr;
         if (thr instanceof RuntimeException)
-          throw (RuntimeException)thr;
+          throw (RuntimeException) thr;
         if (thr instanceof Error)
-          throw (Error)thr;
+          throw (Error) thr;
         else
-          throw new RuntimeException("Unexpected exception type: "+thr.getClass().getName()+": "+thr.getMessage(),thr);
+          throw new RuntimeException("Unexpected exception type: " + thr.getClass().getName() + ": " + thr.getMessage(), thr);
       }
     }
-    public Long getActivityStart()
-    {
+
+    public Long getActivityStart() {
       return activityStart;
     }
 
-    public String getActivityCode()
-    {
+    public String getActivityCode() {
       return activityCode;
     }
 
-    public String getActivityDetails()
-    {
+    public String getActivityDetails() {
       return activityDetails;
     }
   }
 
-  /** Killable thread that does a commit.
-  * Java 1.5 stopped permitting thread interruptions to abort socket waits.  As a result, it is impossible to get threads to shutdown cleanly that are doing
-  * such waits.  So, the places where this happens are segregated in their own threads so that they can be just abandoned.
-  *
-  * This thread does a commit.
-  */
-  protected class CommitThread extends java.lang.Thread
-  {
+  /**
+   * Killable thread that does a commit. Java 1.5 stopped permitting thread interruptions to abort socket waits. As a result, it is impossible to cleanly shut down threads that are doing
+   * such waits. So, the places where this happens are segregated into their own threads so that they can simply be abandoned.
+   *
+   * This thread does a commit.
+   */
+  protected class CommitThread extends java.lang.Thread {
     protected Throwable exception = null;
 
-    public CommitThread()
-    {
+    public CommitThread() {
       super();
       setDaemon(true);
     }
 
-    public void run()
-    {
-      try
-      {
-        try
-        {
+    @Override
+    public void run() {
+      try {
+        try {
           // Do the operation!
-          UpdateRequest updateRequest = new UpdateRequest(postUpdateAction + "?commit=true");
-          UpdateResponse response = updateRequest.process(solrServer);
-          //UpdateResponse response = solrServer.commit();
-        }
-        catch (InterruptedIOException ioe)
-        {
+          final UpdateRequest updateRequest = new UpdateRequest(postUpdateAction + "?commit=true");
+          final UpdateResponse response = updateRequest.process(solrServer);
+          // UpdateResponse response = solrServer.commit();
+        } catch (final InterruptedIOException ioe) {
           return;
-        }
-        catch (IOException ioe)
-        {
+        } catch (final IOException ioe) {
           // Log the error
-          Logging.ingest.warn("Error committing: "+ioe.getMessage(),ioe);
+          Logging.ingest.warn("Error committing: " + ioe.getMessage(), ioe);
           throw ioe;
         }
-      }
-      catch (Throwable e)
-      {
+      } catch (final Throwable e) {
         this.exception = e;
       }
     }
 
-    public void finishUp()
-      throws InterruptedException, SolrServerException, IOException
-    {
+    public void finishUp() throws InterruptedException, SolrServerException, IOException {
       join();
 
-      Throwable thr = exception;
-      if (thr != null)
-      {
+      final Throwable thr = exception;
+      if (thr != null) {
         if (thr instanceof SolrServerException)
-          throw (SolrServerException)thr;
+          throw (SolrServerException) thr;
         if (thr instanceof IOException)
-          throw (IOException)thr;
+          throw (IOException) thr;
         if (thr instanceof RuntimeException)
-          throw (RuntimeException)thr;
+          throw (RuntimeException) thr;
         if (thr instanceof Error)
-          throw (Error)thr;
+          throw (Error) thr;
         else
-          throw new RuntimeException("Unexpected exception type: "+thr.getClass().getName()+": "+thr.getMessage(),thr);
+          throw new RuntimeException("Unexpected exception type: " + thr.getClass().getName() + ": " + thr.getMessage(), thr);
       }
     }
 
   }
 
-
-  /** Killable thread that does a status check.
-  * Java 1.5 stopped permitting thread interruptions to abort socket waits.  As a result, it is impossible to get threads to shutdown cleanly that are doing
-  * such waits.  So, the places where this happens are segregated in their own threads so that they can be just abandoned.
-  *
-  * This thread does a status check.
-  */
-  protected class StatusThread extends java.lang.Thread
-  {
+  /**
+   * Killable thread that does a status check. Java 1.5 stopped permitting thread interruptions to abort socket waits. As a result, it is impossible to cleanly shut down threads that are doing
+   * such waits. So, the places where this happens are segregated into their own threads so that they can simply be abandoned.
+   *
+   * This thread does a status check.
+   */
+  protected class StatusThread extends java.lang.Thread {
     protected Throwable exception = null;
 
-    public StatusThread()
-    {
+    public StatusThread() {
       super();
       setDaemon(true);
     }
 
-    public void run()
-    {
-      try
-      {
+    @Override
+    public void run() {
+      try {
         // Do the operation!
-        try
-        {
-          SolrResponse response = new SolrPing(postStatusAction).process(solrServer);
-        }
-        catch (InterruptedIOException ioe)
-        {
+        try {
+          final SolrResponse response = new SolrPing(postStatusAction).process(solrServer);
+        } catch (final InterruptedIOException ioe) {
           // Exit the thread.
           return;
-        }
-        catch (IOException ioe)
-        {
+        } catch (final IOException ioe) {
           // Log the error
-          Logging.ingest.warn("Error checking status: "+ioe.getMessage(),ioe);
+          Logging.ingest.warn("Error checking status: " + ioe.getMessage(), ioe);
           throw ioe;
         }
-      }
-      catch (Throwable e)
-      {
+      } catch (final Throwable e) {
         this.exception = e;
       }
     }
 
-    public void finishUp()
-      throws InterruptedException, SolrServerException, IOException
-    {
+    public void finishUp() throws InterruptedException, SolrServerException, IOException {
       join();
 
-      Throwable thr = exception;
-      if (thr != null)
-      {
+      final Throwable thr = exception;
+      if (thr != null) {
         if (thr instanceof SolrServerException)
-          throw (SolrServerException)thr;
+          throw (SolrServerException) thr;
         if (thr instanceof IOException)
-          throw (IOException)thr;
+          throw (IOException) thr;
         if (thr instanceof RuntimeException)
-          throw (RuntimeException)thr;
+          throw (RuntimeException) thr;
         if (thr instanceof Error)
-          throw (Error)thr;
+          throw (Error) thr;
         else
-          throw new RuntimeException("Unexpected exception type: "+thr.getClass().getName()+": "+thr.getMessage(),thr);
+          throw new RuntimeException("Unexpected exception type: " + thr.getClass().getName() + ": " + thr.getMessage(), thr);
       }
     }
 
   }
 
-  /** Class for importing documents into Solr via SolrJ
-  */
-  protected static class RepositoryDocumentStream extends ContentStreamBase
-  {
+  /**
+   * Class for importing documents into Solr via SolrJ
+   */
+  protected static class RepositoryDocumentStream extends ContentStreamBase {
     protected final InputStream is;
     protected final long length;
     protected final String contentType;
     protected final String contentName;
 
-    public RepositoryDocumentStream(InputStream is, long length, String contentType, String contentName)
-    {
+    public RepositoryDocumentStream(final InputStream is, final long length, final String contentType, final String contentName) {
       this.is = is;
       this.length = length;
       this.contentType = contentType;
@@ -1596,42 +1312,37 @@
     }
 
     @Override
-    public Long getSize()
-    {
+    public Long getSize() {
       return new Long(length);
     }
 
     @Override
-    public InputStream getStream() throws IOException
-    {
+    public InputStream getStream() throws IOException {
       return is;
     }
 
     @Override
-    public Reader getReader() throws IOException
-    {
+    public Reader getReader() throws IOException {
       return null;
     }
 
     @Override
-    public String getContentType()
-    {
+    public String getContentType() {
       return contentType;
     }
 
     @Override
-    public String getName()
-    {
+    public String getName() {
       return contentName;
     }
   }
 
-  /** Special version of ping class where we can control the URL
-  */
-  protected static class SolrPing extends SolrRequest
-  {
+  /**
+   * Special version of ping class where we can control the URL
+   */
+  protected static class SolrPing extends SolrRequest {
     /** Request parameters. */
-    private ModifiableSolrParams params;
+    private final ModifiableSolrParams params;
 
     /**
      * Create a new SolrPing object.
@@ -1641,9 +1352,8 @@
       params = new ModifiableSolrParams();
     }
 
-    public SolrPing(String url)
-    {
-      super( METHOD.GET, url );
+    public SolrPing(final String url) {
+      super(METHOD.GET, url);
       params = new ModifiableSolrParams();
     }
 
@@ -1653,7 +1363,7 @@
     }
 
     @Override
-    protected SolrPingResponse createResponse(SolrClient client) {
+    protected SolrPingResponse createResponse(final SolrClient client) {
       return new SolrPingResponse();
     }
 
@@ -1663,9 +1373,7 @@
     }
 
     /**
-     * Remove the action parameter from this request. This will result in the same
-     * behavior as {@code SolrPing#setActionPing()}. For Solr server version 4.0
-     * and later.
+     * Remove the action parameter from this request. This will result in the same behavior as {@code SolrPing#setActionPing()}. For Solr server version 4.0 and later.
      *
      * @return this
      */
@@ -1675,8 +1383,7 @@
     }
 
     /**
-     * Set the action parameter on this request to enable. This will delete the
-     * health-check file for the Solr core. For Solr server version 4.0 and later.
+     * Set the action parameter on this request to disable. This will delete the health-check file for the Solr core. For Solr server version 4.0 and later.
      *
      * @return this
      */
@@ -1686,8 +1393,7 @@
     }
 
     /**
-     * Set the action parameter on this request to enable. This will create the
-     * health-check file for the Solr core. For Solr server version 4.0 and later.
+     * Set the action parameter on this request to enable. This will create the health-check file for the Solr core. For Solr server version 4.0 and later.
      *
      * @return this
      */
@@ -1697,8 +1403,7 @@
     }
 
     /**
-     * Set the action parameter on this request to ping. This is the same as not
-     * including the action at all. For Solr server version 4.0 and later.
+     * Set the action parameter on this request to ping. This is the same as not including the action at all. For Solr server version 4.0 and later.
      *
      * @return this
      */
@@ -1714,26 +1419,21 @@
 
   }
 
-  /** See CONNECTORS-956.  Make a safe lucene field name from a possibly
-  * unsafe input field name from a repository connector.
-  */
-  protected static String makeSafeLuceneField(String inputField)
-  {
-    StringBuilder sb = new StringBuilder();
+  /**
+   * See CONNECTORS-956. Make a safe lucene field name from a possibly unsafe input field name from a repository connector.
+   */
+  protected static String makeSafeLuceneField(final String inputField) {
+    final StringBuilder sb = new StringBuilder();
     boolean isFirst = true;
-    for (int i = 0; i < inputField.length(); i++)
-    {
-      char x = inputField.charAt(i);
-      if (isFirst && !Character.isJavaIdentifierStart(x) || !isFirst && !Character.isJavaIdentifierPart(x))
-      {
+    for (int i = 0; i < inputField.length(); i++) {
+      final char x = inputField.charAt(i);
+      if (isFirst && !Character.isJavaIdentifierStart(x) || !isFirst && !Character.isJavaIdentifierPart(x)) {
         // Check for exceptions for Lucene
         if (!isFirst && (x == '.' || x == '-'))
           sb.append(x);
         else
           sb.append('_');
-      }
-      else
-      {
+      } else {
         // Check for exceptions for Lucene
         if (isFirst && x == '$')
           sb.append('_');
@@ -1746,4 +1446,3 @@
   }
 
 }
-
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudHttp2SolrClient.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudHttp2SolrClient.java
new file mode 100644
index 0000000..26230c1
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudHttp2SolrClient.java
@@ -0,0 +1,239 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
+import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.SolrException;
+
+public class ModifiedCloudHttp2SolrClient extends ModifiedCloudSolrClient {
+
+  private static final long serialVersionUID = -7543846119917075693L;
+  private final ClusterStateProvider stateProvider;
+  private final ModifiedLBHttp2SolrClient lbClient;
+  private final ModifiedHttp2SolrClient myClient;
+  private final boolean clientIsInternal;
+
+  /**
+   * Create a new client object that connects to Zookeeper and is always aware of the SolrCloud state. If there is a fully redundant Zookeeper quorum and SolrCloud has enough replicas for every shard
+   * in a collection, there is no single point of failure. Updates will be sent to shard leaders by default.
+   *
+   * @param builder a {@link Builder} with the options used to create the client.
+   */
+  protected ModifiedCloudHttp2SolrClient(final Builder builder) {
+    super(builder.shardLeadersOnly, builder.parallelUpdates, builder.directUpdatesToLeadersOnly);
+    if (builder.httpClient == null) {
+      this.clientIsInternal = true;
+      if (builder.internalClientBuilder == null) {
+        this.myClient = new ModifiedHttp2SolrClient.Builder().build();
+      } else {
+        this.myClient = builder.internalClientBuilder.build();
+      }
+    } else {
+      this.clientIsInternal = false;
+      this.myClient = builder.httpClient;
+    }
+    if (builder.stateProvider == null) {
+      if (builder.zkHosts != null && builder.solrUrls != null) {
+        throw new IllegalArgumentException("Both zkHost(s) & solrUrl(s) have been specified. Only specify one.");
+      }
+      if (builder.zkHosts != null) {
+        this.stateProvider = ClusterStateProvider.newZkClusterStateProvider(builder.zkHosts, builder.zkChroot);
+      } else if (builder.solrUrls != null && !builder.solrUrls.isEmpty()) {
+        try {
+          this.stateProvider = new ModifiedHttp2ClusterStateProvider(builder.solrUrls, builder.httpClient);
+        } catch (final Exception e) {
+          throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + "Solr server(s), " + builder.solrUrls + ", down?)", e);
+        }
+      } else {
+        throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null.");
+      }
+    } else {
+      this.stateProvider = builder.stateProvider;
+    }
+    this.lbClient = new ModifiedLBHttp2SolrClient(myClient);
+  }
+
+  @Override
+  public void close() throws IOException {
+    stateProvider.close();
+    lbClient.close();
+
+    if (clientIsInternal && myClient != null) {
+      myClient.close();
+    }
+
+    super.close();
+  }
+
+  @Override
+  public ModifiedLBHttp2SolrClient getLbClient() {
+    return lbClient;
+  }
+
+  @Override
+  public ClusterStateProvider getClusterStateProvider() {
+    return stateProvider;
+  }
+
+  public ModifiedHttp2SolrClient getHttpClient() {
+    return myClient;
+  }
+
+  @Override
+  protected boolean wasCommError(final Throwable rootCause) {
+    return false;
+  }
+
+  /** Constructs {@link CloudHttp2SolrClient} instances from provided configuration. */
+  public static class Builder {
+    protected Collection<String> zkHosts = new ArrayList<>();
+    protected List<String> solrUrls = new ArrayList<>();
+    protected String zkChroot;
+    protected ModifiedHttp2SolrClient httpClient;
+    protected boolean shardLeadersOnly = true;
+    protected boolean directUpdatesToLeadersOnly = false;
+    protected boolean parallelUpdates = true;
+    protected ClusterStateProvider stateProvider;
+    protected ModifiedHttp2SolrClient.Builder internalClientBuilder;
+
+    /**
+     * Provide a series of Solr URLs to be used when configuring {@link CloudHttp2SolrClient} instances. The solr client will use these urls to understand the cluster topology, which solr nodes are
+     * active etc.
+     *
+     * <p>
+     * Provided Solr URLs are expected to point to the root Solr path ("http://hostname:8983/solr"); they should not include any collections, cores, or other path components.
+     *
+     * <p>
+     * Usage example:
+     *
+     * <pre>
+     * final List&lt;String&gt; solrBaseUrls = new ArrayList&lt;String&gt;();
+     * solrBaseUrls.add("http://solr1:8983/solr");
+     * solrBaseUrls.add("http://solr2:8983/solr");
+     * solrBaseUrls.add("http://solr3:8983/solr");
+     * final SolrClient client = new CloudHttp2SolrClient.Builder(solrBaseUrls).build();
+     * </pre>
+     */
+    public Builder(final List<String> solrUrls) {
+      this.solrUrls = solrUrls;
+    }
+
+    /**
+     * Provide a series of ZK hosts which will be used when configuring {@link CloudHttp2SolrClient} instances.
+     *
+     * <p>
+     * Usage example when Solr stores data at the ZooKeeper root ('/'):
+     *
+     * <pre>
+     * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
+     * zkServers.add("zookeeper1:2181");
+     * zkServers.add("zookeeper2:2181");
+     * zkServers.add("zookeeper3:2181");
+     * final SolrClient client = new CloudHttp2SolrClient.Builder(zkServers, Optional.empty()).build();
+     * </pre>
+     *
+     * Usage example when Solr data is stored in a ZooKeeper chroot:
+     *
+     * <pre>
+     * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
+     * zkServers.add("zookeeper1:2181");
+     * zkServers.add("zookeeper2:2181");
+     * zkServers.add("zookeeper3:2181");
+     * final SolrClient client = new CloudHttp2SolrClient.Builder(zkServers, Optional.of("/solr")).build();
+     * </pre>
+     *
+     * @param zkHosts  a List of at least one ZooKeeper host and port (e.g. "zookeeper1:2181")
+     * @param zkChroot the path to the root ZooKeeper node containing Solr data. Provide {@code
+     *     java.util.Optional.empty()} if no ZK chroot is used.
+     */
+    public Builder(final List<String> zkHosts, final Optional<String> zkChroot) {
+      this.zkHosts = zkHosts;
+      if (zkChroot.isPresent())
+        this.zkChroot = zkChroot.get();
+    }
+
+    /**
+     * Tells {@link CloudHttp2SolrClient.Builder} that created clients should send direct updates to shard leaders only.
+     *
+     * <p>
+     * UpdateRequests whose leaders cannot be found will "fail fast" on the client side with a {@link SolrException}
+     */
+    public Builder sendDirectUpdatesToShardLeadersOnly() {
+      directUpdatesToLeadersOnly = true;
+      return this;
+    }
+
+    /**
+     * Tells {@link CloudHttp2SolrClient.Builder} that created clients can send updates to any shard replica (shard leaders and non-leaders).
+     *
+     * <p>
+     * Shard leaders are still preferred, but the created clients will fallback to using other replicas if a leader cannot be found.
+     */
+    public Builder sendDirectUpdatesToAnyShardReplica() {
+      directUpdatesToLeadersOnly = false;
+      return this;
+    }
+
+    /**
+     * Tells {@link CloudHttp2SolrClient.Builder} whether created clients should send shard updates serially or in parallel
+     *
+     * <p>
+     * When an {@link UpdateRequest} affects multiple shards, {@link CloudHttp2SolrClient} splits it up and sends a request to each affected shard. This setting chooses whether those sub-requests are
+     * sent serially or in parallel.
+     *
+     * <p>
+     * If not set, this defaults to 'true' and sends sub-requests in parallel.
+     */
+    public Builder withParallelUpdates(final boolean parallelUpdates) {
+      this.parallelUpdates = parallelUpdates;
+      return this;
+    }
+
+    public Builder withHttpClient(final ModifiedHttp2SolrClient httpClient) {
+      if (this.internalClientBuilder != null) {
+        throw new IllegalStateException("The builder can't accept an httpClient AND an internalClientBuilder, only one of those can be provided");
+      }
+      this.httpClient = httpClient;
+      return this;
+    }
+
+    /**
+     * If provided, the CloudHttp2SolrClient will build its internal ModifiedHttp2SolrClient using this builder (instead of the empty default one). Providing this builder allows users to configure
+     * the internal clients (authentication, timeouts, etc).
+     *
+     * @param internalClientBuilder the builder to use for creating the internal http client.
+     * @return this
+     */
+    public Builder withInternalClientBuilder(final ModifiedHttp2SolrClient.Builder internalClientBuilder) {
+      if (this.httpClient != null) {
+        throw new IllegalStateException("The builder can't accept an httpClient AND an internalClientBuilder, only one of those can be provided");
+      }
+      this.internalClientBuilder = internalClientBuilder;
+      return this;
+    }
+
+    /** Create a {@link CloudHttp2SolrClient} based on the provided configuration. */
+    public ModifiedCloudHttp2SolrClient build() {
+      if (stateProvider == null) {
+        if (!zkHosts.isEmpty()) {
+          stateProvider = ClusterStateProvider.newZkClusterStateProvider(zkHosts, Builder.this.zkChroot);
+        } else if (!this.solrUrls.isEmpty()) {
+          try {
+            stateProvider = new ModifiedHttp2ClusterStateProvider(solrUrls, httpClient);
+          } catch (final Exception e) {
+            throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + "Solr server(s), " + solrUrls + ", down?)", e);
+          }
+        } else {
+          throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null.");
+        }
+      }
+      return new ModifiedCloudHttp2SolrClient(this);
+    }
+  }
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudSolrClient.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudSolrClient.java
new file mode 100644
index 0000000..f76f6a2
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedCloudSolrClient.java
@@ -0,0 +1,1229 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+import static org.apache.solr.common.params.CommonParams.ID;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.apache.solr.client.solrj.ResponseParser;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.V2RequestSupport;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.IsUpdateRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.routing.ReplicaListTransformer;
+import org.apache.solr.client.solrj.routing.RequestReplicaListTransformerGenerator;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.ToleratedUpdateError;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.Hash;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+public abstract class ModifiedCloudSolrClient extends SolrClient {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private volatile String defaultCollection;
+  // no of times collection state to be reloaded if stale state error is received
+  private static final int MAX_STALE_RETRIES = Integer.parseInt(System.getProperty("cloudSolrClientMaxStaleRetries", "5"));
+  private final Random rand = new Random();
+
+  private final boolean updatesToLeaders;
+  private final boolean directUpdatesToLeadersOnly;
+  private final RequestReplicaListTransformerGenerator requestRLTGenerator;
+  boolean parallelUpdates; // TODO final
+  private ExecutorService threadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("CloudSolrClient ThreadPool"));
+
+  public static final String STATE_VERSION = "_stateVer_";
+  private long retryExpiryTime = TimeUnit.NANOSECONDS.convert(3, TimeUnit.SECONDS); // 3 seconds (3 billion nanos)
+  private final Set<String> NON_ROUTABLE_PARAMS;
+
+  {
+    NON_ROUTABLE_PARAMS = new HashSet<>();
+    NON_ROUTABLE_PARAMS.add(UpdateParams.EXPUNGE_DELETES);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.MAX_OPTIMIZE_SEGMENTS);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.WAIT_SEARCHER);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.OPEN_SEARCHER);
+
+    NON_ROUTABLE_PARAMS.add(UpdateParams.SOFT_COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.PREPARE_COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.OPTIMIZE);
+
+    // Not supported via SolrCloud
+    // NON_ROUTABLE_PARAMS.add(UpdateParams.ROLLBACK);
+
+  }
+
+  private volatile List<Object> locks = objectList(3);
+
+  /** Constructs {@link CloudSolrClient} instances from provided configuration. */
+  public static class Builder extends ModifiedCloudHttp2SolrClient.Builder {
+
+    /**
+     * Provide a series of Solr URLs to be used when configuring {@link CloudSolrClient} instances. The solr client will use these urls to understand the cluster topology, which solr nodes are active
+     * etc.
+     *
+     * <p>
+     * Provided Solr URLs are expected to point to the root Solr path ("http://hostname:8983/solr"); they should not include any collections, cores, or other path components.
+     *
+     * <p>
+     * Usage example:
+     *
+     * <pre>
+     * final List&lt;String&gt; solrBaseUrls = new ArrayList&lt;String&gt;();
+     * solrBaseUrls.add("http://solr1:8983/solr");
+     * solrBaseUrls.add("http://solr2:8983/solr");
+     * solrBaseUrls.add("http://solr3:8983/solr");
+     * final SolrClient client = new CloudSolrClient.Builder(solrBaseUrls).build();
+     * </pre>
+     */
+    public Builder(final List<String> solrUrls) {
+      super(solrUrls);
+    }
+
+    /**
+     * Provide a series of ZK hosts which will be used when configuring {@link CloudSolrClient} instances. This requires a dependency on {@code solr-solrj-zookeeper} which transitively depends on more
+     * JARs. The ZooKeeper based connection is the most reliable and performant means for CloudSolrClient to work. On the other hand, it means exposing ZooKeeper more broadly than to Solr nodes, which
+     * is a security risk.
+     *
+     * <p>
+     * Usage example when Solr stores data at the ZooKeeper root ('/'):
+     *
+     * <pre>
+     * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
+     * zkServers.add("zookeeper1:2181");
+     * zkServers.add("zookeeper2:2181");
+     * zkServers.add("zookeeper3:2181");
+     * final SolrClient client = new CloudSolrClient.Builder(zkServers, Optional.empty()).build();
+     * </pre>
+     *
+     * Usage example when Solr data is stored in a ZooKeeper chroot:
+     *
+     * <pre>
+     * final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
+     * zkServers.add("zookeeper1:2181");
+     * zkServers.add("zookeeper2:2181");
+     * zkServers.add("zookeeper3:2181");
+     * final SolrClient client = new CloudSolrClient.Builder(zkServers, Optional.of("/solr")).build();
+     * </pre>
+     *
+     * @param zkHosts  a List of at least one ZooKeeper host and port (e.g. "zookeeper1:2181")
+     * @param zkChroot the path to the root ZooKeeper node containing Solr data. Provide {@code
+     *     java.util.Optional.empty()} if no ZK chroot is used.
+     */
+    public Builder(final List<String> zkHosts, final Optional<String> zkChroot) {
+      super(zkHosts, zkChroot);
+    }
+  }
+
+  static class StateCache extends ConcurrentHashMap<String, ExpiringCachedDocCollection> {
+    final AtomicLong puts = new AtomicLong();
+    final AtomicLong hits = new AtomicLong();
+    final Lock evictLock = new ReentrantLock(true);
+    protected volatile long timeToLive = 60 * 1000L;
+
+    @Override
+    public ExpiringCachedDocCollection get(final Object key) {
+      final ExpiringCachedDocCollection val = super.get(key);
+      if (val == null) {
+        // a new collection is likely to be added now.
+        // check if there are stale items and remove them
+        evictStale();
+        return null;
+      }
+      if (val.isExpired(timeToLive)) {
+        super.remove(key);
+        return null;
+      }
+      hits.incrementAndGet();
+      return val;
+    }
+
+    @Override
+    public ExpiringCachedDocCollection put(final String key, final ExpiringCachedDocCollection value) {
+      puts.incrementAndGet();
+      return super.put(key, value);
+    }
+
+    void evictStale() {
+      if (!evictLock.tryLock())
+        return;
+      try {
+        for (final Entry<String, ExpiringCachedDocCollection> e : entrySet()) {
+          if (e.getValue().isExpired(timeToLive)) {
+            super.remove(e.getKey());
+          }
+        }
+      } finally {
+        evictLock.unlock();
+      }
+    }
+  }
+
+  /**
+   * This is the time to wait before refetching the state after getting the same state version from ZK.
+   *
+   * <p>
+   * @param secs the wait time in seconds
+   */
+  public void setRetryExpiryTime(final int secs) {
+    this.retryExpiryTime = TimeUnit.NANOSECONDS.convert(secs, TimeUnit.SECONDS);
+  }
+
+  protected final StateCache collectionStateCache = new StateCache();
+
+  class ExpiringCachedDocCollection {
+    final DocCollection cached;
+    final long cachedAt;
+    // This is the time at which the collection is retried and got the same old version
+    volatile long retriedAt = -1;
+    // flag that suggests that this is potentially to be rechecked
+    volatile boolean maybeStale = false;
+
+    ExpiringCachedDocCollection(final DocCollection cached) {
+      this.cached = cached;
+      this.cachedAt = System.nanoTime();
+    }
+
+    boolean isExpired(final long timeToLiveMs) {
+      return (System.nanoTime() - cachedAt) > TimeUnit.NANOSECONDS.convert(timeToLiveMs, TimeUnit.MILLISECONDS);
+    }
+
+    boolean shouldRetry() {
+      if (maybeStale) { // we are not sure if it is stale so check with retry time
+        if ((retriedAt == -1 || (System.nanoTime() - retriedAt) > retryExpiryTime)) {
+          return true; // we retried a while back. and we could not get anything new.
+          // it's likely that it is not going to be available now also.
+        }
+      }
+      return false;
+    }
+
+    void setRetriedAt() {
+      retriedAt = System.nanoTime();
+    }
+  }
+
+  protected ModifiedCloudSolrClient(final boolean updatesToLeaders, final boolean parallelUpdates, final boolean directUpdatesToLeadersOnly) {
+    this.updatesToLeaders = updatesToLeaders;
+    this.parallelUpdates = parallelUpdates;
+    this.directUpdatesToLeadersOnly = directUpdatesToLeadersOnly;
+    this.requestRLTGenerator = new RequestReplicaListTransformerGenerator();
+  }
+
+  /**
+   * Sets the cache ttl for DocCollection Objects cached.
+   *
+   * @param seconds ttl value in seconds
+   */
+  public void setCollectionCacheTTl(final int seconds) {
+    assert seconds > 0;
+    this.collectionStateCache.timeToLive = seconds * 1000L;
+  }
+
+  protected abstract ModifiedLBSolrClient getLbClient();
+
+  public abstract ClusterStateProvider getClusterStateProvider();
+
+  public ClusterState getClusterState() {
+    return getClusterStateProvider().getClusterState();
+  }
+
+  protected abstract boolean wasCommError(Throwable t);
+
+  @Override
+  public void close() throws IOException {
+    if (this.threadPool != null && !this.threadPool.isShutdown()) {
+      ExecutorUtil.shutdownAndAwaitTermination(this.threadPool);
+      this.threadPool = null;
+    }
+  }
+
+  public ResponseParser getParser() {
+    return getLbClient().getParser();
+  }
+
+  /**
+   * Note: This setter method is <b>not thread-safe</b>.
+   *
+   * @param processor Default Response Parser chosen to parse the response if the parser were not specified as part of the request.
+   * @see org.apache.solr.client.solrj.SolrRequest#getResponseParser()
+   */
+  public void setParser(final ResponseParser processor) {
+    getLbClient().setParser(processor);
+  }
+
+  public RequestWriter getRequestWriter() {
+    return getLbClient().getRequestWriter();
+  }
+
+  public void setRequestWriter(final RequestWriter requestWriter) {
+    getLbClient().setRequestWriter(requestWriter);
+  }
+
+  /** Sets the default collection for request */
+  public void setDefaultCollection(final String collection) {
+    this.defaultCollection = collection;
+  }
+
+  /** Gets the default collection for request */
+  public String getDefaultCollection() {
+    return defaultCollection;
+  }
+
+  /** Gets whether direct updates are sent in parallel */
+  public boolean isParallelUpdates() {
+    return parallelUpdates;
+  }
+
+  /**
+   * Connect to the zookeeper ensemble. This is an optional method that may be used to force a connect before any other requests are sent.
+   */
+  public void connect() {
+    getClusterStateProvider().connect();
+  }
+
+  /**
+   * Connect to a cluster. If the cluster is not ready, retry connection up to a given timeout.
+   *
+   * @param duration the timeout
+   * @param timeUnit the units of the timeout
+   * @throws TimeoutException     if the cluster is not ready after the timeout
+   * @throws InterruptedException if the wait is interrupted
+   */
+  public void connect(final long duration, final TimeUnit timeUnit) throws TimeoutException, InterruptedException {
+    if (log.isInfoEnabled()) {
+      log.info("Waiting for {} {} for cluster at {} to be ready", duration, timeUnit, getClusterStateProvider());
+    }
+    final long timeout = System.nanoTime() + timeUnit.toNanos(duration);
+    while (System.nanoTime() < timeout) {
+      try {
+        connect();
+        if (log.isInfoEnabled()) {
+          log.info("Cluster at {} ready", getClusterStateProvider());
+        }
+        return;
+      } catch (final RuntimeException e) {
+        // not ready yet, then...
+      }
+      TimeUnit.MILLISECONDS.sleep(250);
+    }
+    throw new TimeoutException("Timed out waiting for cluster");
+  }
+
+  @SuppressWarnings({ "unchecked" })
+  private NamedList<Object> directUpdate(final AbstractUpdateRequest request, String collection) throws SolrServerException {
+    final ModifiedUpdateRequest updateRequest = (ModifiedUpdateRequest) request;
+    SolrParams params = request.getParams();
+    final ModifiableSolrParams routableParams = new ModifiableSolrParams();
+    final ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
+
+    if (params != null) {
+      nonRoutableParams.add(params);
+      routableParams.add(params);
+      for (final String param : NON_ROUTABLE_PARAMS) {
+        routableParams.remove(param);
+      }
+    } else {
+      params = new ModifiableSolrParams();
+    }
+
+    if (collection == null) {
+      throw new SolrServerException("No collection param specified on request and no default collection has been set.");
+    }
+
+    // Check to see if the collection is an alias. Updates to multi-collection aliases are ok as
+    // long as they are routed aliases
+    final List<String> aliasedCollections = getClusterStateProvider().resolveAlias(collection);
+    if (getClusterStateProvider().isRoutedAlias(collection) || aliasedCollections.size() == 1) {
+      collection = aliasedCollections.get(0); // pick 1st (consistent with HttpSolrCall behavior)
+    } else {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Update request to non-routed multi-collection alias not supported: " + collection + " -> " + aliasedCollections);
+    }
+
+    final DocCollection col = getDocCollection(collection, null);
+
+    final DocRouter router = col.getRouter();
+
+    if (router instanceof ImplicitDocRouter) {
+      // short circuit as optimization
+      return null;
+    }
+
+    final ReplicaListTransformer replicaListTransformer = requestRLTGenerator.getReplicaListTransformer(params);
+
+    // Create the URL map, which is keyed on slice name.
+    // The value is a list of URLs for each replica in the slice.
+    // The first value in the list is the leader for the slice.
+    final Map<String, List<String>> urlMap = buildUrlMap(col, replicaListTransformer);
+    final String routeField = (col.getRouter().getRouteField(col) == null) ? ID : col.getRouter().getRouteField(col);
+    final Map<String, ? extends ModifiedLBSolrClient.Req> routes = createRoutes(updateRequest, routableParams, col, router, urlMap, routeField);
+    if (routes == null) {
+      if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, routeField)) {
+        // we have info (documents with ids and/or ids to delete) with
+        // which to find the leaders but we could not find (all of) them
+        throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "directUpdatesToLeadersOnly==true but could not find leader(s)");
+      } else {
+        // we could not find a leader or routes yet - use unoptimized general path
+        return null;
+      }
+    }
+
+    final NamedList<Throwable> exceptions = new NamedList<>();
+    final NamedList<NamedList<?>> shardResponses = new NamedList<>(routes.size() + 1); // +1 for deleteQuery
+
+    final long start = System.nanoTime();
+
+    if (parallelUpdates) {
+      final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
+      for (final Map.Entry<String, ? extends ModifiedLBSolrClient.Req> entry : routes.entrySet()) {
+        final String url = entry.getKey();
+        final ModifiedLBSolrClient.Req lbRequest = entry.getValue();
+        try {
+          MDC.put("CloudSolrClient.url", url);
+          responseFutures.put(url, threadPool.submit(() -> {
+            return getLbClient().request(lbRequest).getResponse();
+          }));
+        } finally {
+          MDC.remove("CloudSolrClient.url");
+        }
+      }
+
+      for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
+        final String url = entry.getKey();
+        final Future<NamedList<?>> responseFuture = entry.getValue();
+        try {
+          shardResponses.add(url, responseFuture.get());
+        } catch (final InterruptedException e) {
+          Thread.currentThread().interrupt();
+          throw new RuntimeException(e);
+        } catch (final ExecutionException e) {
+          exceptions.add(url, e.getCause());
+        }
+      }
+
+      if (exceptions.size() > 0) {
+        final Throwable firstException = exceptions.getVal(0);
+        if (firstException instanceof SolrException) {
+          final SolrException e = (SolrException) firstException;
+          throw getRouteException(SolrException.ErrorCode.getErrorCode(e.code()), exceptions, routes);
+        } else {
+          throw getRouteException(SolrException.ErrorCode.SERVER_ERROR, exceptions, routes);
+        }
+      }
+    } else {
+      for (final Map.Entry<String, ? extends ModifiedLBSolrClient.Req> entry : routes.entrySet()) {
+        final String url = entry.getKey();
+        final ModifiedLBSolrClient.Req lbRequest = entry.getValue();
+        try {
+          final NamedList<Object> rsp = getLbClient().request(lbRequest).getResponse();
+          shardResponses.add(url, rsp);
+        } catch (final Exception e) {
+          if (e instanceof SolrException) {
+            throw (SolrException) e;
+          } else {
+            throw new SolrServerException(e);
+          }
+        }
+      }
+    }
+
+    ModifiedUpdateRequest nonRoutableRequest = null;
+    final List<String> deleteQuery = updateRequest.getDeleteQuery();
+    if (deleteQuery != null && deleteQuery.size() > 0) {
+      final ModifiedUpdateRequest deleteQueryRequest = new ModifiedUpdateRequest();
+      deleteQueryRequest.setDeleteQuery(deleteQuery);
+      nonRoutableRequest = deleteQueryRequest;
+    }
+
+    final Set<String> paramNames = nonRoutableParams.getParameterNames();
+
+    final Set<String> intersection = new HashSet<>(paramNames);
+    intersection.retainAll(NON_ROUTABLE_PARAMS);
+
+    if (nonRoutableRequest != null || intersection.size() > 0) {
+      if (nonRoutableRequest == null) {
+        nonRoutableRequest = new ModifiedUpdateRequest();
+      }
+      nonRoutableRequest.setParams(nonRoutableParams);
+      nonRoutableRequest.setBasicAuthCredentials(request.getBasicAuthUser(), request.getBasicAuthPassword());
+      final List<String> urlList = new ArrayList<>(routes.keySet());
+      Collections.shuffle(urlList, rand);
+      final ModifiedLBSolrClient.Req req = new ModifiedLBSolrClient.Req(nonRoutableRequest, urlList);
+      try {
+        final ModifiedLBSolrClient.Rsp rsp = getLbClient().request(req);
+        shardResponses.add(urlList.get(0), rsp.getResponse());
+      } catch (final Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, urlList.get(0), e);
+      }
+    }
+
+    final long end = System.nanoTime();
+
+    @SuppressWarnings({ "rawtypes" })
+    final RouteResponse rr = condenseResponse(shardResponses, (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
+    rr.setRouteResponses(shardResponses);
+    rr.setRoutes(routes);
+    return rr;
+  }
+
+  /**
+   * Factory hook for the exception thrown when one or more routed shard requests fail.
+   * Subclasses may override to attach additional context.
+   *
+   * @param serverError error code derived from the first wrapped failure
+   * @param exceptions  per-URL failures collected from the shard requests
+   * @param routes      the routes that were attempted, keyed by leader URL
+   */
+  protected RouteException getRouteException(final SolrException.ErrorCode serverError, final NamedList<Throwable> exceptions, final Map<String, ? extends ModifiedLBSolrClient.Req> routes) {
+    return new RouteException(serverError, exceptions, routes);
+  }
+
+  /**
+   * Computes per-leader routes for an update request.
+   *
+   * @return a map of leader URL to the request destined for it, or null when no URL map is
+   *         available — the caller then falls back to the unoptimized general path
+   */
+  protected Map<String, ? extends ModifiedLBSolrClient.Req> createRoutes(final ModifiedUpdateRequest updateRequest, final ModifiableSolrParams routableParams, final DocCollection col,
+      final DocRouter router, final Map<String, List<String>> urlMap, final String routeField) {
+    return urlMap == null ? null : updateRequest.getRoutesToCollection(router, col, urlMap, routableParams, routeField);
+  }
+
+  /**
+   * Builds a map of slice name to the list of replica core URLs for that slice, leader first.
+   * Returns null when some slice has no discoverable leader and directUpdatesToLeadersOnly is
+   * false, signalling the caller to take the unoptimized general path; in leaders-only mode a
+   * leaderless slice is simply skipped.
+   */
+  private Map<String, List<String>> buildUrlMap(final DocCollection col, final ReplicaListTransformer replicaListTransformer) {
+    final Map<String, List<String>> urlMap = new HashMap<>();
+    final Slice[] slices = col.getActiveSlicesArr();
+    for (final Slice slice : slices) {
+      final String name = slice.getName();
+      final List<Replica> sortedReplicas = new ArrayList<>();
+      Replica leader = slice.getLeader();
+      if (directUpdatesToLeadersOnly && leader == null) {
+        // No recorded leader: fall back to the first active NRT replica, if any.
+        for (final Replica replica : slice.getReplicas(replica -> replica.isActive(getClusterStateProvider().getLiveNodes()) && replica.getType() == Replica.Type.NRT)) {
+          leader = replica;
+          break;
+        }
+      }
+      if (leader == null) {
+        if (directUpdatesToLeadersOnly) {
+          // leaders-only mode: skip leaderless slices entirely
+          continue;
+        }
+        // take unoptimized general path - we cannot find a leader yet
+        return null;
+      }
+
+      if (!directUpdatesToLeadersOnly) {
+        // Collect the non-leader replicas so the request can fail over to them.
+        for (final Replica replica : slice.getReplicas()) {
+          if (!replica.equals(leader)) {
+            sortedReplicas.add(replica);
+          }
+        }
+      }
+
+      // Sort the non-leader replicas according to the request parameters
+      replicaListTransformer.transform(sortedReplicas);
+
+      // put the leaderUrl first.
+      sortedReplicas.add(0, leader);
+
+      urlMap.put(name, sortedReplicas.stream().map(Replica::getCoreUrl).collect(Collectors.toList()));
+    }
+    return urlMap;
+  }
+
+  /**
+   * Merges the per-shard responses of a routed update into one condensed response: overall
+   * status (any non-zero shard status wins), the supplied QTime, the minimum achieved
+   * replication factor, merged tolerated-error bookkeeping, and merged per-update-type
+   * ("adds"/"deletes"/"deleteByQuery") version lists.
+   *
+   * @param response   per-shard responses; each value is that shard's response NamedList
+   * @param timeMillis wall-clock duration to report as QTime
+   * @param supplier   factory for the condensed response instance
+   * @throws SolrException (BAD_REQUEST) when cumulative tolerated errors exceed the effective
+   *                       maxErrors across shards
+   */
+  protected <T extends RouteResponse<?>> T condenseResponse(final NamedList<?> response, final int timeMillis, final Supplier<T> supplier) {
+    final T condensed = supplier.get();
+    int status = 0;
+    Integer rf = null;
+
+    // TolerantUpdateProcessor
+    List<SimpleOrderedMap<String>> toleratedErrors = null;
+    int maxToleratedErrors = Integer.MAX_VALUE;
+
+    // For "adds", "deletes", "deleteByQuery" etc.
+    final Map<String, NamedList<Object>> versions = new HashMap<>();
+
+    for (int i = 0; i < response.size(); i++) {
+      final NamedList<?> shardResponse = (NamedList<?>) response.getVal(i);
+      final NamedList<?> header = (NamedList<?>) shardResponse.get("responseHeader");
+      final Integer shardStatus = (Integer) header.get("status");
+      final int s = shardStatus.intValue();
+      if (s > 0) {
+        status = s;
+      }
+      // Track the minimum replication factor reported by any shard.
+      final Object rfObj = header.get(ModifiedUpdateRequest.REPFACT);
+      if (rfObj != null && rfObj instanceof Integer) {
+        final Integer routeRf = (Integer) rfObj;
+        if (rf == null || routeRf < rf)
+          rf = routeRf;
+      }
+
+      @SuppressWarnings("unchecked")
+      final List<SimpleOrderedMap<String>> shardTolerantErrors = (List<SimpleOrderedMap<String>>) header.get("errors");
+      if (null != shardTolerantErrors) {
+        final Integer shardMaxToleratedErrors = (Integer) header.get("maxErrors");
+        assert null != shardMaxToleratedErrors : "TolerantUpdateProcessor reported errors but not maxErrors";
+        // if we get into some weird state where the nodes disagree about the effective maxErrors,
+        // assume the min value seen to decide if we should fail.
+        maxToleratedErrors = Math.min(maxToleratedErrors, ToleratedUpdateError.getEffectiveMaxErrors(shardMaxToleratedErrors.intValue()));
+
+        if (null == toleratedErrors) {
+          toleratedErrors = new ArrayList<SimpleOrderedMap<String>>(shardTolerantErrors.size());
+        }
+        for (final SimpleOrderedMap<String> err : shardTolerantErrors) {
+          toleratedErrors.add(err);
+        }
+      }
+      // Concatenate the version lists reported by each shard, per update type.
+      for (final String updateType : Arrays.asList("adds", "deletes", "deleteByQuery")) {
+        final Object obj = shardResponse.get(updateType);
+        if (obj instanceof NamedList) {
+          final NamedList<Object> versionsList = versions.containsKey(updateType) ? versions.get(updateType) : new NamedList<>();
+          final NamedList<?> nl = (NamedList<?>) obj;
+          versionsList.addAll(nl);
+          versions.put(updateType, versionsList);
+        }
+      }
+    }
+
+    final NamedList<Object> cheader = new NamedList<>();
+    cheader.add("status", status);
+    cheader.add("QTime", timeMillis);
+    if (rf != null)
+      cheader.add(ModifiedUpdateRequest.REPFACT, rf);
+    if (null != toleratedErrors) {
+      cheader.add("maxErrors", ToleratedUpdateError.getUserFriendlyMaxErrors(maxToleratedErrors));
+      cheader.add("errors", toleratedErrors);
+      if (maxToleratedErrors < toleratedErrors.size()) {
+        // cumulative errors are too high, we need to throw a client exception w/correct metadata
+
+        // NOTE: it shouldn't be possible for 1 == toleratedErrors.size(), because if that were the
+        // case then at least one shard should have thrown a real error before this, so we don't
+        // worry about having a more "singular" exception msg for that situation
+        final StringBuilder msgBuf = new StringBuilder().append(toleratedErrors.size()).append(" Async failures during distributed update: ");
+
+        final NamedList<String> metadata = new NamedList<>();
+        for (final SimpleOrderedMap<String> err : toleratedErrors) {
+          final ToleratedUpdateError te = ToleratedUpdateError.parseMap(err);
+          metadata.add(te.getMetadataKey(), te.getMetadataValue());
+
+          msgBuf.append("\n").append(te.getMessage());
+        }
+
+        final SolrException toThrow = new SolrException(SolrException.ErrorCode.BAD_REQUEST, msgBuf.toString());
+        toThrow.setMetadata(metadata);
+        throw toThrow;
+      }
+    }
+    for (final Map.Entry<String, NamedList<Object>> entry : versions.entrySet()) {
+      condensed.add(entry.getKey(), entry.getValue());
+    }
+    condensed.add("responseHeader", cheader);
+    return condensed;
+  }
+
+  /** Convenience overload that condenses into a plain {@link RouteResponse}. */
+  @SuppressWarnings({ "rawtypes" })
+  public RouteResponse condenseResponse(final NamedList<?> response, final int timeMillis) {
+    return condenseResponse(response, timeMillis, RouteResponse::new);
+  }
+
+  /**
+   * Result of a routed update: the condensed response (this NamedList itself) plus the raw
+   * per-route responses and the routes that were used to produce them.
+   */
+  @SuppressWarnings({ "rawtypes" })
+  public static class RouteResponse<T extends ModifiedLBSolrClient.Req> extends NamedList<Object> {
+    // raw response of each shard request, keyed by the URL it was sent to
+    private NamedList<NamedList<?>> routeResponses;
+    // the routes that were executed, keyed by leader URL
+    private Map<String, T> routes;
+
+    public void setRouteResponses(final NamedList<NamedList<?>> routeResponses) {
+      this.routeResponses = routeResponses;
+    }
+
+    public NamedList<NamedList<?>> getRouteResponses() {
+      return routeResponses;
+    }
+
+    public void setRoutes(final Map<String, T> routes) {
+      this.routes = routes;
+    }
+
+    public Map<String, T> getRoutes() {
+      return routes;
+    }
+  }
+
+  /**
+   * Aggregate of the failures of a routed update. The first wrapped throwable supplies the
+   * message and cause; metadata from every wrapped SolrException is merged onto this exception.
+   */
+  public static class RouteException extends SolrException {
+
+    // NOTE(review): no serialVersionUID is declared although SolrException is Serializable — confirm intentional.
+    private final NamedList<Throwable> throwables;
+    private final Map<String, ? extends ModifiedLBSolrClient.Req> routes;
+
+    public RouteException(final ErrorCode errorCode, final NamedList<Throwable> throwables, final Map<String, ? extends ModifiedLBSolrClient.Req> routes) {
+      super(errorCode, throwables.getVal(0).getMessage(), throwables.getVal(0));
+      this.throwables = throwables;
+      this.routes = routes;
+
+      // create a merged copy of the metadata from all wrapped exceptions
+      final NamedList<String> metadata = new NamedList<String>();
+      for (int i = 0; i < throwables.size(); i++) {
+        final Throwable t = throwables.getVal(i);
+        if (t instanceof SolrException) {
+          final SolrException e = (SolrException) t;
+          final NamedList<String> eMeta = e.getMetadata();
+          if (null != eMeta) {
+            metadata.addAll(eMeta);
+          }
+        }
+      }
+      if (0 < metadata.size()) {
+        this.setMetadata(metadata);
+      }
+    }
+
+    /** @return the per-URL failures that produced this exception */
+    public NamedList<Throwable> getThrowables() {
+      return throwables;
+    }
+
+    /** @return the routes that were attempted, keyed by URL */
+    public Map<String, ? extends ModifiedLBSolrClient.Req> getRoutes() {
+      return this.routes;
+    }
+  }
+
+  /**
+   * Executes a request against the cluster. The collection set on the request itself overrides
+   * the {@code collection} argument, which in turn overrides the configured default collection;
+   * the resolved value may be a comma-separated list of collections/aliases.
+   */
+  @Override
+  public NamedList<Object> request(final SolrRequest<?> request, String collection) throws SolrServerException, IOException {
+    // the collection parameter of the request overrides that of the parameter to this method
+    final String requestCollection = request.getCollection();
+    if (requestCollection != null) {
+      collection = requestCollection;
+    } else if (collection == null) {
+      collection = defaultCollection;
+    }
+    final List<String> inputCollections = collection == null ? Collections.emptyList() : StrUtils.splitSmart(collection, ",", true);
+    return requestWithRetryOnStaleState(request, 0, inputCollections);
+  }
+
+  /**
+   * As this class doesn't watch external collections on the client side, there's a chance that the request will fail due to cached stale state, which means the state must be refreshed from ZK and
+   * retried.
+   *
+   * @param request          the request to execute (converted to its V2 form when supported)
+   * @param retryCount       retries performed so far; retrying stops at MAX_STALE_RETRIES
+   * @param inputCollections target collections/aliases; may be empty (e.g. admin requests)
+   */
+  protected NamedList<Object> requestWithRetryOnStaleState(SolrRequest<?> request, final int retryCount, final List<String> inputCollections) throws SolrServerException, IOException {
+    connect(); // important to call this before you start working with the ZkStateReader
+
+    // build up a _stateVer_ param to pass to the server containing all of the
+    // external collection state versions involved in this request, which allows
+    // the server to notify us that our cached state for one or more of the external
+    // collections is stale and needs to be refreshed ... this code has no impact on internal
+    // collections
+    String stateVerParam = null;
+    List<DocCollection> requestedCollections = null;
+    boolean isCollectionRequestOfV2 = false;
+    if (request instanceof V2RequestSupport) {
+      request = ((V2RequestSupport) request).getV2Request();
+    }
+    if (request instanceof V2Request) {
+      isCollectionRequestOfV2 = ((V2Request) request).isPerCollectionRequest();
+    }
+    final boolean isAdmin = ADMIN_PATHS.contains(request.getPath());
+    final boolean isUpdate = (request instanceof IsUpdateRequest) && (request instanceof ModifiedUpdateRequest);
+    if (!inputCollections.isEmpty() && !isAdmin && !isCollectionRequestOfV2) { // don't do _stateVer_ checking for admin, v2 api requests
+      final Set<String> requestedCollectionNames = resolveAliases(inputCollections, isUpdate);
+
+      StringBuilder stateVerParamBuilder = null;
+      for (final String requestedCollection : requestedCollectionNames) {
+        // track the version of state we're using on the client side using the _stateVer_ param
+        final DocCollection coll = getDocCollection(requestedCollection, null);
+        if (coll == null) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + requestedCollection);
+        }
+        final int collVer = coll.getZNodeVersion();
+        if (requestedCollections == null)
+          requestedCollections = new ArrayList<>(requestedCollectionNames.size());
+        requestedCollections.add(coll);
+
+        if (stateVerParamBuilder == null) {
+          stateVerParamBuilder = new StringBuilder();
+        } else {
+          stateVerParamBuilder.append("|"); // hopefully pipe is not an allowed char in a collection name
+        }
+
+        stateVerParamBuilder.append(coll.getName()).append(":").append(collVer);
+      }
+
+      if (stateVerParamBuilder != null) {
+        stateVerParam = stateVerParamBuilder.toString();
+      }
+    }
+
+    if (request.getParams() instanceof ModifiableSolrParams) {
+      final ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
+      if (stateVerParam != null) {
+        params.set(STATE_VERSION, stateVerParam);
+      } else {
+        params.remove(STATE_VERSION);
+      }
+    } // else: ??? how to set this ???
+
+    NamedList<Object> resp = null;
+    try {
+      resp = sendRequest(request, inputCollections);
+      // to avoid an O(n) operation we always add STATE_VERSION to the last and try to read it from
+      // there
+      final Object o = resp == null || resp.size() == 0 ? null : resp.get(STATE_VERSION, resp.size() - 1);
+      if (o != null && o instanceof Map) {
+        // remove this because no one else needs this and tests would fail if they are comparing
+        // responses
+        resp.remove(resp.size() - 1);
+        final Map<?, ?> invalidStates = (Map<?, ?>) o;
+        // the server told us which of our cached states are stale - refresh them now
+        for (final Map.Entry<?, ?> e : invalidStates.entrySet()) {
+          getDocCollection((String) e.getKey(), (Integer) e.getValue());
+        }
+      }
+    } catch (final Exception exc) {
+
+      final Throwable rootCause = SolrException.getRootCause(exc);
+      // don't do retry support for admin requests
+      // or if the request doesn't have a collection specified
+      // or request is v2 api and its method is not GET
+      if (inputCollections.isEmpty() || isAdmin || (request instanceof V2Request && request.getMethod() != SolrRequest.METHOD.GET)) {
+        if (exc instanceof SolrServerException) {
+          throw (SolrServerException) exc;
+        } else if (exc instanceof IOException) {
+          throw (IOException) exc;
+        } else if (exc instanceof RuntimeException) {
+          throw (RuntimeException) exc;
+        } else {
+          throw new SolrServerException(rootCause);
+        }
+      }
+
+      final int errorCode = (rootCause instanceof SolrException) ? ((SolrException) rootCause).code() : SolrException.ErrorCode.UNKNOWN.code;
+
+      final boolean wasCommError = (rootCause instanceof ConnectException || rootCause instanceof SocketException || wasCommError(rootCause));
+
+      if (wasCommError || (exc instanceof RouteException && (errorCode == 503)) // 503 = service unavailable, e.g. the targeted core is gone
+      // TODO there are other reasons for 404. We need to change the solr response format from HTML
+      // to structured data to know that
+      ) {
+        // it was a communication error. it is likely that
+        // the node to which the request to be sent is down . So , expire the state
+        // so that the next attempt would fetch the fresh state
+        // just re-read state for all of them, if it has not been retried
+        // in retryExpiryTime time
+        if (requestedCollections != null) {
+          for (final DocCollection ext : requestedCollections) {
+            final ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(ext.getName());
+            if (cacheEntry == null)
+              continue;
+            cacheEntry.maybeStale = true;
+          }
+        }
+        if (retryCount < MAX_STALE_RETRIES) { // if it is a communication error , we must try again
+          // may be, we have a stale version of the collection state
+          // and we could not get any information from the server
+          // it is probably not worth trying again and again because
+          // the state would not have been updated
+          log.info("Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} - retrying", inputCollections, errorCode, rootCause, retryCount, MAX_STALE_RETRIES,
+              wasCommError, errorCode);
+          return requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
+        }
+      } else {
+        log.info("request was not communication error it seems");
+      }
+      log.info("Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} ", inputCollections, errorCode, rootCause, retryCount, MAX_STALE_RETRIES, wasCommError,
+          errorCode);
+
+      boolean stateWasStale = false;
+      if (retryCount < MAX_STALE_RETRIES && requestedCollections != null && !requestedCollections.isEmpty()
+          && (SolrException.ErrorCode.getErrorCode(errorCode) == SolrException.ErrorCode.INVALID_STATE || errorCode == 404)) {
+        // cached state for one or more external collections was stale
+        // re-issue request using updated state
+        stateWasStale = true;
+
+        // just re-read state for all of them, which is a little heavy handed but hopefully a rare
+        // occurrence
+        for (final DocCollection ext : requestedCollections) {
+          collectionStateCache.remove(ext.getName());
+        }
+      }
+
+      // if we experienced a communication error, it's worth checking the state
+      // with ZK just to make sure the node we're trying to hit is still part of the collection
+      if (retryCount < MAX_STALE_RETRIES && !stateWasStale && requestedCollections != null && !requestedCollections.isEmpty() && wasCommError) {
+        for (final DocCollection ext : requestedCollections) {
+          final DocCollection latestStateFromZk = getDocCollection(ext.getName(), null);
+          if (latestStateFromZk.getZNodeVersion() != ext.getZNodeVersion()) {
+            // looks like we couldn't reach the server because the state was stale == retry
+            stateWasStale = true;
+            // we just pulled state from ZK, so update the cache so that the retry uses it
+            collectionStateCache.put(ext.getName(), new ExpiringCachedDocCollection(latestStateFromZk));
+          }
+        }
+      }
+
+      if (requestedCollections != null) {
+        requestedCollections.clear(); // done with this
+      }
+
+      // if the state was stale, then we retry the request once with new state pulled from Zk
+      if (stateWasStale) {
+        log.warn("Re-trying request to collection(s) {} after stale state error from server.", inputCollections);
+        resp = requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
+      } else {
+        if (exc instanceof SolrException || exc instanceof SolrServerException || exc instanceof IOException) {
+          throw exc;
+        } else {
+          throw new SolrServerException(rootCause);
+        }
+      }
+    }
+
+    return resp;
+  }
+
+  /**
+   * Executes the request, choosing the candidate URL list by request type: a random live node
+   * for V2 requests, all live nodes for admin paths, otherwise the replicas of the resolved
+   * collections (leaders listed first for updates). Routable update requests are first attempted
+   * via directUpdate(); a non-null result from it short-circuits the general path.
+   */
+  protected NamedList<Object> sendRequest(final SolrRequest<?> request, final List<String> inputCollections) throws SolrServerException, IOException {
+    connect();
+
+    boolean sendToLeaders = false;
+    boolean isUpdate = false;
+
+    if (request instanceof IsUpdateRequest) {
+      if (request instanceof ModifiedUpdateRequest) {
+        isUpdate = true;
+        if (inputCollections.size() > 1) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Update request must be sent to a single collection " + "or an alias: " + inputCollections);
+        }
+        final String collection = inputCollections.isEmpty() ? null : inputCollections.get(0); // getting first mimics HttpSolrCall
+        final NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection);
+        if (response != null) {
+          return response;
+        }
+      }
+      sendToLeaders = true;
+    }
+
+    SolrParams reqParams = request.getParams();
+    if (reqParams == null) { // TODO fix getParams to never return null!
+      reqParams = new ModifiableSolrParams();
+    }
+
+    final ReplicaListTransformer replicaListTransformer = requestRLTGenerator.getReplicaListTransformer(reqParams);
+
+    final ClusterStateProvider provider = getClusterStateProvider();
+    final String urlScheme = provider.getClusterProperty(ClusterState.URL_SCHEME, "http");
+    final Set<String> liveNodes = provider.getLiveNodes();
+
+    final List<String> theUrlList = new ArrayList<>(); // we populate this as follows...
+
+    if (request instanceof V2Request) {
+      // V2 requests go to one randomly chosen live node.
+      if (!liveNodes.isEmpty()) {
+        final List<String> liveNodesList = new ArrayList<>(liveNodes);
+        Collections.shuffle(liveNodesList, rand);
+        theUrlList.add(Utils.getBaseUrlForNodeName(liveNodesList.get(0), urlScheme));
+      }
+
+    } else if (ADMIN_PATHS.contains(request.getPath())) {
+      // Admin requests may be served by any live node; offer them all for load balancing.
+      for (final String liveNode : liveNodes) {
+        theUrlList.add(Utils.getBaseUrlForNodeName(liveNode, urlScheme));
+      }
+
+    } else { // Typical...
+      final Set<String> collectionNames = resolveAliases(inputCollections, isUpdate);
+      if (collectionNames.isEmpty()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No collection param specified on request and no default collection has been set: " + inputCollections);
+      }
+
+      // Explicitly preferred nodes bypass the replica-selection logic entirely.
+      final List<String> preferredNodes = request.getPreferredNodes();
+      if (preferredNodes != null && !preferredNodes.isEmpty()) {
+        final String joinedInputCollections = StrUtils.join(inputCollections, ',');
+        final List<String> urlList = new ArrayList<>(preferredNodes.size());
+        for (final String nodeName : preferredNodes) {
+          urlList.add(Utils.getBaseUrlForNodeName(nodeName, urlScheme) + "/" + joinedInputCollections);
+        }
+        if (!urlList.isEmpty()) {
+          final ModifiedLBSolrClient.Req req = new ModifiedLBSolrClient.Req(request, urlList);
+          final ModifiedLBSolrClient.Rsp rsp = getLbClient().request(req);
+          return rsp.getResponse();
+        }
+      }
+
+      // TODO: not a big deal because of the caching, but we could avoid looking
+      // at every shard when getting leaders if we tweaked some things
+
+      // Retrieve slices from the cloud state and, for each collection specified, add it to the Map
+      // of slices.
+      final Map<String, Slice> slices = new HashMap<>();
+      final String shardKeys = reqParams.get(ShardParams._ROUTE_);
+      for (final String collectionName : collectionNames) {
+        final DocCollection col = getDocCollection(collectionName, null);
+        if (col == null) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + collectionName);
+        }
+        final Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
+        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
+      }
+
+      // Gather URLs, grouped by leader or replica
+      final List<Replica> sortedReplicas = new ArrayList<>();
+      final List<Replica> replicas = new ArrayList<>();
+      for (final Slice slice : slices.values()) {
+        final Replica leader = slice.getLeader();
+        for (final Replica replica : slice.getReplicas()) {
+          final String node = replica.getNodeName();
+          if (!liveNodes.contains(node) // Must be a live node to continue
+              || replica.getState() != Replica.State.ACTIVE) // Must be an ACTIVE replica to continue
+            continue;
+          if (sendToLeaders && replica.equals(leader)) {
+            sortedReplicas.add(replica); // put leaders here eagerly (if sendToLeader mode)
+          } else {
+            replicas.add(replica); // replicas here
+          }
+        }
+      }
+
+      // Sort the leader replicas, if any, according to the request preferences (none if
+      // !sendToLeaders)
+      replicaListTransformer.transform(sortedReplicas);
+
+      // Sort the replicas, if any, according to the request preferences and append to our list
+      replicaListTransformer.transform(replicas);
+
+      sortedReplicas.addAll(replicas);
+
+      // De-duplicate by node so each node appears at most once in the URL list.
+      final String joinedInputCollections = StrUtils.join(inputCollections, ',');
+      final Set<String> seenNodes = new HashSet<>();
+      sortedReplicas.forEach(replica -> {
+        if (seenNodes.add(replica.getNodeName())) {
+          theUrlList.add(ZkCoreNodeProps.getCoreUrl(replica.getBaseUrl(), joinedInputCollections));
+        }
+      });
+
+      if (theUrlList.isEmpty()) {
+        collectionStateCache.keySet().removeAll(collectionNames);
+        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Could not find a healthy node to handle the request.");
+      }
+    }
+
+    final ModifiedLBSolrClient.Req req = new ModifiedLBSolrClient.Req(request, theUrlList);
+    final ModifiedLBSolrClient.Rsp rsp = getLbClient().request(req);
+    return rsp.getResponse();
+  }
+
+  /**
+   * Resolves the input collections to their possible aliased collections. Doesn't validate collection existence.
+   *
+   * @param inputCollections collection or alias names; may be empty
+   * @param isUpdate         currently unused here — kept for signature parity with upstream SolrJ
+   * @return unique resolved collection names, in first-seen order
+   */
+  private Set<String> resolveAliases(final List<String> inputCollections, final boolean isUpdate) {
+    if (inputCollections.isEmpty()) {
+      return Collections.emptySet();
+    }
+    final LinkedHashSet<String> uniqueNames = new LinkedHashSet<>(); // consistent ordering
+    for (final String collectionName : inputCollections) {
+      if (getClusterStateProvider().getState(collectionName) == null) {
+        // perhaps it's an alias
+        uniqueNames.addAll(getClusterStateProvider().resolveAlias(collectionName));
+      } else {
+        uniqueNames.add(collectionName); // it's a collection
+      }
+    }
+    return uniqueNames;
+  }
+
+  /** @return true if update requests are preferentially sent to shard leaders */
+  public boolean isUpdatesToLeaders() {
+    return updatesToLeaders;
+  }
+
+  /**
+   * @return true if direct updates are sent to shard leaders only
+   */
+  public boolean isDirectUpdatesToLeadersOnly() {
+    return directUpdatesToLeadersOnly;
+  }
+
+  /**
+   * If caches are expired they are refreshed after acquiring a lock. use this to set the number of locks
+   *
+   * @param n number of lock objects; higher values allow more concurrent cache refreshes
+   */
+  public void setParallelCacheRefreshes(final int n) {
+    locks = objectList(n);
+  }
+
+  /** Builds a list of {@code n} distinct plain Objects, used as lock instances. */
+  protected static ArrayList<Object> objectList(final int n) {
+    final ArrayList<Object> l = new ArrayList<>(n);
+    for (int i = 0; i < n; i++)
+      l.add(new Object());
+    return l;
+  }
+
+  /**
+   * Returns the (possibly cached) DocCollection state, refreshing from ZK when the cached version
+   * is older than {@code expectedVersion} or the entry is flagged for retry. Fetches are
+   * serialized on a per-collection lock (hashed into {@code locks}) so concurrent callers do not
+   * all hit ZK for the same collection.
+   *
+   * @param expectedVersion minimum acceptable cached ZNode version; null means any version
+   * @return the collection state, or null when the collection does not exist
+   */
+  protected DocCollection getDocCollection(final String collection, Integer expectedVersion) throws SolrException {
+    if (expectedVersion == null)
+      expectedVersion = -1;
+    if (collection == null)
+      return null;
+    ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(collection);
+    DocCollection col = cacheEntry == null ? null : cacheEntry.cached;
+    if (col != null) {
+      // fast path: cache is fresh enough and not due for a retry
+      if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry())
+        return col;
+    }
+
+    final ClusterState.CollectionRef ref = getCollectionRef(collection);
+    if (ref == null) {
+      // no such collection exists
+      return null;
+    }
+    if (!ref.isLazilyLoaded()) {
+      // it is readily available just return it
+      return ref.get();
+    }
+    final List<Object> locks = this.locks;
+    final Object lock = locks.get(Math.abs(Hash.murmurhash3_x86_32(collection, 0, collection.length(), 0) % locks.size()));
+    DocCollection fetchedCol = null;
+    synchronized (lock) {
+      /* we have waited for sometime just check once again */
+      cacheEntry = collectionStateCache.get(collection);
+      col = cacheEntry == null ? null : cacheEntry.cached;
+      if (col != null) {
+        if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry())
+          return col;
+      }
+      // We are going to fetch a new version
+      // we MUST try to get a new version
+      fetchedCol = ref.get(); // this is a call to ZK
+      if (fetchedCol == null)
+        return null; // this collection no more exists
+      if (col != null && fetchedCol.getZNodeVersion() == col.getZNodeVersion()) {
+        cacheEntry.setRetriedAt(); // we retried and found that it is the same version
+        cacheEntry.maybeStale = false;
+      } else {
+        collectionStateCache.put(collection, new ExpiringCachedDocCollection(fetchedCol));
+      }
+      return fetchedCol;
+    }
+  }
+
+  /** Looks up the (possibly lazily loaded) cluster-state reference for a collection. */
+  ClusterState.CollectionRef getCollectionRef(final String collection) {
+    return getClusterStateProvider().getState(collection);
+  }
+
+  /**
+   * Useful for determining the minimum achieved replication factor across all shards involved in processing an update request, typically useful for gauging the replication factor of a batch.
+   *
+   * @return the minimum achieved replication factor, or -1 when none was reported
+   */
+  public int getMinAchievedReplicationFactor(final String collection, final NamedList<?> resp) {
+    // it's probably already on the top-level header set by condense
+    final NamedList<?> header = (NamedList<?>) resp.get("responseHeader");
+    Integer achRf = (Integer) header.get(ModifiedUpdateRequest.REPFACT);
+    if (achRf != null)
+      return achRf.intValue();
+
+    // not on the top-level header, walk the shard route tree
+    final Map<String, Integer> shardRf = getShardReplicationFactor(collection, resp);
+    for (final Integer rf : shardRf.values()) {
+      if (achRf == null || rf < achRf) {
+        achRf = rf;
+      }
+    }
+    return (achRf != null) ? achRf.intValue() : -1;
+  }
+
+  /**
+   * Walks the NamedList response after performing an update request looking for the replication factor that was achieved in each shard involved in the request. For single doc updates, there will be
+   * only one shard in the return value.
+   *
+   * @return map of shard name (or host URL when the shard cannot be identified) to achieved rf;
+   *         empty when {@code resp} is not a RouteResponse
+   */
+  public Map<String, Integer> getShardReplicationFactor(final String collection, final NamedList<?> resp) {
+    connect();
+
+    final Map<String, Integer> results = new HashMap<>();
+    if (resp instanceof RouteResponse) {
+      final NamedList<NamedList<?>> routes = ((RouteResponse<?>) resp).getRouteResponses();
+      final DocCollection coll = getDocCollection(collection, null);
+      final Map<String, String> leaders = new HashMap<>();
+      for (final Slice slice : coll.getActiveSlicesArr()) {
+        final Replica leader = slice.getLeader();
+        if (leader != null) {
+          final ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);
+          final String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName();
+          leaders.put(leaderUrl, slice.getName());
+          // also index by base-url + collection name, since routes may have been built that way
+          final String altLeaderUrl = zkProps.getBaseUrl() + "/" + collection;
+          leaders.put(altLeaderUrl, slice.getName());
+        }
+      }
+
+      final Iterator<Map.Entry<String, NamedList<?>>> routeIter = routes.iterator();
+      while (routeIter.hasNext()) {
+        final Map.Entry<String, NamedList<?>> next = routeIter.next();
+        final String host = next.getKey();
+        final NamedList<?> hostResp = next.getValue();
+        final Integer rf = (Integer) ((NamedList<?>) hostResp.get("responseHeader")).get(ModifiedUpdateRequest.REPFACT);
+        if (rf != null) {
+          // map the host back to its shard; retry without a trailing slash, else fall back to the host itself
+          String shard = leaders.get(host);
+          if (shard == null) {
+            if (host.endsWith("/"))
+              shard = leaders.get(host.substring(0, host.length() - 1));
+            if (shard == null) {
+              shard = host;
+            }
+          }
+          results.put(shard, rf);
+        }
+      }
+    }
+    return results;
+  }
+
+  /**
+   * Returns true only when the update request carries enough information to compute target
+   * leaders: at least one document or delete-by-id entry, and every document has a value for the
+   * routing/id field. Delete-by-query alone is not routable.
+   */
+  private static boolean hasInfoToFindLeaders(final ModifiedUpdateRequest updateRequest, final String idField) {
+    final Map<SolrInputDocument, Map<String, Object>> documents = updateRequest.getDocumentsMap();
+    final Map<String, Map<String, Object>> deleteById = updateRequest.getDeleteByIdMap();
+
+    final boolean hasNoDocuments = (documents == null || documents.isEmpty());
+    final boolean hasNoDeleteById = (deleteById == null || deleteById.isEmpty());
+    if (hasNoDocuments && hasNoDeleteById) {
+      // no documents and no delete-by-id, so no info to find leader(s)
+      return false;
+    }
+
+    if (documents != null) {
+      for (final Map.Entry<SolrInputDocument, Map<String, Object>> entry : documents.entrySet()) {
+        final SolrInputDocument doc = entry.getKey();
+        final Object fieldValue = doc.getFieldValue(idField);
+        if (fieldValue == null) {
+          // a document with no id field value, so can't find leader for it
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
+
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2ClusterStateProvider.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2ClusterStateProvider.java
new file mode 100644
index 0000000..411be46
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2ClusterStateProvider.java
@@ -0,0 +1,30 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.impl.BaseHttpClusterStateProvider;
+
+/**
+ * Cluster-state provider backed by {@link ModifiedHttp2SolrClient}. Mirrors Solr's
+ * HTTP-based cluster-state provider, but every per-node {@link SolrClient} it hands
+ * out is the modified client so all requests go through the multipart-aware path.
+ */
+public class ModifiedHttp2ClusterStateProvider extends BaseHttpClusterStateProvider {
+  final ModifiedHttp2SolrClient httpClient;
+  // true only when this provider built the client itself and therefore owns its lifecycle
+  final boolean closeClient;
+
+  /**
+   * @param solrUrls   base URLs used by {@code init} to discover cluster state
+   * @param httpClient shared client to reuse, or {@code null} to build (and own) a default one
+   */
+  public ModifiedHttp2ClusterStateProvider(final List<String> solrUrls, final ModifiedHttp2SolrClient httpClient) throws Exception {
+    this.httpClient = httpClient == null ? new ModifiedHttp2SolrClient.Builder().build() : httpClient;
+    this.closeClient = httpClient == null;
+    init(solrUrls);
+  }
+
+  @Override
+  public void close() throws IOException {
+    // only close the client if we created it; a caller-supplied client is the caller's to close
+    if (this.closeClient && this.httpClient != null) {
+      httpClient.close();
+    }
+  }
+
+  @Override
+  protected SolrClient getSolrClient(final String baseUrl) {
+    // per-node client shares the underlying HTTP client, so construction is cheap
+    return new ModifiedHttp2SolrClient.Builder(baseUrl).withHttpClient(httpClient).build();
+  }
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2SolrClient.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2SolrClient.java
new file mode 100644
index 0000000..90758ec
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttp2SolrClient.java
@@ -0,0 +1,1062 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import static org.apache.solr.common.util.Utils.getObjectByPath;
+
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.lang.invoke.MethodHandles;
+import java.net.ConnectException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Phaser;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.client.solrj.ResponseParser;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.V2RequestSupport;
+import org.apache.solr.client.solrj.embedded.SSLConfig;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient.RemoteExecutionException;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient.RemoteSolrException;
+import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
+import org.apache.solr.client.solrj.impl.BinaryResponseParser;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.HttpListenerFactory;
+import org.apache.solr.client.solrj.impl.InputStreamResponseParser;
+import org.apache.solr.client.solrj.request.RequestWriter;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.util.AsyncListener;
+import org.apache.solr.client.solrj.util.Cancellable;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.StringUtils;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.ObjectReleaseTracker;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.common.util.Utils;
+import org.eclipse.jetty.client.HttpClient;
+import org.eclipse.jetty.client.HttpClientTransport;
+import org.eclipse.jetty.client.ProtocolHandlers;
+import org.eclipse.jetty.client.api.Request;
+import org.eclipse.jetty.client.api.Response;
+import org.eclipse.jetty.client.http.HttpClientTransportOverHTTP;
+import org.eclipse.jetty.client.util.ByteBufferContentProvider;
+import org.eclipse.jetty.client.util.FormContentProvider;
+import org.eclipse.jetty.client.util.InputStreamContentProvider;
+import org.eclipse.jetty.client.util.InputStreamResponseListener;
+import org.eclipse.jetty.client.util.MultiPartContentProvider;
+import org.eclipse.jetty.client.util.OutputStreamContentProvider;
+import org.eclipse.jetty.client.util.StringContentProvider;
+import org.eclipse.jetty.http.HttpField;
+import org.eclipse.jetty.http.HttpFields;
+import org.eclipse.jetty.http.HttpHeader;
+import org.eclipse.jetty.http.HttpMethod;
+import org.eclipse.jetty.http.HttpStatus;
+import org.eclipse.jetty.http.MimeTypes;
+import org.eclipse.jetty.http2.client.HTTP2Client;
+import org.eclipse.jetty.http2.client.http.HttpClientTransportOverHTTP2;
+import org.eclipse.jetty.util.BlockingArrayQueue;
+import org.eclipse.jetty.util.Fields;
+import org.eclipse.jetty.util.ssl.SslContextFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Copy of Solr's Http2SolrClient adapted so that multipart POST can be applied to
+ * requests (see {@code createRequest}); uses Jetty's HttpClient over HTTP/1.1 or
+ * HTTP/2 underneath.
+ */
+public class ModifiedHttp2SolrClient extends SolrClient {
+
+  private static final long serialVersionUID = -869785058825555540L;
+
+  // request attribute key under which the caller's Principal is stashed
+  public static final String REQ_PRINCIPAL_KEY = "solr-req-principal";
+  // always prefer multipart POST for non-UpdateRequest requests (the point of this modified client)
+  private final boolean useMultiPartPost = true;
+
+  private static volatile SSLConfig defaultSSLConfig;
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final String AGENT = "Solr[" + ModifiedHttp2SolrClient.class.getName() + "] 2.0";
+  private static final Charset FALLBACK_CHARSET = StandardCharsets.UTF_8;
+  private static final String DEFAULT_PATH = "/select";
+  private static final List<String> errPath = Arrays.asList("metadata", "error-class");
+
+  private HttpClient httpClient;
+  private volatile Set<String> queryParams = Collections.emptySet();
+  // idle/request timeout in milliseconds, also used as the blocking-request timeout
+  private int idleTimeout;
+
+  private ResponseParser parser = new BinaryResponseParser();
+  private volatile RequestWriter requestWriter = new BinaryRequestWriter();
+  private final List<HttpListenerFactory> listenerFactory = new LinkedList<>();
+  private final AsyncTracker asyncTracker = new AsyncTracker();
+  /** The URL of the Solr server. */
+  private String serverBaseUrl;
+
+  // true when this instance created httpClient and must stop/destroy it on close()
+  private boolean closeClient;
+  private ExecutorService executor;
+  // true when the executor was created internally and must be shut down on close()
+  private boolean shutdownExecutor;
+
+  // precomputed "Basic ..." Authorization header value, or null when no builder credentials were given
+  private final String basicAuthAuthorizationStr;
+
+  /**
+   * Builds a client from the builder's settings. Normalizes the base URL (strips a
+   * trailing slash and a doubled leading slash), resolves the idle timeout, and either
+   * creates a new Jetty HttpClient or reuses the one from {@code builder.http2SolrClient}.
+   *
+   * @param serverBaseUrl base Solr URL, may be null when set per-request via basePath
+   * @param builder       configuration source (timeouts, SSL, auth, shared client)
+   */
+  protected ModifiedHttp2SolrClient(String serverBaseUrl, final Builder builder) {
+    if (serverBaseUrl != null) {
+      // strip a trailing "/" (but keep a bare "/" intact)
+      if (!serverBaseUrl.equals("/") && serverBaseUrl.endsWith("/")) {
+        serverBaseUrl = serverBaseUrl.substring(0, serverBaseUrl.length() - 1);
+      }
+
+      // collapse a doubled leading slash ("//host/..." -> "/host/...")
+      if (serverBaseUrl.startsWith("//")) {
+        serverBaseUrl = serverBaseUrl.substring(1, serverBaseUrl.length());
+      }
+      this.serverBaseUrl = serverBaseUrl;
+    }
+
+    if (builder.idleTimeout != null && builder.idleTimeout > 0)
+      idleTimeout = builder.idleTimeout;
+    else
+      idleTimeout = HttpClientUtil.DEFAULT_SO_TIMEOUT;
+
+    if (builder.http2SolrClient == null) {
+      // no shared client supplied: create one and take ownership of its lifecycle
+      httpClient = createHttpClient(builder);
+      closeClient = true;
+    } else {
+      // reuse the underlying Jetty client of the supplied Http2 client (not owned, not closed here)
+      httpClient = builder.http2SolrClient.httpClient;
+    }
+    if (builder.basicAuthUser != null && builder.basicAuthPassword != null) {
+      basicAuthAuthorizationStr = basicAuthCredentialsToAuthorizationString(builder.basicAuthUser, builder.basicAuthPassword);
+    } else {
+      basicAuthAuthorizationStr = null;
+    }
+    assert ObjectReleaseTracker.track(this);
+  }
+
+  /** Registers a factory whose listeners are attached to every outgoing request. */
+  public void addListenerFactory(final HttpListenerFactory factory) {
+    this.listenerFactory.add(factory);
+  }
+
+  // internal usage only: exposes the underlying Jetty client
+  HttpClient getHttpClient() {
+    return httpClient;
+  }
+
+  // internal usage only: exposes Jetty's protocol handlers for the underlying client
+  ProtocolHandlers getProtocolHandlers() {
+    return httpClient.getProtocolHandlers();
+  }
+
+  /**
+   * Creates and starts the underlying Jetty {@link HttpClient} from the builder's
+   * settings: executor (created if absent), SSL (from builder config or detected via
+   * {@code javax.net.ssl.*} system properties), and HTTP/1.1 vs HTTP/2 transport.
+   *
+   * @return a started HttpClient; on startup failure, {@link #close()} is invoked for
+   *         cleanup and the exception is rethrown as a RuntimeException
+   */
+  private HttpClient createHttpClient(final Builder builder) {
+    HttpClient httpClient;
+
+    executor = builder.executor;
+    if (executor == null) {
+      // no executor supplied: create a bounded pool and remember to shut it down in close()
+      final BlockingArrayQueue<Runnable> queue = new BlockingArrayQueue<>(256, 256);
+      this.executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(32, 256, 60, TimeUnit.SECONDS, queue, new SolrNamedThreadFactory("h2sc"));
+      shutdownExecutor = true;
+    } else {
+      shutdownExecutor = false;
+    }
+
+    SslContextFactory.Client sslContextFactory;
+    boolean sslEnabled;
+    if (builder.sslConfig == null) {
+      // infer SSL from standard JSSE system properties when no explicit config was given
+      sslEnabled = System.getProperty("javax.net.ssl.keyStore") != null || System.getProperty("javax.net.ssl.trustStore") != null;
+      sslContextFactory = sslEnabled ? getDefaultSslContextFactory() : null;
+    } else {
+      sslContextFactory = builder.sslConfig.createClientContextFactory();
+      sslEnabled = true;
+    }
+
+    HttpClientTransport transport;
+    if (builder.useHttp1_1) {
+      if (log.isDebugEnabled()) {
+        log.debug("Create Http2SolrClient with HTTP/1.1 transport");
+      }
+      // NOTE(review): the constructor arg 2 presumably sets the selector count — confirm against Jetty docs
+      transport = new HttpClientTransportOverHTTP(2);
+      httpClient = sslEnabled ? new HttpClient(transport, sslContextFactory) : new HttpClient(transport);
+      if (builder.maxConnectionsPerHost != null)
+        httpClient.setMaxConnectionsPerDestination(builder.maxConnectionsPerHost);
+    } else {
+      log.debug("Create Http2SolrClient with HTTP/2 transport");
+      final HTTP2Client http2client = new HTTP2Client();
+      transport = new HttpClientTransportOverHTTP2(http2client);
+      httpClient = new HttpClient(transport, sslContextFactory);
+      httpClient.setMaxConnectionsPerDestination(4);
+    }
+
+    httpClient.setExecutor(this.executor);
+    httpClient.setStrictEventOrdering(false);
+    httpClient.setConnectBlocking(true);
+    // redirects are surfaced to the caller instead of being followed (see processErrorsAndResponse)
+    httpClient.setFollowRedirects(false);
+    httpClient.setMaxRequestsQueuedPerDestination(asyncTracker.getMaxRequestsQueuedPerDestination());
+    httpClient.setUserAgentField(new HttpField(HttpHeader.USER_AGENT, AGENT));
+
+    httpClient.setIdleTimeout(idleTimeout);
+    if (builder.connectionTimeout != null)
+      httpClient.setConnectTimeout(builder.connectionTimeout);
+    try {
+      httpClient.start();
+    } catch (final Exception e) {
+      close(); // make sure we clean up
+      throw new RuntimeException(e);
+    }
+
+    return httpClient;
+  }
+
+  /**
+   * Closes the client: waits for in-flight async requests, stops/destroys the Jetty
+   * client if owned, and shuts down the executor if it was created internally.
+   * Wraps any shutdown failure in a RuntimeException.
+   */
+  @Override
+  public void close() {
+    // we wait for async requests, so far devs don't want to give sugar for this
+    asyncTracker.waitForComplete();
+    try {
+      if (closeClient) {
+        httpClient.setStopTimeout(1000);
+        httpClient.stop();
+        httpClient.destroy();
+      }
+    } catch (final Exception e) {
+      throw new RuntimeException("Exception on closing client", e);
+    } finally {
+      // executor shutdown happens even if stopping the client failed
+      if (shutdownExecutor) {
+        ExecutorUtil.shutdownAndAwaitTermination(executor);
+      }
+    }
+
+    assert ObjectReleaseTracker.release(this);
+  }
+
+  /** @return true if the request targets Solr's v2 API (a V2Request or an internal "/____v2" path). */
+  public boolean isV2ApiRequest(final SolrRequest<?> request) {
+    return request instanceof V2Request || request.getPath().contains("/____v2");
+  }
+
+  /** @return the configured idle/request timeout in milliseconds. */
+  public long getIdleTimeout() {
+    return idleTimeout;
+  }
+
+  /**
+   * Handle on an open streaming update connection (see {@code initOutStream}): callers
+   * write serialized update payloads into {@code outProvider} and read the server's
+   * reply via {@code responseListener}. For XML payloads the surrounding
+   * {@code <stream>}/{@code </stream>} envelope is written by initOutStream/close.
+   */
+  public static class OutStream implements Closeable {
+    private final String origCollection;
+    private final ModifiableSolrParams origParams;
+    private final OutputStreamContentProvider outProvider;
+    private final InputStreamResponseListener responseListener;
+    private final boolean isXml;
+
+    public OutStream(final String origCollection, final ModifiableSolrParams origParams, final OutputStreamContentProvider outProvider, final InputStreamResponseListener responseListener,
+        final boolean isXml) {
+      this.origCollection = origCollection;
+      this.origParams = origParams;
+      this.outProvider = outProvider;
+      this.responseListener = responseListener;
+      this.isXml = isXml;
+    }
+
+    // a follow-up request may only reuse this stream if its params and collection match the originals
+    boolean belongToThisStream(final SolrRequest<?> solrRequest, final String collection) {
+      final ModifiableSolrParams solrParams = new ModifiableSolrParams(solrRequest.getParams());
+      if (!origParams.toNamedList().equals(solrParams.toNamedList()) || !StringUtils.equals(origCollection, collection)) {
+        return false;
+      }
+      return true;
+    }
+
+    public void write(final byte b[]) throws IOException {
+      this.outProvider.getOutputStream().write(b);
+    }
+
+    public void flush() throws IOException {
+      this.outProvider.getOutputStream().flush();
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (isXml) {
+        // terminate the XML envelope opened by initOutStream before closing the stream
+        write("</stream>".getBytes(FALLBACK_CHARSET));
+      }
+      this.outProvider.getOutputStream().close();
+    }
+
+    // TODO this class should be hidden
+    public InputStreamResponseListener getResponseListener() {
+      return responseListener;
+    }
+  }
+
+  /**
+   * Opens a streaming POST to {@code <baseUrl>[/<collection>]/update} and returns an
+   * {@link OutStream} wrapper for writing update payloads into it. The parser's
+   * {@code wt}/{@code version} params override the request's originals; for XML
+   * payloads an opening {@code <stream>} tag is written immediately.
+   *
+   * @param baseUrl       base node URL
+   * @param updateRequest request whose params seed the query string
+   * @param collection    target collection, or null for the default
+   */
+  public OutStream initOutStream(final String baseUrl, final UpdateRequest updateRequest, final String collection) throws IOException {
+    final String contentType = requestWriter.getUpdateContentType();
+    final ModifiableSolrParams origParams = new ModifiableSolrParams(updateRequest.getParams());
+
+    // The parser 'wt=' and 'version=' params are used instead of the
+    // original params
+    final ModifiableSolrParams requestParams = new ModifiableSolrParams(origParams);
+    requestParams.set(CommonParams.WT, parser.getWriterType());
+    requestParams.set(CommonParams.VERSION, parser.getVersion());
+
+    String basePath = baseUrl;
+    if (collection != null)
+      basePath += "/" + collection;
+    if (!basePath.endsWith("/"))
+      basePath += "/";
+
+    final OutputStreamContentProvider provider = new OutputStreamContentProvider();
+    final Request postRequest = httpClient.newRequest(basePath + "update" + requestParams.toQueryString()).method(HttpMethod.POST).header(HttpHeader.CONTENT_TYPE, contentType).content(provider);
+    decorateRequest(postRequest, updateRequest);
+    final InputStreamResponseListener responseListener = new InputStreamResponseListener();
+    postRequest.send(responseListener);
+
+    final boolean isXml = ClientUtils.TEXT_XML.equals(requestWriter.getUpdateContentType());
+    final OutStream outStream = new OutStream(collection, origParams, provider, responseListener, isXml);
+    if (isXml) {
+      // open the envelope that OutStream.close() will terminate with </stream>
+      outStream.write("<stream>".getBytes(FALLBACK_CHARSET));
+    }
+    return outStream;
+  }
+
+  /**
+   * Serializes {@code req} onto an already-open {@link OutStream} and flushes it. For
+   * XML streams, a trailing {@code <optimize .../>} or {@code <commit .../>} element is
+   * appended when the request's params ask for one. The request must belong to the
+   * stream (same params and collection, asserted).
+   */
+  public void send(final OutStream outStream, final SolrRequest<?> req, final String collection) throws IOException {
+    assert outStream.belongToThisStream(req, collection);
+    this.requestWriter.write(req, outStream.outProvider.getOutputStream());
+    if (outStream.isXml) {
+      // check for commit or optimize
+      final SolrParams params = req.getParams();
+      if (params != null) {
+        String fmt = null;
+        if (params.getBool(UpdateParams.OPTIMIZE, false)) {
+          fmt = "<optimize waitSearcher=\"%s\" />";
+        } else if (params.getBool(UpdateParams.COMMIT, false)) {
+          fmt = "<commit waitSearcher=\"%s\" />";
+        }
+        if (fmt != null) {
+          // Locale.ROOT keeps the boolean formatting locale-independent
+          final byte[] content = String.format(Locale.ROOT, fmt, params.getBool(UpdateParams.WAIT_SEARCHER, false) + "").getBytes(FALLBACK_CHARSET);
+          outStream.write(content);
+        }
+      }
+    }
+    outStream.flush();
+  }
+
+  // sentinel used to abort Jetty requests on cancellation; compared by identity in the async callbacks
+  @SuppressWarnings("StaticAssignmentOfThrowable")
+  private static final Exception CANCELLED_EXCEPTION = new Exception();
+
+  // no-op Cancellable returned when request construction itself failed (nothing to abort)
+  private static final Cancellable FAILED_MAKING_REQUEST_CANCELLABLE = () -> {
+  };
+
+  /**
+   * Sends {@code solrRequest} asynchronously. The parsed response (or failure) is
+   * delivered to {@code asyncListener}; response parsing runs on {@link #executor}.
+   * Cancellation aborts the request with {@code CANCELLED_EXCEPTION}, which is
+   * filtered out so cancelled requests do not surface as failures.
+   *
+   * @return a handle that aborts the in-flight request when invoked
+   */
+  public Cancellable asyncRequest(final SolrRequest<?> solrRequest, final String collection, final AsyncListener<NamedList<Object>> asyncListener) {
+    Request req;
+    try {
+      req = makeRequest(solrRequest, collection);
+    } catch (SolrServerException | IOException e) {
+      // construction failed before anything was sent: report and hand back a no-op cancellable
+      asyncListener.onFailure(e);
+      return FAILED_MAKING_REQUEST_CANCELLABLE;
+    }
+    final ResponseParser parser = solrRequest.getResponseParser() == null ? this.parser : solrRequest.getResponseParser();
+    req.onRequestQueued(asyncTracker.queuedListener).onComplete(asyncTracker.completeListener).send(new InputStreamResponseListener() {
+      @Override
+      public void onHeaders(final Response response) {
+        super.onHeaders(response);
+        final InputStreamResponseListener listener = this;
+        // parse the body off the Jetty event thread
+        executor.execute(() -> {
+          final InputStream is = listener.getInputStream();
+          assert ObjectReleaseTracker.track(is);
+          try {
+            final NamedList<Object> body = processErrorsAndResponse(solrRequest, parser, response, is);
+            asyncListener.onSuccess(body);
+          } catch (final RemoteSolrException e) {
+            // swallow failures caused by our own cancellation sentinel
+            if (SolrException.getRootCause(e) != CANCELLED_EXCEPTION) {
+              asyncListener.onFailure(e);
+            }
+          } catch (final SolrServerException e) {
+            asyncListener.onFailure(e);
+          }
+        });
+      }
+
+      @Override
+      public void onFailure(final Response response, final Throwable failure) {
+        super.onFailure(response, failure);
+        if (failure != CANCELLED_EXCEPTION) {
+          asyncListener.onFailure(new SolrServerException(failure.getMessage(), failure));
+        }
+      }
+    });
+    return () -> req.abort(CANCELLED_EXCEPTION);
+  }
+
+  /**
+   * Synchronous request path: builds the Jetty request, blocks up to
+   * {@code idleTimeout} ms for response headers, then parses the body. Execution
+   * failures are unwrapped into the most specific exception type; on any failure the
+   * in-flight request is aborted with the causing throwable in the finally block.
+   *
+   * @throws SolrServerException on timeout, connection refusal, or I/O/parse failure
+   */
+  @Override
+  public NamedList<Object> request(final SolrRequest<?> solrRequest, final String collection) throws SolrServerException, IOException {
+    final Request req = makeRequest(solrRequest, collection);
+    final ResponseParser parser = solrRequest.getResponseParser() == null ? this.parser : solrRequest.getResponseParser();
+
+    Throwable abortCause = null;
+    try {
+      final InputStreamResponseListener listener = new InputStreamResponseListener();
+      req.send(listener);
+      // blocks until response headers arrive (body is streamed afterwards)
+      final Response response = listener.get(idleTimeout, TimeUnit.MILLISECONDS);
+      final InputStream is = listener.getInputStream();
+      assert ObjectReleaseTracker.track(is);
+      return processErrorsAndResponse(solrRequest, parser, response, is);
+    } catch (final InterruptedException e) {
+      // restore the interrupt flag before propagating
+      Thread.currentThread().interrupt();
+      abortCause = e;
+      throw new RuntimeException(e);
+    } catch (final TimeoutException e) {
+      throw new SolrServerException("Timeout occured while waiting response from server at: " + req.getURI(), e);
+    } catch (final ExecutionException e) {
+      // unwrap the Jetty-side cause into the most specific exception we can throw
+      final Throwable cause = e.getCause();
+      abortCause = cause;
+      if (cause instanceof ConnectException) {
+        throw new SolrServerException("Server refused connection at: " + req.getURI(), cause);
+      }
+      if (cause instanceof SolrServerException) {
+        throw (SolrServerException) cause;
+      } else if (cause instanceof IOException) {
+        throw new SolrServerException("IOException occured when talking to server at: " + getBaseURL(), cause);
+      }
+      throw new SolrServerException(cause.getMessage(), cause);
+    } catch (SolrServerException | RuntimeException sse) {
+      abortCause = sse;
+      throw sse;
+    } finally {
+      if (abortCause != null) {
+        req.abort(abortCause);
+      }
+    }
+  }
+
+  /**
+   * Convenience overload: extracts the mime type and charset from the response's
+   * Content-Type header, then delegates to the full
+   * {@code processErrorsAndResponse(Response, ResponseParser, InputStream, ...)} variant.
+   */
+  private NamedList<Object> processErrorsAndResponse(final SolrRequest<?> solrRequest, final ResponseParser parser, final Response response, final InputStream is) throws SolrServerException {
+    final String contentType = response.getHeaders().get(HttpHeader.CONTENT_TYPE);
+    String mimeType = null;
+    String encoding = null;
+    if (contentType != null) {
+      mimeType = MimeTypes.getContentTypeWithoutCharset(contentType);
+      encoding = MimeTypes.getCharsetFromContentType(contentType);
+    }
+    return processErrorsAndResponse(response, parser, is, mimeType, encoding, isV2ApiRequest(solrRequest));
+  }
+
+  /**
+   * Adds an HTTP Basic Authorization header: per-request credentials take precedence
+   * over the client-level credentials configured via the builder; no header is set
+   * when neither is available.
+   */
+  private void setBasicAuthHeader(final SolrRequest<?> solrRequest, final Request req) {
+    if (solrRequest.getBasicAuthUser() != null && solrRequest.getBasicAuthPassword() != null) {
+      final String encoded = basicAuthCredentialsToAuthorizationString(solrRequest.getBasicAuthUser(), solrRequest.getBasicAuthPassword());
+      req.header("Authorization", encoded);
+    } else if (basicAuthAuthorizationStr != null) {
+      req.header("Authorization", basicAuthAuthorizationStr);
+    }
+  }
+
+  /** Encodes "user:pass" as a "Basic &lt;base64&gt;" Authorization header value (UTF-8). */
+  private String basicAuthCredentialsToAuthorizationString(final String user, final String pass) {
+    final String userPass = user + ":" + pass;
+    return "Basic " + Base64.getEncoder().encodeToString(userPass.getBytes(FALLBACK_CHARSET));
+  }
+
+  /** Builds the Jetty request for {@code solrRequest} and applies common decoration (auth, timeout, listeners, headers). */
+  private Request makeRequest(final SolrRequest<?> solrRequest, final String collection) throws SolrServerException, IOException {
+    final Request req = createRequest(solrRequest, collection);
+    decorateRequest(req, solrRequest);
+    return req;
+  }
+
+  /**
+   * Applies cross-cutting request setup: clears Accept-Encoding, sets the request
+   * timeout, stashes the caller's Principal as a request attribute, adds Basic auth,
+   * wires up registered listener factories, and copies the request's custom headers.
+   */
+  private void decorateRequest(final Request req, final SolrRequest<?> solrRequest) {
+    // passing null removes any default Accept-Encoding header
+    req.header(HttpHeader.ACCEPT_ENCODING, null);
+    req.timeout(idleTimeout, TimeUnit.MILLISECONDS);
+    if (solrRequest.getUserPrincipal() != null) {
+      req.attribute(REQ_PRINCIPAL_KEY, solrRequest.getUserPrincipal());
+    }
+
+    setBasicAuthHeader(solrRequest, req);
+    for (final HttpListenerFactory factory : listenerFactory) {
+      final HttpListenerFactory.RequestResponseListener listener = factory.get();
+      listener.onQueued(req);
+      req.onRequestBegin(listener);
+      req.onComplete(listener);
+    }
+
+    final Map<String, String> headers = solrRequest.getHeaders();
+    if (headers != null) {
+      for (final Map.Entry<String, String> entry : headers.entrySet()) {
+        req.header(entry.getKey(), entry.getValue());
+      }
+    }
+  }
+
+  /** Rewrites the first "/solr" path segment to "/api" for v2 API requests, keeping protocol/host/port. */
+  private String changeV2RequestEndpoint(final String basePath) throws MalformedURLException {
+    final URL oldURL = new URL(basePath);
+    final String newPath = oldURL.getPath().replaceFirst("/solr", "/api");
+    return new URL(oldURL.getProtocol(), oldURL.getHost(), oldURL.getPort(), newPath).toString();
+  }
+
+  /**
+   * Translates a SolrJ request into a Jetty {@link Request}. This is the method this
+   * modified client changes relative to stock Http2SolrClient: with
+   * {@code useMultiPartPost} enabled, POST/PUT requests that carry content streams
+   * (except UpdateRequests, i.e. commits/deletes) are forced onto the multipart path
+   * whenever the streams are named or the parameter-laden URL would exceed 4000 chars.
+   *
+   * @param solrRequest the SolrJ request (a V2RequestSupport is first converted to its V2 form)
+   * @param collection  target collection appended to the base path, or null
+   * @throws IllegalArgumentException when neither a request base path nor a server base URL is set
+   * @throws SolrServerException      for unsupported HTTP methods
+   */
+  private Request createRequest(SolrRequest<?> solrRequest, final String collection) throws IOException, SolrServerException {
+    if (solrRequest.getBasePath() == null && serverBaseUrl == null)
+      throw new IllegalArgumentException("Destination node is not provided!");
+
+    if (solrRequest instanceof V2RequestSupport) {
+      solrRequest = ((V2RequestSupport) solrRequest).getV2Request();
+    }
+    final SolrParams params = solrRequest.getParams();
+    // a content writer and content streams are mutually exclusive ways of supplying a body
+    final RequestWriter.ContentWriter contentWriter = requestWriter.getContentWriter(solrRequest);
+    Collection<ContentStream> streams = contentWriter == null ? requestWriter.getContentStreams(solrRequest) : null;
+    String path = requestWriter.getPath(solrRequest);
+    if (path == null || !path.startsWith("/")) {
+      path = DEFAULT_PATH;
+    }
+
+    ResponseParser parser = solrRequest.getResponseParser();
+    if (parser == null) {
+      parser = this.parser;
+    }
+
+    // The parser 'wt=' and 'version=' params are used instead of the original
+    // params
+    final ModifiableSolrParams wparams = new ModifiableSolrParams(params);
+    if (parser != null) {
+      wparams.set(CommonParams.WT, parser.getWriterType());
+      wparams.set(CommonParams.VERSION, parser.getVersion());
+    }
+
+    // TODO add invariantParams support
+
+    String basePath = solrRequest.getBasePath() == null ? serverBaseUrl : solrRequest.getBasePath();
+    if (collection != null)
+      basePath += "/" + collection;
+
+    if (solrRequest instanceof V2Request) {
+      if (System.getProperty("solr.v2RealPath") == null) {
+        basePath = changeV2RequestEndpoint(basePath);
+      } else {
+        basePath = serverBaseUrl + "/____v2";
+      }
+    }
+
+    if (SolrRequest.METHOD.GET == solrRequest.getMethod()) {
+      if (streams != null || contentWriter != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "GET can't send streams!");
+      }
+
+      return httpClient.newRequest(basePath + path + wparams.toQueryString()).method(HttpMethod.GET);
+    }
+
+    if (SolrRequest.METHOD.DELETE == solrRequest.getMethod()) {
+      return httpClient.newRequest(basePath + path + wparams.toQueryString()).method(HttpMethod.DELETE);
+    }
+
+    if (SolrRequest.METHOD.POST == solrRequest.getMethod() || SolrRequest.METHOD.PUT == solrRequest.getMethod()) {
+
+      final String url = basePath + path;
+      boolean hasNullStreamName = false;
+      if (streams != null) {
+        hasNullStreamName = streams.stream().anyMatch(cs -> cs.getName() == null);
+      }
+      // the URL the non-multipart content-writer path would use; its length gates multipart below
+      final String contentWriterUrl = url + toQueryString(wparams, false);
+
+//      final boolean isMultipart = streams != null && streams.size() > 1 && !hasNullStreamName;
+      boolean isMultipart;
+      // If the solrRequest is an UpdateRequest it means it is a commit or a delete request so we must use regular way (SolrJ default one) to set isMultipart
+      if (this.useMultiPartPost && !(solrRequest instanceof UpdateRequest)) {
+        final Collection<ContentStream> requestStreams = requestWriter.getContentStreams(solrRequest);
+        // Do we have streams?
+        if (requestStreams != null && requestStreams.size() > 0) {
+
+          // Also, is the contentWriter URL too big?
+          final boolean urlTooBig = contentWriterUrl.length() > 4000;
+          // System.out.println("RequestStreams present? "+(requestStreams != null && requestStreams.size() > 0)+"; hasNullStreamName? "+hasNullStreamName+"; url length = "+contentWriterUrl.length());
+          isMultipart = requestStreams != null && requestStreams.size() > 0 && ((solrRequest.getMethod() == SolrRequest.METHOD.POST && !hasNullStreamName) || urlTooBig);
+          if (isMultipart) {
+            // System.out.println("Overriding with multipart post")
+            streams = requestStreams;
+          }
+        } else {
+          isMultipart = false;
+        }
+      } else {
+        // SolrJ default way to set isMultipart
+        isMultipart = streams != null && streams.size() > 1 && !hasNullStreamName;
+      }
+
+      final HttpMethod method = SolrRequest.METHOD.POST == solrRequest.getMethod() ? HttpMethod.POST : HttpMethod.PUT;
+
+      if (contentWriter != null && !isMultipart) {
+        final Request req = httpClient.newRequest(url + wparams.toQueryString()).method(method);
+        final BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS();
+        contentWriter.write(baos);
+
+        // SOLR-16265: TODO reduce memory usage
+        return req.content(
+            // We're throwing this BAOS away, so no need to copy the byte[], just use the raw buf
+            new ByteBufferContentProvider(contentWriter.getContentType(), ByteBuffer.wrap(baos.getbuf(), 0, baos.size())));
+      } else if (streams == null || isMultipart) {
+        // send server list and request list as query string params
+        final ModifiableSolrParams queryParams = calculateQueryParams(this.queryParams, wparams);
+        queryParams.add(calculateQueryParams(solrRequest.getQueryParams(), wparams));
+        final Request req = httpClient.newRequest(url + queryParams.toQueryString()).method(method);
+        return fillContentStream(req, streams, wparams, isMultipart);
+      } else {
+        // It is has one stream, it is the post body, put the params in the URL
+        final ContentStream contentStream = streams.iterator().next();
+        return httpClient.newRequest(url + wparams.toQueryString()).method(method).content(new InputStreamContentProvider(contentStream.getStream()), contentStream.getContentType());
+      }
+    }
+
+    throw new SolrServerException("Unsupported method: " + solrRequest.getMethod());
+  }
+
+  /**
+   * Renders {@code params} as a query string starting with '?', using "&amp;amp;" as the
+   * separator when {@code xml} is true (for embedding in XML) and "&amp;" otherwise.
+   * Parameters with a null value array are emitted as a bare key.
+   *
+   * NOTE(review): in the non-null-value branch only the value is URL-encoded, while the
+   * bare-key branch encodes the key — confirm whether unencoded keys are intentional here.
+   */
+  public static String toQueryString(final SolrParams params, final boolean xml) {
+    final StringBuilder sb = new StringBuilder(128);
+    try {
+      final String amp = xml ? "&amp;" : "&";
+      boolean first = true;
+      final Iterator<String> names = params.getParameterNamesIterator();
+      while (names.hasNext()) {
+        final String key = names.next();
+        final String[] valarr = params.getParams(key);
+        if (valarr == null) {
+          // key with no values: emit the key alone (encoded)
+          sb.append(first ? "?" : amp);
+          sb.append(URLEncoder.encode(key, "UTF-8"));
+          first = false;
+        } else {
+          for (final String val : valarr) {
+            sb.append(first ? "?" : amp);
+            sb.append(key);
+            if (val != null) {
+              sb.append('=');
+              sb.append(URLEncoder.encode(val, "UTF-8"));
+            }
+            first = false;
+          }
+        }
+      }
+    } catch (final IOException e) {
+      throw new RuntimeException(e);
+    } // can't happen
+    return sb.toString();
+  }
+
+  /**
+   * Attaches a body to {@code req}: when {@code isMultipart}, builds a
+   * multipart/form-data body with one field part per parameter value and one file part
+   * per content stream (binary content type by default, empty name when unnamed);
+   * otherwise builds an application/x-www-form-urlencoded body from the parameters only.
+   *
+   * @return the same request, for chaining
+   */
+  private Request fillContentStream(final Request req, final Collection<ContentStream> streams, final ModifiableSolrParams wparams, final boolean isMultipart) throws IOException {
+    if (isMultipart) {
+      // multipart/form-data
+      final MultiPartContentProvider content = new MultiPartContentProvider();
+      final Iterator<String> iter = wparams.getParameterNamesIterator();
+      while (iter.hasNext()) {
+        final String key = iter.next();
+        final String[] vals = wparams.getParams(key);
+        if (vals != null) {
+          for (final String val : vals) {
+            content.addFieldPart(key, new StringContentProvider(val), null);
+          }
+        }
+      }
+      if (streams != null) {
+        for (final ContentStream contentStream : streams) {
+          String contentType = contentStream.getContentType();
+          if (contentType == null) {
+            contentType = BinaryResponseParser.BINARY_CONTENT_TYPE; // default
+          }
+          String name = contentStream.getName();
+          if (name == null) {
+            name = "";
+          }
+          // carry the per-part content type via the part's HTTP fields
+          final HttpFields fields = new HttpFields();
+          fields.add(HttpHeader.CONTENT_TYPE, contentType);
+          content.addFilePart(name, contentStream.getName(), new InputStreamContentProvider(contentStream.getStream()), fields);
+        }
+      }
+      req.content(content);
+    } else {
+      // application/x-www-form-urlencoded
+      final Fields fields = new Fields();
+      final Iterator<String> iter = wparams.getParameterNamesIterator();
+      while (iter.hasNext()) {
+        final String key = iter.next();
+        final String[] vals = wparams.getParams(key);
+        if (vals != null) {
+          for (final String val : vals) {
+            fields.add(key, val);
+          }
+        }
+      }
+      req.content(new FormContentProvider(fields, FALLBACK_CHARSET));
+    }
+
+    return req;
+  }
+
+  /** @return true when the caller wants the raw response stream (no parser, or an InputStreamResponseParser). */
+  private boolean wantStream(final ResponseParser processor) {
+    return processor == null || processor instanceof InputStreamResponseParser;
+  }
+
+  /**
+   * Performs HTTP-level status checks on {@code response} and parses the body stream into a
+   * {@link NamedList}, converting Solr error payloads into {@code RemoteSolrException}s.
+   *
+   * <p>The stream {@code is} is closed here in every case except when the raw stream itself is
+   * returned to the caller (under the {@code "stream"} key).
+   */
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  private NamedList<Object> processErrorsAndResponse(final Response response, final ResponseParser processor, final InputStream is, final String mimeType, final String encoding, final boolean isV2Api)
+      throws SolrServerException {
+    boolean shouldClose = true;
+    try {
+      // handle some http level checks before trying to parse the response
+      final int httpStatus = response.getStatus();
+
+      switch (httpStatus) {
+      case HttpStatus.OK_200:
+      case HttpStatus.BAD_REQUEST_400:
+      case HttpStatus.CONFLICT_409:
+        break;
+      case HttpStatus.MOVED_PERMANENTLY_301:
+      case HttpStatus.MOVED_TEMPORARILY_302:
+        if (!httpClient.isFollowRedirects()) {
+          throw new SolrServerException("Server at " + getBaseURL() + " sent back a redirect (" + httpStatus + ").");
+        }
+        break;
+      default:
+        // Other statuses are fatal only when the body cannot be parsed for details;
+        // otherwise the error payload is parsed below and reported with more context.
+        if (processor == null || mimeType == null) {
+          throw new RemoteSolrException(serverBaseUrl, httpStatus, "non ok status: " + httpStatus + ", message:" + response.getReason(), null);
+        }
+      }
+
+      // NOTE(review): this checks the client-level 'parser' field rather than the 'processor'
+      // argument. Upstream Http2SolrClient does the same, but confirm the asymmetry is intended.
+      if (wantStream(parser)) {
+        // no processor specified, return raw stream
+        final NamedList<Object> rsp = new NamedList<>();
+        rsp.add("stream", is);
+        // Only case where stream should not be closed
+        shouldClose = false;
+        return rsp;
+      }
+
+      // Reject responses whose mime type does not match what the parser expects, echoing the
+      // response body in the exception message to aid debugging.
+      final String procCt = processor.getContentType();
+      if (procCt != null) {
+        final String procMimeType = MimeTypes.getContentTypeWithoutCharset(procCt).trim().toLowerCase(Locale.ROOT);
+        if (!procMimeType.equals(mimeType)) {
+          // unexpected mime type
+          final String prefix = "Expected mime type " + procMimeType + " but got " + mimeType + ". ";
+          final String exceptionEncoding = encoding != null ? encoding : FALLBACK_CHARSET.name();
+          try {
+            final ByteArrayOutputStream body = new ByteArrayOutputStream();
+//            is.transferTo(body);
+            IOUtils.copy(is, body);
+            throw new RemoteSolrException(serverBaseUrl, httpStatus, prefix + body.toString(exceptionEncoding), null);
+          } catch (final IOException e) {
+            throw new RemoteSolrException(serverBaseUrl, httpStatus, "Could not parse response with encoding " + exceptionEncoding, e);
+          }
+        }
+      }
+
+      NamedList<Object> rsp;
+      try {
+        rsp = processor.processResponse(is, encoding);
+      } catch (final Exception e) {
+        throw new RemoteSolrException(serverBaseUrl, httpStatus, e.getMessage(), e);
+      }
+
+      // Remote execution errors carry their payload in the parsed response body rather than
+      // in the HTTP status line.
+      final Object error = rsp == null ? null : rsp.get("error");
+      if (error != null && (String.valueOf(getObjectByPath(error, true, errPath)).endsWith("ExceptionWithErrObject"))) {
+        throw RemoteExecutionException.create(serverBaseUrl, rsp);
+      }
+      if (httpStatus != HttpStatus.OK_200 && !isV2Api) {
+        NamedList<String> metadata = null;
+        String reason = null;
+        try {
+          if (error != null) {
+            reason = (String) Utils.getObjectByPath(error, false, Collections.singletonList("msg"));
+            if (reason == null) {
+              reason = (String) Utils.getObjectByPath(error, false, Collections.singletonList("trace"));
+            }
+            final Object metadataObj = Utils.getObjectByPath(error, false, Collections.singletonList("metadata"));
+            if (metadataObj instanceof NamedList) {
+              metadata = (NamedList<String>) metadataObj;
+            } else if (metadataObj instanceof List) {
+              // NamedList parsed as List convert to NamedList again
+              final List<Object> list = (List<Object>) metadataObj;
+              metadata = new NamedList<>(list.size() / 2);
+              for (int i = 0; i < list.size(); i += 2) {
+                metadata.add((String) list.get(i), (String) list.get(i + 1));
+              }
+            } else if (metadataObj instanceof Map) {
+              metadata = new NamedList((Map) metadataObj);
+            }
+          }
+        } catch (final Exception ex) {
+          /* Ignored - fall through and report the HTTP reason phrase instead */
+        }
+        if (reason == null) {
+          final StringBuilder msg = new StringBuilder();
+          msg.append(response.getReason()).append("\n\n").append("request: ").append(response.getRequest().getMethod());
+          try {
+            reason = java.net.URLDecoder.decode(msg.toString(), FALLBACK_CHARSET.name());
+          } catch (final UnsupportedEncodingException e) {
+            // Quiet
+          }
+        }
+        final RemoteSolrException rss = new RemoteSolrException(serverBaseUrl, httpStatus, reason, null);
+        if (metadata != null)
+          rss.setMetadata(metadata);
+        throw rss;
+      }
+      return rsp;
+    } finally {
+      if (shouldClose) {
+        try {
+          is.close();
+          assert ObjectReleaseTracker.release(is);
+        } catch (final IOException e) {
+          // quietly ignored; nothing useful can be done if close fails
+        }
+      }
+    }
+  }
+
+  /** Overrides the {@link RequestWriter} used to serialize outgoing requests. */
+  public void setRequestWriter(final RequestWriter requestWriter) {
+    this.requestWriter = requestWriter;
+  }
+
+  protected RequestWriter getRequestWriter() {
+    return requestWriter;
+  }
+
+  /** Enables or disables redirect following on the underlying Jetty HttpClient. */
+  public void setFollowRedirects(final boolean follow) {
+    httpClient.setFollowRedirects(follow);
+  }
+
+  public String getBaseURL() {
+    return serverBaseUrl;
+  }
+
+  /**
+   * Tracks in-flight async requests: a {@link Phaser} lets {@link #waitForComplete()} block
+   * until every outstanding request has finished, while a {@link Semaphore} caps the number of
+   * requests queued at once.
+   */
+  private static class AsyncTracker {
+    private static final int MAX_OUTSTANDING_REQUESTS = 1000;
+
+    // wait for async requests
+    private final Phaser phaser;
+    // maximum outstanding requests left
+    private final Semaphore available;
+    private final Request.QueuedListener queuedListener;
+    private final Response.CompleteListener completeListener;
+
+    AsyncTracker() {
+      // TODO: what about shared instances?
+      phaser = new Phaser(1);
+      available = new Semaphore(MAX_OUTSTANDING_REQUESTS, false);
+      queuedListener = request -> {
+        phaser.register();
+        try {
+          available.acquire();
+        } catch (final InterruptedException e) {
+          // Restore the interrupt flag so callers can still observe the interruption
+          // instead of it being silently swallowed here.
+          Thread.currentThread().interrupt();
+        }
+      };
+      completeListener = result -> {
+        phaser.arriveAndDeregister();
+        available.release();
+      };
+    }
+
+    int getMaxRequestsQueuedPerDestination() {
+      // comfortably above max outstanding requests
+      return MAX_OUTSTANDING_REQUESTS * 3;
+    }
+
+    /** Blocks until all registered requests have completed, then deregisters this waiter. */
+    public void waitForComplete() {
+      phaser.arriveAndAwaitAdvance();
+      phaser.arriveAndDeregister();
+    }
+  }
+
+  /** Builder for {@link ModifiedHttp2SolrClient}; mirrors Solr's {@code Http2SolrClient.Builder}. */
+  public static class Builder {
+
+    private ModifiedHttp2SolrClient http2SolrClient;
+    private SSLConfig sslConfig = defaultSSLConfig;
+    private Integer idleTimeout;
+    private Integer connectionTimeout;
+    private Integer maxConnectionsPerHost;
+    private String basicAuthUser;
+    private String basicAuthPassword;
+    private boolean useHttp1_1 = Boolean.getBoolean("solr.http1");
+    protected String baseSolrUrl;
+    private ExecutorService executor;
+
+    public Builder() {
+    }
+
+    public Builder(final String baseSolrUrl) {
+      this.baseSolrUrl = baseSolrUrl;
+    }
+
+    /** Builds the client; if post-construction setup fails, the client is closed before rethrowing. */
+    public ModifiedHttp2SolrClient build() {
+      final ModifiedHttp2SolrClient client = new ModifiedHttp2SolrClient(baseSolrUrl, this);
+      try {
+        httpClientBuilderSetup(client);
+      } catch (final RuntimeException e) {
+        try {
+          client.close();
+        } catch (final Exception exceptionOnClose) {
+          e.addSuppressed(exceptionOnClose);
+        }
+        throw e;
+      }
+      return client;
+    }
+
+    // Applies the HttpClientBuilderFactory (Kerberos or preemptive basic auth) selected via
+    // the standard Solr system property, if one is configured.
+    private void httpClientBuilderSetup(final ModifiedHttp2SolrClient client) {
+      final String factoryClassName = System.getProperty(HttpClientUtil.SYS_PROP_HTTP_CLIENT_BUILDER_FACTORY);
+      if (factoryClassName != null) {
+        log.debug("Using Http Builder Factory: {}", factoryClassName);
+        ModifiedHttpClientBuilderFactory factory;
+        try {
+          if (factoryClassName.contains("Krb5HttpClientBuilder")) {
+            factory = new ModifiedKrb5HttpClientBuilder();
+          } else if (factoryClassName.contains("PreemptiveBasicAuthClientBuilderFactory")) {
+            factory = new ModifiedPreemptiveBasicAuthClientBuilderFactory();
+          } else {
+            // Report the actual configured class name, not the literal string "factoryClassName".
+            throw new ClassNotFoundException(factoryClassName);
+          }
+        } catch (final ClassNotFoundException e) {
+          throw new RuntimeException("Unable to instantiate " + factoryClassName, e);
+        }
+        factory.setup(client);
+      }
+    }
+
+    /** Reuse {@code httpClient} connections pool */
+    public Builder withHttpClient(final ModifiedHttp2SolrClient httpClient) {
+      this.http2SolrClient = httpClient;
+      return this;
+    }
+
+    public Builder withExecutor(final ExecutorService executor) {
+      this.executor = executor;
+      return this;
+    }
+
+    public Builder withSSLConfig(final SSLConfig sslConfig) {
+      this.sslConfig = sslConfig;
+      return this;
+    }
+
+    /** Sets basic-auth credentials; both user and password must be given, or neither. */
+    public Builder withBasicAuthCredentials(final String user, final String pass) {
+      if (user != null || pass != null) {
+        if (user == null || pass == null) {
+          throw new IllegalStateException("Invalid Authentication credentials. Either both username and password or none must be provided");
+        }
+      }
+      this.basicAuthUser = user;
+      this.basicAuthPassword = pass;
+      return this;
+    }
+
+    /**
+     * Set maxConnectionsPerHost for http1 connections, maximum number http2 connections is limited by 4
+     */
+    public Builder maxConnectionsPerHost(final int max) {
+      this.maxConnectionsPerHost = max;
+      return this;
+    }
+
+    public Builder idleTimeout(final int idleConnectionTimeout) {
+      this.idleTimeout = idleConnectionTimeout;
+      return this;
+    }
+
+    public Builder useHttp1_1(final boolean useHttp1_1) {
+      this.useHttp1_1 = useHttp1_1;
+      return this;
+    }
+
+    public Builder connectionTimeout(final int connectionTimeOut) {
+      this.connectionTimeout = connectionTimeOut;
+      return this;
+    }
+  }
+
+  /** Returns the set of param keys forced onto the query string rather than the request body. */
+  public Set<String> getQueryParams() {
+    return queryParams;
+  }
+
+  /**
+   * Expert Method
+   *
+   * @param queryParams set of param keys to only send via the query string Note that the param will be sent as a query string if the key is part of this Set or the SolrRequest's query params.
+   * @see org.apache.solr.client.solrj.SolrRequest#getQueryParams
+   */
+  public void setQueryParams(final Set<String> queryParams) {
+    this.queryParams = queryParams;
+  }
+
+  /**
+   * Moves the params named in {@code queryParamNames} out of {@code wparams} and returns them in
+   * a new ModifiableSolrParams, so they can be sent on the query string instead of the body.
+   */
+  private ModifiableSolrParams calculateQueryParams(final Set<String> queryParamNames, final ModifiableSolrParams wparams) {
+    final ModifiableSolrParams movedToQueryString = new ModifiableSolrParams();
+    if (queryParamNames == null) {
+      return movedToQueryString;
+    }
+    for (final String name : queryParamNames) {
+      final String[] values = wparams.getParams(name);
+      if (values == null) {
+        continue;
+      }
+      movedToQueryString.add(name, values);
+      wparams.remove(name);
+    }
+    return movedToQueryString;
+  }
+
+  public ResponseParser getParser() {
+    return parser;
+  }
+
+  /** Sets the default {@link ResponseParser} applied when a request does not specify one. */
+  public void setParser(final ResponseParser processor) {
+    parser = processor;
+  }
+
+  /** Sets the JVM-wide default SSL configuration used by newly built clients. */
+  public static void setDefaultSSLConfig(final SSLConfig sslConfig) {
+    ModifiedHttp2SolrClient.defaultSSLConfig = sslConfig;
+  }
+
+  // public for testing, only used by tests
+  public static void resetSslContextFactory() {
+    ModifiedHttp2SolrClient.defaultSSLConfig = null;
+  }
+
+  /* package-private for testing */
+  // Builds a Jetty client SslContextFactory from the standard javax.net.ssl.* system properties.
+  static SslContextFactory.Client getDefaultSslContextFactory() {
+    final String checkPeerNameStr = System.getProperty(HttpClientUtil.SYS_PROP_CHECK_PEER_NAME);
+    boolean sslCheckPeerName = true;
+    // NOTE(review): when the property is unset, peer-name checking ends up disabled
+    // (sslCheckPeerName=false, so the constructor argument below becomes true). This mirrors
+    // upstream Http2SolrClient, but confirm it is the intended default.
+    if (checkPeerNameStr == null || "false".equalsIgnoreCase(checkPeerNameStr)) {
+      sslCheckPeerName = false;
+    }
+
+    final SslContextFactory.Client sslContextFactory = new SslContextFactory.Client(!sslCheckPeerName);
+
+    if (null != System.getProperty("javax.net.ssl.keyStore")) {
+      sslContextFactory.setKeyStorePath(System.getProperty("javax.net.ssl.keyStore"));
+    }
+    if (null != System.getProperty("javax.net.ssl.keyStorePassword")) {
+      sslContextFactory.setKeyStorePassword(System.getProperty("javax.net.ssl.keyStorePassword"));
+    }
+    if (null != System.getProperty("javax.net.ssl.keyStoreType")) {
+      sslContextFactory.setKeyStoreType(System.getProperty("javax.net.ssl.keyStoreType"));
+    }
+    if (null != System.getProperty("javax.net.ssl.trustStore")) {
+      sslContextFactory.setTrustStorePath(System.getProperty("javax.net.ssl.trustStore"));
+    }
+    if (null != System.getProperty("javax.net.ssl.trustStorePassword")) {
+      sslContextFactory.setTrustStorePassword(System.getProperty("javax.net.ssl.trustStorePassword"));
+    }
+    if (null != System.getProperty("javax.net.ssl.trustStoreType")) {
+      sslContextFactory.setTrustStoreType(System.getProperty("javax.net.ssl.trustStoreType"));
+    }
+
+    sslContextFactory.setEndpointIdentificationAlgorithm(System.getProperty("solr.jetty.ssl.verifyClientHostName"));
+
+    return sslContextFactory;
+  }
+
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttpClientBuilderFactory.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttpClientBuilderFactory.java
new file mode 100644
index 0000000..83a9bd9
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedHttpClientBuilderFactory.java
@@ -0,0 +1,19 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import java.io.Closeable;
+
+import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
+
+public interface ModifiedHttpClientBuilderFactory extends Closeable {
+
+  /**
+   * This method configures the {@linkplain SolrHttpClientBuilder} by overriding the configuration of passed SolrHttpClientBuilder or as a new instance.
+   *
+   * @param builder The instance of the {@linkplain SolrHttpClientBuilder} which should by configured (optional).
+   * @return the {@linkplain SolrHttpClientBuilder}
+   */
+  public SolrHttpClientBuilder getHttpClientBuilder(SolrHttpClientBuilder builder);
+
+  /** Hook for factories that must configure the Jetty-based client directly; no-op by default. */
+  public default void setup(final ModifiedHttp2SolrClient client) {
+  }
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedKrb5HttpClientBuilder.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedKrb5HttpClientBuilder.java
new file mode 100644
index 0000000..deb4dca
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedKrb5HttpClientBuilder.java
@@ -0,0 +1,225 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.nio.file.Paths;
+import java.security.Principal;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpRequestInterceptor;
+import org.apache.http.auth.AuthSchemeProvider;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.Credentials;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.client.config.AuthSchemes;
+import org.apache.http.config.Lookup;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.cookie.CookieSpecProvider;
+import org.apache.http.entity.BufferedHttpEntity;
+import org.apache.http.impl.auth.SPNegoSchemeFactory;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
+import org.apache.solr.client.solrj.impl.SolrPortAwareCookieSpecFactory;
+import org.eclipse.jetty.client.HttpAuthenticationStore;
+import org.eclipse.jetty.client.WWWAuthenticationProtocolHandler;
+import org.eclipse.jetty.client.util.SPNEGOAuthentication;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Kerberos/SPNEGO variant of Solr's Krb5HttpClientBuilder, adapted to configure the
+ * Jetty-based {@link ModifiedHttp2SolrClient} as well as Apache HttpClient builders.
+ */
+public class ModifiedKrb5HttpClientBuilder implements ModifiedHttpClientBuilderFactory {
+
+  public static final String LOGIN_CONFIG_PROP = "java.security.auth.login.config";
+  private static final String SPNEGO_OID = "1.3.6.1.5.5.2";
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  // Shared JAAS configuration; static, so all builders in the JVM see the same login config.
+  private static Configuration jaasConfig = new SolrJaasConfiguration();
+
+  public ModifiedKrb5HttpClientBuilder() {
+  }
+
+  /**
+   * The jaasConfig is static, which makes it problematic for testing in the same jvm. Call this function to regenerate the static config (this is not thread safe). Note: only used for tests
+   */
+  public static void regenerateJaasConfiguration() {
+    jaasConfig = new SolrJaasConfiguration();
+  }
+
+  public SolrHttpClientBuilder getBuilder() {
+    return getBuilder(HttpClientUtil.getHttpClientBuilder());
+  }
+
+  @Override
+  public void close() {
+    HttpClientUtil.removeRequestInterceptor(bufferedEntityInterceptor);
+  }
+
+  @Override
+  public SolrHttpClientBuilder getHttpClientBuilder(final SolrHttpClientBuilder builder) {
+    return builder == null ? getBuilder() : getBuilder(builder);
+  }
+
+  // Builds a Jetty SPNEGO authentication from the JAAS "Client" (or configured) app entry.
+  private SPNEGOAuthentication createSPNEGOAuthentication() {
+    final SPNEGOAuthentication authentication = new SPNEGOAuthentication(null) {
+
+      @Override
+      public boolean matches(final String type, final URI uri, final String realm) {
+        // Match on auth type only, regardless of target URI or realm.
+        return this.getType().equals(type);
+      }
+    };
+    final String clientAppName = System.getProperty("solr.kerberos.jaas.appname", "Client");
+    final AppConfigurationEntry[] entries = jaasConfig.getAppConfigurationEntry(clientAppName);
+    if (entries == null) {
+      log.warn("Could not find login configuration entry for {}. SPNego authentication may not be successful.", clientAppName);
+      return authentication;
+    }
+    if (entries.length != 1) {
+      log.warn("Multiple login modules are specified in the configuration file");
+      return authentication;
+    }
+
+    final Map<String, ?> options = entries[0].getOptions();
+    setAuthenticationOptions(authentication, options, (String) options.get("principal"));
+    return authentication;
+  }
+
+  // Copies keytab/ticket-cache settings from the JAAS options onto the SPNEGO authentication.
+  static void setAuthenticationOptions(final SPNEGOAuthentication authentication, final Map<String, ?> options, final String username) {
+    final String keyTab = (String) options.get("keyTab");
+    if (keyTab != null) {
+      authentication.setUserKeyTabPath(Paths.get(keyTab));
+    }
+    authentication.setServiceName("HTTP");
+    authentication.setUserName(username);
+    if ("true".equalsIgnoreCase((String) options.get("useTicketCache"))) {
+      authentication.setUseTicketCache(true);
+      final String ticketCachePath = (String) options.get("ticketCache");
+      if (ticketCachePath != null) {
+        authentication.setTicketCachePath(Paths.get(ticketCachePath));
+      }
+      authentication.setRenewTGT("true".equalsIgnoreCase((String) options.get("renewTGT")));
+    }
+  }
+
+  @Override
+  public void setup(final ModifiedHttp2SolrClient http2Client) {
+    // Register SPNEGO on the Jetty client and enable WWW-Authenticate challenge handling.
+    final HttpAuthenticationStore authenticationStore = new HttpAuthenticationStore();
+    authenticationStore.addAuthentication(createSPNEGOAuthentication());
+    http2Client.getHttpClient().setAuthenticationStore(authenticationStore);
+    http2Client.getProtocolHandlers().put(new WWWAuthenticationProtocolHandler(http2Client.getHttpClient()));
+  }
+
+  public SolrHttpClientBuilder getBuilder(final SolrHttpClientBuilder builder) {
+    if (System.getProperty(LOGIN_CONFIG_PROP) != null) {
+      final String configValue = System.getProperty(LOGIN_CONFIG_PROP);
+
+      if (configValue != null) {
+        log.info("Setting up SPNego auth with config: {}", configValue);
+        final String useSubjectCredsProp = "javax.security.auth.useSubjectCredsOnly";
+        final String useSubjectCredsVal = System.getProperty(useSubjectCredsProp);
+
+        // "javax.security.auth.useSubjectCredsOnly" should be false so that the underlying
+        // authentication mechanism can load the credentials from the JAAS configuration.
+        if (useSubjectCredsVal == null) {
+          System.setProperty(useSubjectCredsProp, "false");
+        } else if (!useSubjectCredsVal.toLowerCase(Locale.ROOT).equals("false")) {
+          // Don't overwrite the prop value if it's already been written to something else,
+          // but log because it is likely the Credentials won't be loaded correctly.
+          log.warn("System Property: {} set to: {} not false.  SPNego authentication may not be successful.", useSubjectCredsProp, useSubjectCredsVal);
+        }
+
+        javax.security.auth.login.Configuration.setConfiguration(jaasConfig);
+        // Enable only SPNEGO authentication scheme.
+
+        builder.setAuthSchemeRegistryProvider(() -> {
+          final Lookup<AuthSchemeProvider> authProviders = RegistryBuilder.<AuthSchemeProvider>create().register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, false)).build();
+          return authProviders;
+        });
+        // Get the credentials from the JAAS configuration rather than here
+        final Credentials useJaasCreds = new Credentials() {
+          @Override
+          public String getPassword() {
+            return null;
+          }
+
+          @Override
+          public Principal getUserPrincipal() {
+            return null;
+          }
+        };
+
+        HttpClientUtil.setCookiePolicy(SolrPortAwareCookieSpecFactory.POLICY_NAME);
+
+        builder.setCookieSpecRegistryProvider(() -> {
+          final SolrPortAwareCookieSpecFactory cookieFactory = new SolrPortAwareCookieSpecFactory();
+
+          final Lookup<CookieSpecProvider> cookieRegistry = RegistryBuilder.<CookieSpecProvider>create().register(SolrPortAwareCookieSpecFactory.POLICY_NAME, cookieFactory).build();
+
+          return cookieRegistry;
+        });
+
+        builder.setDefaultCredentialsProvider(() -> {
+          final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
+          credentialsProvider.setCredentials(AuthScope.ANY, useJaasCreds);
+          return credentialsProvider;
+        });
+        HttpClientUtil.addRequestInterceptor(bufferedEntityInterceptor);
+      }
+    } else {
+      log.warn("{} is configured without specifying system property '{}'", getClass().getName(), LOGIN_CONFIG_PROP);
+    }
+
+    return builder;
+  }
+
+  // Set a buffered entity based request interceptor
+  // NOTE(review): buffering presumably allows the entity to be replayed when the request is
+  // re-sent after an authentication challenge - confirm against upstream Krb5HttpClientBuilder.
+  private final HttpRequestInterceptor bufferedEntityInterceptor = (request, context) -> {
+    if (request instanceof HttpEntityEnclosingRequest) {
+      final HttpEntityEnclosingRequest enclosingRequest = ((HttpEntityEnclosingRequest) request);
+      final HttpEntity requestEntity = enclosingRequest.getEntity();
+      enclosingRequest.setEntity(new BufferedHttpEntity(requestEntity));
+    }
+  };
+
+  /**
+   * JAAS configuration wrapper that redirects the com.sun.security.jgss initiate app names to
+   * the Solr client entry, falling back to the JVM's base configuration otherwise.
+   */
+  public static class SolrJaasConfiguration extends javax.security.auth.login.Configuration {
+
+    private javax.security.auth.login.Configuration baseConfig;
+
+    // the com.sun.security.jgss appNames
+    private final Set<String> initiateAppNames = new HashSet<>(Arrays.asList("com.sun.security.jgss.krb5.initiate", "com.sun.security.jgss.initiate"));
+
+    public SolrJaasConfiguration() {
+      try {
+
+        this.baseConfig = javax.security.auth.login.Configuration.getConfiguration();
+      } catch (final SecurityException e) {
+        // No base configuration available; getAppConfigurationEntry will return null.
+        this.baseConfig = null;
+      }
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(final String appName) {
+      if (baseConfig == null)
+        return null;
+
+      if (log.isDebugEnabled()) {
+        log.debug("Login prop: {}", System.getProperty(LOGIN_CONFIG_PROP));
+      }
+
+      final String clientAppName = System.getProperty("solr.kerberos.jaas.appname", "Client");
+      if (initiateAppNames.contains(appName)) {
+        log.debug("Using AppConfigurationEntry for appName '{}' instead of: '{}'", clientAppName, appName);
+        return baseConfig.getAppConfigurationEntry(clientAppName);
+      }
+      return baseConfig.getAppConfigurationEntry(appName);
+    }
+  }
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBHttp2SolrClient.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBHttp2SolrClient.java
new file mode 100644
index 0000000..f941560
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBHttp2SolrClient.java
@@ -0,0 +1,186 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.ResponseParser;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.request.IsUpdateRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
+import org.apache.solr.client.solrj.util.AsyncListener;
+import org.apache.solr.client.solrj.util.Cancellable;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.util.NamedList;
+import org.slf4j.MDC;
+
+/**
+ * Load-balancing wrapper over a single shared {@link ModifiedHttp2SolrClient}, retrying failed
+ * requests against the remaining base URLs. Mirrors Solr's LBHttp2SolrClient.
+ */
+public class ModifiedLBHttp2SolrClient extends ModifiedLBSolrClient {
+  private static final long serialVersionUID = -1147138830059067321L;
+  private final ModifiedHttp2SolrClient httpClient;
+
+  public ModifiedLBHttp2SolrClient(final ModifiedHttp2SolrClient httpClient, final String... baseSolrUrls) {
+    super(Arrays.asList(baseSolrUrls));
+    this.httpClient = httpClient;
+  }
+
+  @Override
+  protected SolrClient getClient(final String baseUrl) {
+    // One shared client for all base URLs; the URL is set per request via setBasePath.
+    return httpClient;
+  }
+
+  @Override
+  public void setParser(final ResponseParser parser) {
+    super.setParser(parser);
+    this.httpClient.setParser(parser);
+  }
+
+  @Override
+  public void setRequestWriter(final RequestWriter writer) {
+    super.setRequestWriter(writer);
+    this.httpClient.setRequestWriter(writer);
+  }
+
+  @Override
+  public void setQueryParams(final Set<String> queryParams) {
+    super.setQueryParams(queryParams);
+    this.httpClient.setQueryParams(queryParams);
+  }
+
+  @Override
+  public void addQueryParams(final String queryOnlyParam) {
+    super.addQueryParams(queryOnlyParam);
+    this.httpClient.setQueryParams(getQueryParams());
+  }
+
+  /**
+   * Issues {@code req} asynchronously, retrying on other servers via {@link RetryListener} when
+   * the failure is retryable. Returns a handle that cancels the currently in-flight attempt.
+   */
+  public Cancellable asyncReq(final Req req, final AsyncListener<Rsp> asyncListener) {
+    final Rsp rsp = new Rsp();
+    final boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath());
+    final ServerIterator it = new ServerIterator(req, zombieServers);
+    asyncListener.onStart();
+    final AtomicBoolean cancelled = new AtomicBoolean(false);
+    final AtomicReference<Cancellable> currentCancellable = new AtomicReference<>();
+    final RetryListener retryListener = new RetryListener() {
+
+      @Override
+      public void onSuccess(final Rsp rsp) {
+        asyncListener.onSuccess(rsp);
+      }
+
+      @Override
+      public void onFailure(final Exception e, final boolean retryReq) {
+        if (retryReq) {
+          String url;
+          try {
+            url = it.nextOrError(e);
+          } catch (final SolrServerException ex) {
+            // No more servers to try; report the previous failure.
+            asyncListener.onFailure(e);
+            return;
+          }
+          try {
+            MDC.put("ModifiedLBSolrClient.url", url);
+            // Synchronize with the cancel handle so a cancellation between attempts is honored.
+            synchronized (cancelled) {
+              if (cancelled.get()) {
+                return;
+              }
+              final Cancellable cancellable = doRequest(url, req, rsp, isNonRetryable, it.isServingZombieServer(), this);
+              currentCancellable.set(cancellable);
+            }
+          } finally {
+            MDC.remove("ModifiedLBSolrClient.url");
+          }
+        } else {
+          asyncListener.onFailure(e);
+        }
+      }
+    };
+    try {
+      final Cancellable cancellable = doRequest(it.nextOrError(), req, rsp, isNonRetryable, it.isServingZombieServer(), retryListener);
+      currentCancellable.set(cancellable);
+    } catch (final SolrServerException e) {
+      asyncListener.onFailure(e);
+    }
+    return () -> {
+      synchronized (cancelled) {
+        cancelled.set(true);
+        if (currentCancellable.get() != null) {
+          currentCancellable.get().cancel();
+        }
+      }
+    };
+  }
+
+  // Internal callback pair: success delivers the response, failure says whether to retry.
+  private interface RetryListener {
+    void onSuccess(Rsp rsp);
+
+    void onFailure(Exception e, boolean retryReq);
+  }
+
+  /**
+   * Performs one attempt against {@code baseUrl}, classifying failures into retryable and
+   * non-retryable and maintaining the zombie-server list accordingly.
+   */
+  private Cancellable doRequest(final String baseUrl, final Req req, final Rsp rsp, final boolean isNonRetryable, final boolean isZombie, final RetryListener listener) {
+    rsp.server = baseUrl;
+    req.getRequest().setBasePath(baseUrl);
+    return ((ModifiedHttp2SolrClient) getClient(baseUrl)).asyncRequest(req.getRequest(), null, new AsyncListener<NamedList<Object>>() {
+      @Override
+      public void onSuccess(final NamedList<Object> result) {
+        rsp.rsp = result;
+        if (isZombie) {
+          zombieServers.remove(baseUrl);
+        }
+        listener.onSuccess(rsp);
+      }
+
+      @Override
+      public void onFailure(final Throwable oe) {
+        // Rethrow-and-catch to dispatch on the concrete exception type.
+        try {
+          throw (Exception) oe;
+        } catch (final BaseHttpSolrClient.RemoteExecutionException e) {
+          listener.onFailure(e, false);
+        } catch (final SolrException e) {
+          // we retry on 404 or 403 or 503 or 500
+          // unless it's an update - then we only retry on connect exception
+          if (!isNonRetryable && RETRY_CODES.contains(e.code())) {
+            listener.onFailure((!isZombie) ? addZombie(baseUrl, e) : e, true);
+          } else {
+            // Server is alive but the request was likely malformed or invalid
+            if (isZombie) {
+              zombieServers.remove(baseUrl);
+            }
+            listener.onFailure(e, false);
+          }
+        } catch (final SocketException e) {
+          if (!isNonRetryable || e instanceof ConnectException) {
+            listener.onFailure((!isZombie) ? addZombie(baseUrl, e) : e, true);
+          } else {
+            listener.onFailure(e, false);
+          }
+        } catch (final SocketTimeoutException e) {
+          if (!isNonRetryable) {
+            listener.onFailure((!isZombie) ? addZombie(baseUrl, e) : e, true);
+          } else {
+            listener.onFailure(e, false);
+          }
+        } catch (final SolrServerException e) {
+          final Throwable rootCause = e.getRootCause();
+          if (!isNonRetryable && rootCause instanceof IOException) {
+            listener.onFailure((!isZombie) ? addZombie(baseUrl, e) : e, true);
+          } else if (isNonRetryable && rootCause instanceof ConnectException) {
+            listener.onFailure((!isZombie) ? addZombie(baseUrl, e) : e, true);
+          } else {
+            listener.onFailure(e, false);
+          }
+        } catch (final Exception e) {
+          listener.onFailure(new SolrServerException(e), false);
+        }
+      }
+    });
+  }
+
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBSolrClient.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBSolrClient.java
new file mode 100644
index 0000000..dc8bbf2
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedLBSolrClient.java
@@ -0,0 +1,691 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+
+import java.io.IOException;
+import java.lang.ref.WeakReference;
+import java.net.ConnectException;
+import java.net.MalformedURLException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.solr.client.solrj.ResponseParser;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.request.IsUpdateRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.slf4j.MDC;
+
+/**
+ * Load-balancing SolrClient, adapted from Solr's LBSolrClient so the ManifoldCF
+ * Solr connector can route every request through its own HTTP client
+ * implementation (see ModifiedHttp2SolrClient). Servers move between an "alive"
+ * list and a "zombie" (dead) pool; zombies are periodically pinged and revived.
+ * Subclasses supply the per-URL client via {@link #getClient(String)}.
+ */
+public abstract class ModifiedLBSolrClient extends SolrClient {
+
+  // defaults
+  protected static final Set<Integer> RETRY_CODES = new HashSet<>(Arrays.asList(404, 403, 503, 500));
+  private static final int CHECK_INTERVAL = 60 * 1000; // 1 minute between checks
+  private static final int NONSTANDARD_PING_LIMIT = 5; // number of times we'll ping dead servers not in the server list
+
+  // keys to the maps are currently of the form "http://localhost:8983/solr"
+  // which should be equivalent to HttpSolrServer.getBaseURL()
+  private final Map<String, ServerWrapper> aliveServers = new LinkedHashMap<>();
+  // access to aliveServers should be synchronized on itself
+
+  protected final Map<String, ServerWrapper> zombieServers = new ConcurrentHashMap<>();
+
+  // changes to aliveServers are reflected in this array, no need to synchronize
+  private volatile ServerWrapper[] aliveServerList = new ServerWrapper[0];
+
+  // lazily started single-thread executor that pings zombies; see startAliveCheckExecutor()
+  private volatile ScheduledExecutorService aliveCheckExecutor;
+
+  // milliseconds between zombie-revival pings; see setAliveCheckInterval(int)
+  private int interval = CHECK_INTERVAL;
+  // round-robin counter used by pickServer()
+  private final AtomicInteger counter = new AtomicInteger(-1);
+
+  // cheap rows=0 query used to ping zombie servers in checkAZombieServer()
+  private static final SolrQuery solrQuery = new SolrQuery("*:*");
+  protected volatile ResponseParser parser;
+  protected volatile RequestWriter requestWriter;
+
+  // param keys that must be sent on the query string rather than in the body
+  protected Set<String> queryParams = new HashSet<>();
+
+  static {
+    solrQuery.setRows(0);
+    /**
+     * Default sort (if we don't supply a sort) is by score and since we request 0 rows any sorting and scoring is not necessary. SolrQuery.DOCID schema-independently specifies a non-scoring sort.
+     * <code>_docid_ asc</code> sort is efficient, <code>_docid_ desc</code> sort is not, so choose ascending DOCID sort.
+     */
+    solrQuery.setSort(SolrQuery.DOCID, SolrQuery.ORDER.asc);
+    // not a top-level request, we are interested only in the server being sent to i.e. it need not
+    // distribute our request to further servers
+    solrQuery.setDistrib(false);
+  }
+
+  /** Wraps a base URL together with its alive/zombie bookkeeping state. */
+  protected static class ServerWrapper {
+    final String baseUrl;
+
+    // "standard" servers are used by default. They normally live in the alive list
+    // and move to the zombie list when unavailable. When they become available again,
+    // they move back to the alive list.
+    boolean standard = true;
+
+    // consecutive failed ping attempts; non-standard zombies are dropped after
+    // NONSTANDARD_PING_LIMIT failures (see checkAZombieServer)
+    int failedPings = 0;
+
+    ServerWrapper(final String baseUrl) {
+      this.baseUrl = baseUrl;
+    }
+
+    public String getBaseUrl() {
+      return baseUrl;
+    }
+
+    @Override
+    public String toString() {
+      return baseUrl;
+    }
+
+    @Override
+    public int hashCode() {
+      return baseUrl.hashCode();
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+      if (this == obj)
+        return true;
+      if (!(obj instanceof ServerWrapper))
+        return false;
+      return baseUrl.equals(((ServerWrapper) obj).baseUrl);
+    }
+  }
+
+  /**
+   * Iterates over candidate base URLs for a single request. Live servers are
+   * produced first in the order given by the request; servers currently in the
+   * zombie pool are skipped but remembered, and up to Req.getNumDeadServersToTry()
+   * of them are replayed once the live list is exhausted. Also enforces the
+   * optional timeAllowed budget and the optional cap on servers tried.
+   */
+  protected static class ServerIterator {
+    String serverStr;
+    List<String> skipped;
+    int numServersTried;
+    Iterator<String> it;
+    Iterator<String> skippedIt;
+    String exceptionMessage;
+    long timeAllowedNano;
+    long timeOutTime;
+
+    final Map<String, ServerWrapper> zombieServers;
+    final Req req;
+
+    public ServerIterator(final Req req, final Map<String, ServerWrapper> zombieServers) {
+      this.it = req.getServers().iterator();
+      this.req = req;
+      this.zombieServers = zombieServers;
+      this.timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
+      this.timeOutTime = System.nanoTime() + timeAllowedNano;
+      fetchNext();
+    }
+
+    public synchronized boolean hasNext() {
+      return serverStr != null;
+    }
+
+    /** Advances serverStr to the next candidate URL, or null if none remain. */
+    private void fetchNext() {
+      serverStr = null;
+      if (req.numServersToTry != null && numServersTried > req.numServersToTry) {
+        // Fix: this branch triggers when numServersToTry is exceeded; the previous
+        // message ("Time allowed to handle this request exceeded") was copied from
+        // the time-budget path and was misleading.
+        exceptionMessage = "Number of servers allowed to try for this request exceeded";
+        return;
+      }
+
+      while (it.hasNext()) {
+        serverStr = it.next();
+        serverStr = normalize(serverStr);
+        // if the server is currently a zombie, just skip to the next one,
+        // remembering up to numDeadServersToTry of them for a last-resort pass
+        final ServerWrapper wrapper = zombieServers.get(serverStr);
+        if (wrapper != null) {
+          final int numDeadServersToTry = req.getNumDeadServersToTry();
+          if (numDeadServersToTry > 0) {
+            if (skipped == null) {
+              skipped = new ArrayList<>(numDeadServersToTry);
+              skipped.add(wrapper.getBaseUrl());
+            } else if (skipped.size() < numDeadServersToTry) {
+              skipped.add(wrapper.getBaseUrl());
+            }
+          }
+          continue;
+        }
+
+        break;
+      }
+      // live servers exhausted: fall back to the skipped (zombie) servers, if any
+      if (serverStr == null && skipped != null) {
+        if (skippedIt == null) {
+          skippedIt = skipped.iterator();
+        }
+        if (skippedIt.hasNext()) {
+          serverStr = skippedIt.next();
+        }
+      }
+    }
+
+    /** True once we have started replaying previously-skipped zombie servers. */
+    boolean isServingZombieServer() {
+      return skippedIt != null;
+    }
+
+    public synchronized String nextOrError() throws SolrServerException {
+      return nextOrError(null);
+    }
+
+    /**
+     * Returns the next candidate URL, or throws a SolrServerException (carrying
+     * previousEx as the cause) when the time budget, the server cap, or the
+     * candidate list is exhausted.
+     */
+    public synchronized String nextOrError(final Exception previousEx) throws SolrServerException {
+      String suffix = "";
+      if (previousEx == null) {
+        suffix = ":" + zombieServers.keySet();
+      }
+      // Skipping check time exceeded for the first request
+      if (numServersTried > 0 && isTimeExceeded(timeAllowedNano, timeOutTime)) {
+        throw new SolrServerException("Time allowed to handle this request exceeded" + suffix, previousEx);
+      }
+      if (serverStr == null) {
+        throw new SolrServerException("No live SolrServers available to handle this request" + suffix, previousEx);
+      }
+      numServersTried++;
+      if (req.getNumServersToTry() != null && numServersTried > req.getNumServersToTry()) {
+        throw new SolrServerException("No live SolrServers available to handle this request:" + " numServersTried=" + numServersTried + " numServersToTry=" + req.getNumServersToTry() + suffix,
+            previousEx);
+      }
+      final String rs = serverStr;
+      fetchNext();
+      return rs;
+    }
+  }
+
+  // Req should be parameterized too, but that touches a whole lotta code
+  /** Bundles a SolrRequest with the ordered list of candidate server URLs. */
+  public static class Req {
+    protected SolrRequest<?> request;
+    protected List<String> servers;
+    protected int numDeadServersToTry;
+    private final Integer numServersToTry;
+
+    public Req(final SolrRequest<?> request, final List<String> servers) {
+      this(request, servers, null);
+    }
+
+    public Req(final SolrRequest<?> request, final List<String> servers, final Integer numServersToTry) {
+      this.request = request;
+      this.servers = servers;
+      // default: allow every listed server to be tried even if currently dead
+      this.numDeadServersToTry = servers.size();
+      this.numServersToTry = numServersToTry;
+    }
+
+    public SolrRequest<?> getRequest() {
+      return request;
+    }
+
+    public List<String> getServers() {
+      return servers;
+    }
+
+    /**
+     * @return the number of dead servers to try if there are no live servers left
+     */
+    public int getNumDeadServersToTry() {
+      return numDeadServersToTry;
+    }
+
+    /**
+     * @param numDeadServersToTry The number of dead servers to try if there are no live servers left. Defaults to the number of servers in this request.
+     */
+    public void setNumDeadServersToTry(final int numDeadServersToTry) {
+      this.numDeadServersToTry = numDeadServersToTry;
+    }
+
+    public Integer getNumServersToTry() {
+      return numServersToTry;
+    }
+  }
+
+  /** Pairs a response with the server that produced it. */
+  public static class Rsp {
+    protected String server;
+    protected NamedList<Object> rsp;
+
+    /** The response from the server */
+    public NamedList<Object> getResponse() {
+      return rsp;
+    }
+
+    /** The server that returned the response */
+    public String getServer() {
+      return server;
+    }
+  }
+
+  /**
+   * Creates a load balancer over the given base Solr URLs; all start out alive.
+   */
+  public ModifiedLBSolrClient(final List<String> baseSolrUrls) {
+    if (!baseSolrUrls.isEmpty()) {
+      for (final String s : baseSolrUrls) {
+        final ServerWrapper wrapper = createServerWrapper(s);
+        aliveServers.put(wrapper.getBaseUrl(), wrapper);
+      }
+      updateAliveList();
+    }
+  }
+
+  /** Re-snapshots aliveServers into the lock-free aliveServerList array. */
+  protected void updateAliveList() {
+    synchronized (aliveServers) {
+      aliveServerList = aliveServers.values().toArray(new ServerWrapper[0]);
+    }
+  }
+
+  protected ServerWrapper createServerWrapper(final String baseUrl) {
+    return new ServerWrapper(baseUrl);
+  }
+
+  public Set<String> getQueryParams() {
+    return queryParams;
+  }
+
+  /**
+   * Expert Method.
+   *
+   * @param queryParams set of param keys to only send via the query string
+   */
+  public void setQueryParams(final Set<String> queryParams) {
+    this.queryParams = queryParams;
+  }
+
+  public void addQueryParams(final String queryOnlyParam) {
+    this.queryParams.add(queryOnlyParam);
+  }
+
+  /** Strips a single trailing slash from a base URL, if present. */
+  public static String normalize(String server) {
+    final int last = server.length() - 1;
+    return (last >= 0 && server.charAt(last) == '/') ? server.substring(0, last) : server;
+  }
+
+  /**
+   * Tries to query a live server from the list provided in Req. Servers in the dead pool are skipped. If a request fails due to an IOException, the server is moved to the dead pool for a certain
+   * period of time, or until a test request on that server succeeds.
+   *
+   * <p>
+   * Servers are queried in the exact order given (except servers currently in the dead pool are skipped). If no live servers from the provided list remain to be tried, a number of previously skipped
+   * dead servers will be tried. Req.getNumDeadServersToTry() controls how many dead servers will be tried.
+   *
+   * <p>
+   * If no live servers are found a SolrServerException is thrown.
+   *
+   * @param req contains both the request as well as the list of servers to query
+   * @return the result of the request
+   * @throws IOException If there is a low-level I/O error.
+   */
+  public Rsp request(final Req req) throws SolrServerException, IOException {
+    final Rsp rsp = new Rsp();
+    Exception ex = null;
+    // updates and admin requests only retry on connection failures, never on HTTP error codes
+    final boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath());
+    final ServerIterator serverIterator = new ServerIterator(req, zombieServers);
+    String serverStr;
+    while ((serverStr = serverIterator.nextOrError(ex)) != null) {
+      try {
+        // expose the target URL in logging context for this attempt
+        MDC.put("ModifiedLBSolrClient.url", serverStr);
+        ex = doRequest(serverStr, req, rsp, isNonRetryable, serverIterator.isServingZombieServer());
+        if (ex == null) {
+          return rsp; // SUCCESS
+        }
+      } finally {
+        MDC.remove("ModifiedLBSolrClient.url");
+      }
+    }
+    throw new SolrServerException("No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
+  }
+
+  /**
+   * @return time allowed in nanos, returns -1 if no time_allowed is specified.
+   */
+  private static long getTimeAllowedInNanos(final SolrRequest<?> req) {
+    final SolrParams reqParams = req.getParams();
+    return reqParams == null ? -1 : TimeUnit.NANOSECONDS.convert(reqParams.getInt(CommonParams.TIME_ALLOWED, -1), TimeUnit.MILLISECONDS);
+  }
+
+  // a non-positive timeAllowedNano (i.e. no TIME_ALLOWED param) disables the check
+  private static boolean isTimeExceeded(final long timeAllowedNano, final long timeOutTime) {
+    return timeAllowedNano > 0 && System.nanoTime() > timeOutTime;
+  }
+
+  /**
+   * Sends the request to a single base URL and classifies any failure.
+   *
+   * @return null on success; the (possibly zombie-registering) exception when the
+   *         failure is retryable on another server. Non-retryable failures are
+   *         rethrown instead of returned.
+   */
+  protected Exception doRequest(final String baseUrl, final Req req, final Rsp rsp, final boolean isNonRetryable, final boolean isZombie) throws SolrServerException, IOException {
+    Exception ex = null;
+    try {
+      rsp.server = baseUrl;
+      req.getRequest().setBasePath(baseUrl);
+      rsp.rsp = getClient(baseUrl).request(req.getRequest(), (String) null);
+      if (isZombie) {
+        // the zombie answered successfully; resurrect it
+        zombieServers.remove(baseUrl);
+      }
+    } catch (final BaseHttpSolrClient.RemoteExecutionException e) {
+      // remote execution errors are never retried on another server
+      throw e;
+    } catch (final SolrException e) {
+      // we retry on 404 or 403 or 503 or 500
+      // unless it's an update - then we only retry on connect exception
+      if (!isNonRetryable && RETRY_CODES.contains(e.code())) {
+        ex = (!isZombie) ? addZombie(baseUrl, e) : e;
+      } else {
+        // Server is alive but the request was likely malformed or invalid
+        if (isZombie) {
+          zombieServers.remove(baseUrl);
+        }
+        throw e;
+      }
+    } catch (final SocketException e) {
+      if (!isNonRetryable || e instanceof ConnectException) {
+        ex = (!isZombie) ? addZombie(baseUrl, e) : e;
+      } else {
+        throw e;
+      }
+    } catch (final SocketTimeoutException e) {
+      if (!isNonRetryable) {
+        ex = (!isZombie) ? addZombie(baseUrl, e) : e;
+      } else {
+        throw e;
+      }
+    } catch (final SolrServerException e) {
+      final Throwable rootCause = e.getRootCause();
+      if (!isNonRetryable && rootCause instanceof IOException) {
+        ex = (!isZombie) ? addZombie(baseUrl, e) : e;
+      } else if (isNonRetryable && rootCause instanceof ConnectException) {
+        ex = (!isZombie) ? addZombie(baseUrl, e) : e;
+      } else {
+        throw e;
+      }
+    } catch (final Exception e) {
+      throw new SolrServerException(e);
+    }
+
+    return ex;
+  }
+
+  /** Supplies the concrete SolrClient used to talk to the given base URL. */
+  protected abstract SolrClient getClient(String baseUrl);
+
+  /**
+   * Registers the server as a (non-standard) zombie and starts the revival
+   * pinger; returns the supplied exception unchanged for caller convenience.
+   */
+  protected Exception addZombie(final String serverStr, final Exception e) {
+    final ServerWrapper wrapper = createServerWrapper(serverStr);
+    wrapper.standard = false;
+    zombieServers.put(serverStr, wrapper);
+    startAliveCheckExecutor();
+    return e;
+  }
+
+  /**
+   * LBHttpSolrServer keeps pinging the dead servers at fixed interval to find if it is alive. Use this to set that interval
+   *
+   * @param interval time in milliseconds
+   */
+  public void setAliveCheckInterval(final int interval) {
+    if (interval <= 0) {
+      throw new IllegalArgumentException("Alive check interval must be " + "positive, specified value = " + interval);
+    }
+    this.interval = interval;
+  }
+
+  private void startAliveCheckExecutor() {
+    // double-checked locking, but it's OK because we don't *do* anything with aliveCheckExecutor
+    // if it's not null.
+    if (aliveCheckExecutor == null) {
+      synchronized (this) {
+        if (aliveCheckExecutor == null) {
+          aliveCheckExecutor = Executors.newSingleThreadScheduledExecutor(new SolrNamedThreadFactory("aliveCheckExecutor"));
+          aliveCheckExecutor.scheduleAtFixedRate(getAliveCheckRunner(new WeakReference<>(this)), this.interval, this.interval, TimeUnit.MILLISECONDS);
+        }
+      }
+    }
+  }
+
+  // WeakReference so the scheduled task does not keep a closed/discarded client
+  // (and its executor thread) reachable forever
+  private static Runnable getAliveCheckRunner(final WeakReference<ModifiedLBSolrClient> lbRef) {
+    return () -> {
+      final ModifiedLBSolrClient lb = lbRef.get();
+      if (lb != null && lb.zombieServers != null) {
+        for (final Object zombieServer : lb.zombieServers.values()) {
+          lb.checkAZombieServer((ServerWrapper) zombieServer);
+        }
+      }
+    };
+  }
+
+  public ResponseParser getParser() {
+    return parser;
+  }
+
+  /**
+   * Changes the {@link ResponseParser} that will be used for the internal SolrServer objects.
+   *
+   * @param parser Default Response Parser chosen to parse the response if the parser were not specified as part of the request.
+   * @see org.apache.solr.client.solrj.SolrRequest#getResponseParser()
+   */
+  public void setParser(final ResponseParser parser) {
+    this.parser = parser;
+  }
+
+  /**
+   * Changes the {@link RequestWriter} that will be used for the internal SolrServer objects.
+   *
+   * @param requestWriter Default RequestWriter, used to encode requests sent to the server.
+   */
+  public void setRequestWriter(final RequestWriter requestWriter) {
+    this.requestWriter = requestWriter;
+  }
+
+  public RequestWriter getRequestWriter() {
+    return requestWriter;
+  }
+
+  /**
+   * Pings a zombie server with the cheap static rows=0 query; on a status-0
+   * response the server is moved back to the alive list, otherwise its failed
+   * ping count grows and non-standard zombies are eventually dropped.
+   */
+  private void checkAZombieServer(final ServerWrapper zombieServer) {
+    try {
+      final QueryRequest queryRequest = new QueryRequest(solrQuery);
+      queryRequest.setBasePath(zombieServer.baseUrl);
+      final QueryResponse resp = queryRequest.process(getClient(zombieServer.getBaseUrl()));
+      if (resp.getStatus() == 0) {
+        // server has come back up.
+        // make sure to remove from zombies before adding to alive to avoid a race condition
+        // where another thread could mark it down, move it back to zombie, and then we delete
+        // from zombie and lose it forever.
+        final ServerWrapper wrapper = zombieServers.remove(zombieServer.getBaseUrl());
+        if (wrapper != null) {
+          wrapper.failedPings = 0;
+          if (wrapper.standard) {
+            addToAlive(wrapper);
+          }
+        } else {
+          // something else already moved the server from zombie to alive
+        }
+      }
+    } catch (final Exception e) {
+      // Expected. The server is still down.
+      zombieServer.failedPings++;
+
+      // If the server doesn't belong in the standard set belonging to this load balancer
+      // then simply drop it after a certain number of failed pings.
+      if (!zombieServer.standard && zombieServer.failedPings >= NONSTANDARD_PING_LIMIT) {
+        zombieServers.remove(zombieServer.getBaseUrl());
+      }
+    }
+  }
+
+  /** Removes the server from the alive list; returns the removed wrapper or null. */
+  private ServerWrapper removeFromAlive(final String key) {
+    synchronized (aliveServers) {
+      final ServerWrapper wrapper = aliveServers.remove(key);
+      if (wrapper != null)
+        updateAliveList();
+      return wrapper;
+    }
+  }
+
+  private void addToAlive(final ServerWrapper wrapper) {
+    synchronized (aliveServers) {
+      final ServerWrapper prev = aliveServers.put(wrapper.getBaseUrl(), wrapper);
+      // TODO: warn if there was a previous entry?
+      updateAliveList();
+    }
+  }
+
+  public void addSolrServer(final String server) throws MalformedURLException {
+    addToAlive(createServerWrapper(server));
+  }
+
+  /**
+   * Removes the server from both the alive and zombie pools.
+   * NOTE(review): always returns null — presumably the signature is retained for
+   * compatibility with Solr's LBSolrClient; confirm before relying on the return.
+   */
+  public String removeSolrServer(String server) {
+    try {
+      server = new URL(server).toExternalForm();
+    } catch (final MalformedURLException e) {
+      throw new RuntimeException(e);
+    }
+    if (server.endsWith("/")) {
+      server = server.substring(0, server.length() - 1);
+    }
+
+    // there is a small race condition here - if the server is in the process of being moved between
+    // lists, we could fail to remove it.
+    removeFromAlive(server);
+    zombieServers.remove(server);
+    return null;
+  }
+
+  /**
+   * Tries to query a live server. A SolrServerException is thrown if all servers are dead. If the request failed due to IOException then the live server is moved to dead pool and the request is
+   * retried on another live server. After live servers are exhausted, any servers previously marked as dead will be tried before failing the request.
+   *
+   * @param request the SolrRequest.
+   * @return response
+   * @throws IOException If there is a low-level I/O error.
+   */
+  @Override
+  public NamedList<Object> request(final SolrRequest<?> request, final String collection) throws SolrServerException, IOException {
+    return request(request, collection, null);
+  }
+
+  /**
+   * Same as {@link #request(SolrRequest, String)} but caps the number of servers
+   * attempted.
+   *
+   * @param numServersToTry maximum number of servers to attempt, or null to allow all alive servers
+   */
+  public NamedList<Object> request(final SolrRequest<?> request, final String collection, final Integer numServersToTry) throws SolrServerException, IOException {
+    Exception ex = null;
+    final ServerWrapper[] serverList = aliveServerList;
+
+    final int maxTries = (numServersToTry == null ? serverList.length : numServersToTry.intValue());
+    int numServersTried = 0;
+    Map<String, ServerWrapper> justFailed = null;
+
+    boolean timeAllowedExceeded = false;
+    final long timeAllowedNano = getTimeAllowedInNanos(request);
+    final long timeOutTime = System.nanoTime() + timeAllowedNano;
+    // pass 1: round-robin over the currently-alive servers
+    for (int attempts = 0; attempts < maxTries; attempts++) {
+      // NOTE: intentional assignment inside the condition
+      if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) {
+        break;
+      }
+
+      final ServerWrapper wrapper = pickServer(serverList, request);
+      try {
+        ++numServersTried;
+        request.setBasePath(wrapper.baseUrl);
+        return getClient(wrapper.getBaseUrl()).request(request, collection);
+      } catch (final SolrException e) {
+        // Server is alive but the request was malformed or invalid
+        throw e;
+      } catch (final SolrServerException e) {
+        if (e.getRootCause() instanceof IOException) {
+          ex = e;
+          moveAliveToDead(wrapper);
+          if (justFailed == null)
+            justFailed = new HashMap<>();
+          justFailed.put(wrapper.getBaseUrl(), wrapper);
+        } else {
+          throw e;
+        }
+      } catch (final Exception e) {
+        throw new SolrServerException(e);
+      }
+    }
+
+    // try other standard servers that we didn't try just now
+    for (final ServerWrapper wrapper : zombieServers.values()) {
+      if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) {
+        break;
+      }
+
+      if (wrapper.standard == false || justFailed != null && justFailed.containsKey(wrapper.getBaseUrl()))
+        continue;
+      try {
+        ++numServersTried;
+        request.setBasePath(wrapper.baseUrl);
+        final NamedList<Object> rsp = getClient(wrapper.baseUrl).request(request, collection);
+        // remove from zombie list *before* adding to alive to avoid a race that could lose a server
+        zombieServers.remove(wrapper.getBaseUrl());
+        addToAlive(wrapper);
+        return rsp;
+      } catch (final SolrException e) {
+        // Server is alive but the request was malformed or invalid
+        throw e;
+      } catch (final SolrServerException e) {
+        if (e.getRootCause() instanceof IOException) {
+          ex = e;
+          // still dead
+        } else {
+          throw e;
+        }
+      } catch (final Exception e) {
+        throw new SolrServerException(e);
+      }
+    }
+
+    // every candidate failed: build the most specific failure message we can
+    final String solrServerExceptionMessage;
+    if (timeAllowedExceeded) {
+      solrServerExceptionMessage = "Time allowed to handle this request exceeded";
+    } else {
+      if (numServersToTry != null && numServersTried > numServersToTry.intValue()) {
+        solrServerExceptionMessage = "No live SolrServers available to handle this request:" + " numServersTried=" + numServersTried + " numServersToTry=" + numServersToTry.intValue();
+      } else {
+        solrServerExceptionMessage = "No live SolrServers available to handle this request";
+      }
+    }
+    if (ex == null) {
+      throw new SolrServerException(solrServerExceptionMessage);
+    } else {
+      throw new SolrServerException(solrServerExceptionMessage, ex);
+    }
+  }
+
+  /**
+   * Pick a server from list to execute request. By default servers are picked in round-robin manner, custom classes can override this method for more advance logic
+   *
+   * @param aliveServerList list of currently alive servers
+   * @param request         the request will be sent to the picked server
+   * @return the picked server
+   */
+  protected ServerWrapper pickServer(final ServerWrapper[] aliveServerList, final SolrRequest<?> request) {
+    // masking with Integer.MAX_VALUE keeps the index non-negative when the counter overflows
+    final int count = counter.incrementAndGet() & Integer.MAX_VALUE;
+    return aliveServerList[count % aliveServerList.length];
+  }
+
+  /** Moves a server from the alive list to the zombie pool and starts the pinger. */
+  private void moveAliveToDead(ServerWrapper wrapper) {
+    wrapper = removeFromAlive(wrapper.getBaseUrl());
+    if (wrapper == null)
+      return; // another thread already detected the failure and removed it
+    zombieServers.put(wrapper.getBaseUrl(), wrapper);
+    startAliveCheckExecutor();
+  }
+
+  /** Shuts down the zombie-pinging executor, if it was ever started. */
+  @Override
+  public void close() {
+    synchronized (this) {
+      if (aliveCheckExecutor != null) {
+        aliveCheckExecutor.shutdownNow();
+        ExecutorUtil.shutdownAndAwaitTermination(aliveCheckExecutor);
+      }
+    }
+  }
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedPreemptiveBasicAuthClientBuilderFactory.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedPreemptiveBasicAuthClientBuilderFactory.java
new file mode 100644
index 0000000..2a1f82f
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedPreemptiveBasicAuthClientBuilderFactory.java
@@ -0,0 +1,137 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.impl.auth.BasicScheme;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.PreemptiveAuth;
+import org.apache.solr.client.solrj.impl.PreemptiveBasicAuthClientBuilderFactory;
+import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
+import org.apache.solr.client.solrj.util.SolrBasicAuthentication;
+import org.apache.solr.common.StringUtils;
+import org.apache.solr.common.params.MapSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.StrUtils;
+import org.eclipse.jetty.client.HttpAuthenticationStore;
+import org.eclipse.jetty.client.ProxyAuthenticationProtocolHandler;
+import org.eclipse.jetty.client.WWWAuthenticationProtocolHandler;
+
+/**
+ * Adaptation of Solr's PreemptiveBasicAuthClientBuilderFactory that can also
+ * configure a ModifiedHttp2SolrClient (Jetty-based) with preemptive HTTP Basic
+ * authentication. Credentials come from the system properties resolved by
+ * {@link CredentialsResolver} or from explicit setup(...) arguments.
+ */
+public class ModifiedPreemptiveBasicAuthClientBuilderFactory implements ModifiedHttpClientBuilderFactory {
+  /**
+   * A system property used to specify a properties file containing default parameters used for creating a HTTP client. This is specifically useful for configuring the HTTP basic auth credentials
+   * (i.e. username/password). The name of the property must match the relevant Solr config property name.
+   */
+  public static final String SYS_PROP_HTTP_CLIENT_CONFIG = "solr.httpclient.config";
+
+  /**
+   * A system property to configure the Basic auth credentials via a java system property. Since this will expose the password on the command-line, it is not very secure. But this mechanism is added
+   * for backwards compatibility.
+   */
+  public static final String SYS_PROP_BASIC_AUTH_CREDENTIALS = "basicauth";
+
+  // shared Apache-HttpClient interceptor that attaches Basic credentials preemptively
+  private static PreemptiveAuth requestInterceptor = new PreemptiveAuth(new BasicScheme());
+
+  // resolves default credentials from system properties at class-load time
+  private static CredentialsResolver CREDENTIAL_RESOLVER = new CredentialsResolver();
+
+  /**
+   * This method enables configuring system wide defaults (apart from using a config file based approach).
+   */
+  public static void setDefaultSolrParams(final SolrParams params) {
+    CREDENTIAL_RESOLVER.defaultParams = params;
+  }
+
+  @Override
+  public void close() throws IOException {
+    HttpClientUtil.removeRequestInterceptor(requestInterceptor);
+  }
+
+  /**
+   * Configures the given client with the default credentials resolved from
+   * system properties.
+   *
+   * @throws IllegalArgumentException if no credentials were configured
+   */
+  @Override
+  public void setup(final ModifiedHttp2SolrClient client) {
+    // Guard: defaultParams is null when neither the basicauth system property nor
+    // the config file was provided; fail with the same clear message used by the
+    // two-arg setup instead of an opaque NullPointerException.
+    if (CREDENTIAL_RESOLVER.defaultParams == null) {
+      throw new IllegalArgumentException("username & password must be specified with " + getClass().getName());
+    }
+    final String basicAuthUser = CREDENTIAL_RESOLVER.defaultParams.get(HttpClientUtil.PROP_BASIC_AUTH_USER);
+    final String basicAuthPass = CREDENTIAL_RESOLVER.defaultParams.get(HttpClientUtil.PROP_BASIC_AUTH_PASS);
+    this.setup(client, basicAuthUser, basicAuthPass);
+  }
+
+  /**
+   * Installs preemptive Basic auth on the Jetty-backed client: registers the
+   * credentials in an authentication store and (re)adds the WWW/Proxy
+   * authentication protocol handlers.
+   *
+   * @throws IllegalArgumentException if either credential is null
+   */
+  public void setup(final ModifiedHttp2SolrClient client, final String basicAuthUser, final String basicAuthPass) {
+    if (basicAuthUser == null || basicAuthPass == null) {
+      throw new IllegalArgumentException("username & password must be specified with " + getClass().getName());
+    }
+
+    final HttpAuthenticationStore authenticationStore = new HttpAuthenticationStore();
+    authenticationStore.addAuthentication(new SolrBasicAuthentication(basicAuthUser, basicAuthPass));
+    client.getHttpClient().setAuthenticationStore(authenticationStore);
+    client.getProtocolHandlers().put(new WWWAuthenticationProtocolHandler(client.getHttpClient()));
+    client.getProtocolHandlers().put(new ProxyAuthenticationProtocolHandler(client.getHttpClient()));
+  }
+
+  /**
+   * Apache-HttpClient path: returns a builder wired with the default credentials.
+   * NOTE(review): dereferences CREDENTIAL_RESOLVER.defaultParams, which is null
+   * when no credentials were configured — an NPE would precede the intended
+   * IllegalArgumentException; confirm callers always configure credentials first.
+   */
+  @Override
+  public SolrHttpClientBuilder getHttpClientBuilder(final SolrHttpClientBuilder builder) {
+    final String basicAuthUser = CREDENTIAL_RESOLVER.defaultParams.get(HttpClientUtil.PROP_BASIC_AUTH_USER);
+    final String basicAuthPass = CREDENTIAL_RESOLVER.defaultParams.get(HttpClientUtil.PROP_BASIC_AUTH_PASS);
+    if (basicAuthUser == null || basicAuthPass == null) {
+      throw new IllegalArgumentException("username & password must be specified with " + getClass().getName());
+    }
+
+    return initHttpClientBuilder(builder == null ? SolrHttpClientBuilder.create() : builder, basicAuthUser, basicAuthPass);
+  }
+
+  /** Attaches a credentials provider and the shared preemptive-auth interceptor. */
+  private SolrHttpClientBuilder initHttpClientBuilder(final SolrHttpClientBuilder builder, final String basicAuthUser, final String basicAuthPass) {
+    builder.setDefaultCredentialsProvider(() -> {
+      final CredentialsProvider credsProvider = new BasicCredentialsProvider();
+      credsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(basicAuthUser, basicAuthPass));
+      return credsProvider;
+    });
+
+    HttpClientUtil.addRequestInterceptor(requestInterceptor);
+    return builder;
+  }
+
+  /**
+   * Resolves default Basic-auth credentials from either the "basicauth"
+   * system property ("user:password") or a properties file named by the
+   * "solr.httpclient.config" system property. Exactly one mechanism may be
+   * used; if neither is set, defaultParams stays null.
+   */
+  static class CredentialsResolver {
+
+    public volatile SolrParams defaultParams;
+
+    public CredentialsResolver() {
+      final String credentials = System.getProperty(PreemptiveBasicAuthClientBuilderFactory.SYS_PROP_BASIC_AUTH_CREDENTIALS);
+      final String configFile = System.getProperty(PreemptiveBasicAuthClientBuilderFactory.SYS_PROP_HTTP_CLIENT_CONFIG);
+
+      if (credentials != null && configFile != null) {
+        throw new IllegalArgumentException("Basic authentication credentials passed via a configuration file" + " as well as java system property. Please choose one mechanism!");
+      }
+
+      if (credentials != null) {
+        final List<String> ss = StrUtils.splitSmart(credentials, ':');
+        if (ss.size() != 2 || StringUtils.isEmpty(ss.get(0)) || StringUtils.isEmpty(ss.get(1))) {
+          throw new IllegalArgumentException("Invalid Authentication credentials: Please provide 'basicauth' in the 'user:password' format");
+        }
+        final Map<String, String> paramMap = new HashMap<>();
+        paramMap.put(HttpClientUtil.PROP_BASIC_AUTH_USER, ss.get(0));
+        paramMap.put(HttpClientUtil.PROP_BASIC_AUTH_PASS, ss.get(1));
+        defaultParams = new MapSolrParams(paramMap);
+      } else if (configFile != null) {
+        final Properties defaultProps = new Properties();
+        try (BufferedReader reader = Files.newBufferedReader(Paths.get(configFile), StandardCharsets.UTF_8)) {
+          defaultProps.load(reader);
+        } catch (final IOException e) {
+          throw new IllegalArgumentException("Unable to read credentials file at " + configFile, e);
+        }
+        final Map<String, String> map = new HashMap<>();
+        defaultProps.forEach((k, v) -> map.put((String) k, (String) v));
+        defaultParams = new MapSolrParams(map);
+      } else {
+        // no credentials configured anywhere; callers must supply them explicitly
+        defaultParams = null;
+      }
+    }
+  }
+}
diff --git a/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedUpdateRequest.java b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedUpdateRequest.java
new file mode 100644
index 0000000..39f106b
--- /dev/null
+++ b/connectors/solr/connector/src/main/java/org/apache/manifoldcf/agents/output/solr/ModifiedUpdateRequest.java
@@ -0,0 +1,520 @@
+package org.apache.manifoldcf.agents.output.solr;
+
+import static org.apache.solr.common.params.ShardParams._ROUTE_;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.XML;
+
+/**
+ * A modified copy of Solr's {@code UpdateRequest} used by the ManifoldCF Solr
+ * output connector together with {@link ModifiedLBSolrClient}. It accumulates
+ * documents to add, ids to delete and delete-by-query strings, serializes them
+ * to Solr XML update syntax, and can partition its pending operations into
+ * per-shard-leader sub-requests.
+ * NOTE(review): presumably forked so that all requests can be sent as
+ * multipart POSTs — confirm against the connector's HttpPoster usage.
+ */
+public class ModifiedUpdateRequest extends AbstractUpdateRequest {
+
+  // Keys used in the per-document / per-delete option maps held in the fields below.
+  public static final String REPFACT = "rf";
+  public static final String VER = "ver";
+  public static final String OVERWRITE = "ow";
+  public static final String COMMIT_WITHIN = "cw";
+  // Pending documents to add, each mapped to its per-document options
+  // (OVERWRITE / COMMIT_WITHIN) or to null when no options were given.
+  private Map<SolrInputDocument, Map<String, Object>> documents = null;
+  // Optional streaming source of documents; appended after 'documents' when writing XML.
+  private Iterator<SolrInputDocument> docIterator = null;
+  // Pending ids to delete, each mapped to its per-delete options (VER / _ROUTE_) or null.
+  private Map<String, Map<String, Object>> deleteById = null;
+  // Pending delete-by-query strings.
+  private List<String> deleteQuery = null;
+
+  private boolean isLastDocInBatch = false;
+
+  public ModifiedUpdateRequest() {
+    super(METHOD.POST, "/update");
+  }
+
+  public ModifiedUpdateRequest(final String url) {
+    super(METHOD.POST, url);
+  }
+
+  // ---------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------
+
+  /** clear the pending documents and delete commands */
+  public void clear() {
+    if (documents != null) {
+      documents.clear();
+    }
+    if (deleteById != null) {
+      deleteById.clear();
+    }
+    if (deleteQuery != null) {
+      deleteQuery.clear();
+    }
+  }
+
+  // ---------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------
+
+  /**
+   * Add a SolrInputDocument to this request
+   *
+   * @throws NullPointerException if the document is null
+   */
+  public ModifiedUpdateRequest add(final SolrInputDocument doc) {
+    Objects.requireNonNull(doc, "Cannot add a null SolrInputDocument");
+    if (documents == null) {
+      documents = new LinkedHashMap<>();
+    }
+    documents.put(doc, null);
+    return this;
+  }
+
+  // Convenience: build a SolrInputDocument from the given fields and add it.
+  public ModifiedUpdateRequest add(final String... fields) {
+    return add(new SolrInputDocument(fields));
+  }
+
+  /**
+   * Add a SolrInputDocument to this request
+   *
+   * @param doc       the document
+   * @param overwrite true if the document should overwrite existing docs with the same id
+   * @throws NullPointerException if the document is null
+   */
+  public ModifiedUpdateRequest add(final SolrInputDocument doc, final Boolean overwrite) {
+    return add(doc, null, overwrite);
+  }
+
+  /**
+   * Add a SolrInputDocument to this request
+   *
+   * @param doc          the document
+   * @param commitWithin the time horizon by which the document should be committed (in ms)
+   * @throws NullPointerException if the document is null
+   */
+  public ModifiedUpdateRequest add(final SolrInputDocument doc, final Integer commitWithin) {
+    return add(doc, commitWithin, null);
+  }
+
+  /**
+   * Add a SolrInputDocument to this request
+   *
+   * @param doc          the document
+   * @param commitWithin the time horizon by which the document should be committed (in ms)
+   * @param overwrite    true if the document should overwrite existing docs with the same id
+   * @throws NullPointerException if the document is null
+   */
+  public ModifiedUpdateRequest add(final SolrInputDocument doc, final Integer commitWithin, final Boolean overwrite) {
+    Objects.requireNonNull(doc, "Cannot add a null SolrInputDocument");
+    if (documents == null) {
+      documents = new LinkedHashMap<>();
+    }
+    // Only record an options map when at least one option was actually supplied.
+    final Map<String, Object> params = new HashMap<>(2);
+    if (commitWithin != null)
+      params.put(COMMIT_WITHIN, commitWithin);
+    if (overwrite != null)
+      params.put(OVERWRITE, overwrite);
+
+    documents.put(doc, params);
+
+    return this;
+  }
+
+  /**
+   * Add a collection of SolrInputDocuments to this request
+   *
+   * @throws NullPointerException if any of the documents in the collection are null
+   */
+  public ModifiedUpdateRequest add(final Collection<SolrInputDocument> docs) {
+    if (documents == null) {
+      documents = new LinkedHashMap<>();
+    }
+    for (final SolrInputDocument doc : docs) {
+      Objects.requireNonNull(doc, "Cannot add a null SolrInputDocument");
+      documents.put(doc, null);
+    }
+    return this;
+  }
+
+  /** Schedule deletion of the document with the given id. */
+  public ModifiedUpdateRequest deleteById(final String id) {
+    if (deleteById == null) {
+      deleteById = new LinkedHashMap<>();
+    }
+    deleteById.put(id, null);
+    return this;
+  }
+
+  public ModifiedUpdateRequest deleteById(final String id, final String route) {
+    return deleteById(id, route, null);
+  }
+
+  /**
+   * Schedule deletion of the document with the given id.
+   *
+   * @param id      the document id
+   * @param route   optional shard routing value (stored under _ROUTE_), may be null
+   * @param version optional version for optimistic concurrency (stored under VER), may be null
+   */
+  public ModifiedUpdateRequest deleteById(final String id, final String route, final Long version) {
+    if (deleteById == null) {
+      deleteById = new LinkedHashMap<>();
+    }
+    final Map<String, Object> params = (route == null && version == null) ? null : new HashMap<>(1);
+    if (version != null)
+      params.put(VER, version);
+    if (route != null)
+      params.put(_ROUTE_, route);
+    deleteById.put(id, params);
+    return this;
+  }
+
+  public ModifiedUpdateRequest deleteById(final List<String> ids) {
+    if (deleteById == null) {
+      deleteById = new LinkedHashMap<>();
+    }
+
+    for (final String id : ids) {
+      deleteById.put(id, null);
+    }
+
+    return this;
+  }
+
+  public ModifiedUpdateRequest deleteById(final String id, final Long version) {
+    return deleteById(id, null, version);
+  }
+
+  /** Schedule deletion of all documents matching the given query. */
+  public ModifiedUpdateRequest deleteByQuery(final String q) {
+    if (deleteQuery == null) {
+      deleteQuery = new ArrayList<>();
+    }
+    deleteQuery.add(q);
+    return this;
+  }
+
+  // Pin this request to a specific shard route via the _route_ parameter.
+  public ModifiedUpdateRequest withRoute(final String route) {
+    if (params == null)
+      params = new ModifiableSolrParams();
+    params.set(_ROUTE_, route);
+    return this;
+  }
+
+  // Set COMMIT=true on this request's params and execute it against the client.
+  public UpdateResponse commit(final SolrClient client, final String collection) throws IOException, SolrServerException {
+    if (params == null)
+      params = new ModifiableSolrParams();
+    params.set(UpdateParams.COMMIT, "true");
+    return process(client, collection);
+  }
+
+  // Factory abstraction so getRoutes can build different ModifiedLBSolrClient.Req flavors.
+  private interface ReqSupplier<T extends ModifiedLBSolrClient.Req> {
+    T get(ModifiedUpdateRequest request, List<String> servers);
+  }
+
+  /**
+   * Partition the pending adds and deleteById's by shard-leader URL, producing one
+   * sub-request per leader. Returns null when there is nothing to route, or when any
+   * document/id cannot be routed (missing id field, no target slice, or no URL for
+   * the slice). Delete-by-query commands are not routed by this method.
+   */
+  private <T extends ModifiedLBSolrClient.Req> Map<String, T> getRoutes(final DocRouter router, final DocCollection col, final Map<String, List<String>> urlMap, final ModifiableSolrParams params,
+      final String idField, final ReqSupplier<T> reqSupplier) {
+    if ((documents == null || documents.size() == 0) && (deleteById == null || deleteById.size() == 0)) {
+      return null;
+    }
+
+    final Map<String, T> routes = new HashMap<>();
+    if (documents != null) {
+      final Set<Entry<SolrInputDocument, Map<String, Object>>> entries = documents.entrySet();
+      for (final Entry<SolrInputDocument, Map<String, Object>> entry : entries) {
+        final SolrInputDocument doc = entry.getKey();
+        final Object id = doc.getFieldValue(idField);
+        if (id == null) {
+          return null;
+        }
+        final Slice slice = router.getTargetSlice(id.toString(), doc, null, null, col);
+        if (slice == null) {
+          return null;
+        }
+        final List<String> urls = urlMap.get(slice.getName());
+        if (urls == null) {
+          return null;
+        }
+        // First URL of the slice is treated as the leader; one sub-request per leader.
+        final String leaderUrl = urls.get(0);
+        T request = routes.get(leaderUrl);
+        if (request == null) {
+          // Clone this request's settings into a fresh sub-request for that leader.
+          final ModifiedUpdateRequest updateRequest = new ModifiedUpdateRequest();
+          updateRequest.setMethod(getMethod());
+          updateRequest.setCommitWithin(getCommitWithin());
+          updateRequest.setParams(params);
+          updateRequest.setPath(getPath());
+          updateRequest.setBasicAuthCredentials(getBasicAuthUser(), getBasicAuthPassword());
+          updateRequest.setResponseParser(getResponseParser());
+          request = reqSupplier.get(updateRequest, urls);
+          routes.put(leaderUrl, request);
+        }
+        final ModifiedUpdateRequest urequest = (ModifiedUpdateRequest) request.getRequest();
+        final Map<String, Object> value = entry.getValue();
+        Boolean ow = null;
+        if (value != null) {
+          ow = (Boolean) value.get(OVERWRITE);
+        }
+        if (ow != null) {
+          urequest.add(doc, ow);
+        } else {
+          urequest.add(doc);
+        }
+      }
+    }
+
+    // Route the deleteById's
+
+    if (deleteById != null) {
+
+      final Iterator<Map.Entry<String, Map<String, Object>>> entries = deleteById.entrySet().iterator();
+      while (entries.hasNext()) {
+
+        final Map.Entry<String, Map<String, Object>> entry = entries.next();
+
+        final String deleteId = entry.getKey();
+        final Map<String, Object> map = entry.getValue();
+        Long version = null;
+        String route = null;
+        if (map != null) {
+          version = (Long) map.get(VER);
+          route = (String) map.get(_ROUTE_);
+        }
+        final Slice slice = router.getTargetSlice(deleteId, null, route, null, col);
+        if (slice == null) {
+          return null;
+        }
+        final List<String> urls = urlMap.get(slice.getName());
+        if (urls == null) {
+          return null;
+        }
+        final String leaderUrl = urls.get(0);
+        T request = routes.get(leaderUrl);
+        if (request != null) {
+          // Reuse the sub-request already created for this leader.
+          final ModifiedUpdateRequest urequest = (ModifiedUpdateRequest) request.getRequest();
+          urequest.deleteById(deleteId, route, version);
+        } else {
+          final ModifiedUpdateRequest urequest = new ModifiedUpdateRequest();
+          urequest.setParams(params);
+          urequest.deleteById(deleteId, route, version);
+          urequest.setCommitWithin(getCommitWithin());
+          urequest.setBasicAuthCredentials(getBasicAuthUser(), getBasicAuthPassword());
+          request = reqSupplier.get(urequest, urls);
+          routes.put(leaderUrl, request);
+        }
+      }
+    }
+
+    return routes;
+  }
+
+  /**
+   * @param router  to route updates with
+   * @param col     DocCollection for the updates
+   * @param urlMap  of the cluster
+   * @param params  params to use
+   * @param idField the id field
+   * @return a Map of urls to requests
+   */
+  public Map<String, ModifiedLBSolrClient.Req> getRoutesToCollection(final DocRouter router, final DocCollection col, final Map<String, List<String>> urlMap, final ModifiableSolrParams params,
+      final String idField) {
+    return getRoutes(router, col, urlMap, params, idField, ModifiedLBSolrClient.Req::new);
+  }
+
+  // Provide documents as a streaming iterator, consumed once when the XML is written.
+  public void setDocIterator(final Iterator<SolrInputDocument> docIterator) {
+    this.docIterator = docIterator;
+  }
+
+  public void setDeleteQuery(final List<String> deleteQuery) {
+    this.deleteQuery = deleteQuery;
+  }
+
+  // --------------------------------------------------------------------------
+  // --------------------------------------------------------------------------
+
+  // Serialize this request to a single text/xml content stream.
+  @Override
+  public Collection<ContentStream> getContentStreams() throws IOException {
+    return ClientUtils.toContentStreams(getXML(), ClientUtils.TEXT_XML);
+  }
+
+  /** Render the request as Solr XML update syntax; returns null when there is nothing to send. */
+  public String getXML() throws IOException {
+    final StringWriter writer = new StringWriter();
+    writeXML(writer);
+    writer.flush();
+
+    // If action is COMMIT or OPTIMIZE, it is sent with params
+    final String xml = writer.toString();
+    // System.out.println( "SEND:"+xml );
+    return (xml.length() > 0) ? xml : null;
+  }
+
+  /**
+   * Split the pending documents into batches such that all documents in a batch share
+   * the same overwrite/commitWithin settings (each batch becomes one &lt;add&gt; element).
+   * Documents from docIterator, when present, are appended as a final batch.
+   * NOTE(review): the 'documents' parameter is never read; the method uses this.documents.
+   */
+  private List<Map<SolrInputDocument, Map<String, Object>>> getDocLists(final Map<SolrInputDocument, Map<String, Object>> documents) {
+    final List<Map<SolrInputDocument, Map<String, Object>>> docLists = new ArrayList<>();
+    Map<SolrInputDocument, Map<String, Object>> docList = null;
+    if (this.documents != null) {
+
+      Boolean lastOverwrite = true;
+      Integer lastCommitWithin = -1;
+
+      final Set<Entry<SolrInputDocument, Map<String, Object>>> entries = this.documents.entrySet();
+      for (final Entry<SolrInputDocument, Map<String, Object>> entry : entries) {
+        final Map<String, Object> map = entry.getValue();
+        Boolean overwrite = null;
+        Integer commitWithin = null;
+        if (map != null) {
+          overwrite = (Boolean) entry.getValue().get(OVERWRITE);
+          commitWithin = (Integer) entry.getValue().get(COMMIT_WITHIN);
+        }
+        // Start a new batch whenever the options differ from the previous document's.
+        if (!Objects.equals(overwrite, lastOverwrite) || !Objects.equals(commitWithin, lastCommitWithin) || docLists.isEmpty()) {
+          docList = new LinkedHashMap<>();
+          docLists.add(docList);
+        }
+        docList.put(entry.getKey(), entry.getValue());
+        lastCommitWithin = commitWithin;
+        lastOverwrite = overwrite;
+      }
+    }
+
+    if (docIterator != null) {
+      docList = new LinkedHashMap<>();
+      docLists.add(docList);
+      while (docIterator.hasNext()) {
+        final SolrInputDocument doc = docIterator.next();
+        if (doc != null) {
+          docList.put(doc, null);
+        }
+      }
+    }
+
+    return docLists;
+  }
+
+  /**
+   * @since solr 1.4
+   */
+  public ModifiedUpdateRequest writeXML(final Writer writer) throws IOException {
+    final List<Map<SolrInputDocument, Map<String, Object>>> getDocLists = getDocLists(documents);
+
+    // Emit one <add> element per batch of documents with identical options.
+    for (final Map<SolrInputDocument, Map<String, Object>> docs : getDocLists) {
+
+      if ((docs != null && docs.size() > 0)) {
+        // All docs in a batch share one options map; read it from the first entry.
+        final Entry<SolrInputDocument, Map<String, Object>> firstDoc = docs.entrySet().iterator().next();
+        final Map<String, Object> map = firstDoc.getValue();
+        Integer cw = null;
+        Boolean ow = null;
+        if (map != null) {
+          cw = (Integer) firstDoc.getValue().get(COMMIT_WITHIN);
+          ow = (Boolean) firstDoc.getValue().get(OVERWRITE);
+        }
+        if (ow == null)
+          ow = true;
+        // Per-document commitWithin wins over the request-level setting.
+        final int commitWithin = (cw != null && cw != -1) ? cw : this.commitWithin;
+        final boolean overwrite = ow;
+        if (commitWithin > -1 || overwrite != true) {
+          writer.write("<add commitWithin=\"" + commitWithin + "\" " + "overwrite=\"" + overwrite + "\">");
+        } else {
+          writer.write("<add>");
+        }
+
+        final Set<Entry<SolrInputDocument, Map<String, Object>>> entries = docs.entrySet();
+        for (final Entry<SolrInputDocument, Map<String, Object>> entry : entries) {
+          ClientUtils.writeXML(entry.getKey(), writer);
+        }
+
+        writer.write("</add>");
+      }
+    }
+
+    // Add the delete commands
+    final boolean deleteI = deleteById != null && deleteById.size() > 0;
+    final boolean deleteQ = deleteQuery != null && deleteQuery.size() > 0;
+    if (deleteI || deleteQ) {
+      if (commitWithin > 0) {
+        writer.append("<delete commitWithin=\"").append(String.valueOf(commitWithin)).append("\">");
+      } else {
+        writer.append("<delete>");
+      }
+      if (deleteI) {
+        for (final Map.Entry<String, Map<String, Object>> entry : deleteById.entrySet()) {
+          writer.append("<id");
+          final Map<String, Object> map = entry.getValue();
+          if (map != null) {
+            final Long version = (Long) map.get(VER);
+            final String route = (String) map.get(_ROUTE_);
+            if (version != null) {
+              writer.append(" version=\"").append(String.valueOf(version)).append('"');
+            }
+
+            if (route != null) {
+              writer.append(" _route_=\"").append(route).append('"');
+            }
+          }
+          writer.append(">");
+
+          XML.escapeCharData(entry.getKey(), writer);
+          writer.append("</id>");
+        }
+      }
+      if (deleteQ) {
+        for (final String q : deleteQuery) {
+          writer.append("<query>");
+          XML.escapeCharData(q, writer);
+          writer.append("</query>");
+        }
+      }
+      writer.append("</delete>");
+    }
+    return this;
+  }
+
+  // --------------------------------------------------------------------------
+  // --------------------------------------------------------------------------
+
+  // --------------------------------------------------------------------------
+  //
+  // --------------------------------------------------------------------------
+
+  /** Snapshot of the pending documents to add, or null when none have been added. */
+  public List<SolrInputDocument> getDocuments() {
+    if (documents == null)
+      return null;
+    final List<SolrInputDocument> docs = new ArrayList<>(documents.size());
+    docs.addAll(documents.keySet());
+    return docs;
+  }
+
+  public Map<SolrInputDocument, Map<String, Object>> getDocumentsMap() {
+    return documents;
+  }
+
+  public Iterator<SolrInputDocument> getDocIterator() {
+    return docIterator;
+  }
+
+  /** Snapshot of the pending delete ids, or null when none have been scheduled. */
+  public List<String> getDeleteById() {
+    if (deleteById == null)
+      return null;
+    final List<String> deletes = new ArrayList<>(deleteById.keySet());
+    return deletes;
+  }
+
+  public Map<String, Map<String, Object>> getDeleteByIdMap() {
+    return deleteById;
+  }
+
+  public List<String> getDeleteQuery() {
+    return deleteQuery;
+  }
+
+  // Marks/reports whether this request carries the final document of a batch.
+  public boolean isLastDocInBatch() {
+    return isLastDocInBatch;
+  }
+
+  public void lastDocInBatch() {
+    isLastDocInBatch = true;
+  }
+
+}
diff --git a/connectors/solr/connector/src/test/java/org/apache/manifoldcf/agents/output/solr/tests/PreemptiveBasicAuthInterceptorTest.java b/connectors/solr/connector/src/test/java/org/apache/manifoldcf/agents/output/solr/tests/PreemptiveBasicAuthInterceptorTest.java
deleted file mode 100644
index cd95f2c..0000000
--- a/connectors/solr/connector/src/test/java/org/apache/manifoldcf/agents/output/solr/tests/PreemptiveBasicAuthInterceptorTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/* $Id$ */

-

-/**

- * Licensed to the Apache Software Foundation (ASF) under one or more

- * contributor license agreements. See the NOTICE file distributed with

- * this work for additional information regarding copyright ownership.

- * The ASF licenses this file to You under the Apache License, Version 2.0

- * (the "License"); you may not use this file except in compliance with

- * the License. You may obtain a copy of the License at

- * <p>

- * http://www.apache.org/licenses/LICENSE-2.0

- * <p>

- * Unless required by applicable law or agreed to in writing, software

- * distributed under the License is distributed on an "AS IS" BASIS,

- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

- * See the License for the specific language governing permissions and

- * limitations under the License.

- */

-package org.apache.manifoldcf.agents.output.solr.tests;

-

-import org.apache.http.HttpException;

-import org.apache.http.HttpRequest;

-import org.apache.http.HttpRequestInterceptor;

-import org.apache.http.auth.AuthScope;

-import org.apache.http.auth.AuthState;

-import org.apache.http.auth.Credentials;

-import org.apache.http.auth.UsernamePasswordCredentials;

-import org.apache.http.client.CredentialsProvider;

-import org.apache.http.client.protocol.HttpClientContext;

-import org.apache.http.impl.auth.BasicScheme;

-import org.apache.http.message.BasicHttpRequest;

-import org.apache.http.protocol.HttpContext;

-import org.apache.manifoldcf.agents.output.solr.PreemptiveBasicAuthInterceptor;

-import org.junit.Test;

-

-import java.io.IOException;

-import java.util.HashMap;

-import java.util.Map;

-

-import static org.junit.Assert.assertEquals;

-import static org.junit.Assert.assertTrue;

-import static org.junit.Assert.fail;

-

-public class PreemptiveBasicAuthInterceptorTest {

-

-  @Test

-  public void shouldAddBasicAuthenticationToRequestIfNotAlreadySet() throws Exception {

-    final HttpRequestInterceptor interceptor = new PreemptiveBasicAuthInterceptor(AuthScope.ANY);

-    final HttpContext context = contextWithoutBasicAuth(new UsernamePasswordCredentials("user", "secret"));

-    interceptor.process(get(), context);

-    final AuthState authState = (AuthState) context.getAttribute(HttpClientContext.TARGET_AUTH_STATE);

-    assertTrue(authState.getAuthScheme() instanceof BasicScheme);

-    assertEquals("user", authState.getCredentials().getUserPrincipal().getName());

-    assertEquals("secret", authState.getCredentials().getPassword());

-  }

-

-  @Test

-  public void shouldThrowHttpExceptionIfNoCredentialsWereProvided() {

-    final HttpRequestInterceptor interceptor = new PreemptiveBasicAuthInterceptor(AuthScope.ANY);

-    final HttpContext context = contextWithoutBasicAuth(null);

-    try {

-      interceptor.process(get(), context);

-      fail("Expected an HttpException, but none was raised.");

-    } catch (HttpException e) {

-      assertEquals("Missing credentials for preemptive basic authentication.", e.getMessage());

-    } catch (IOException e) {

-      fail("Expected an HttpException, but an IOException was raised instead.");

-    }

-  }

-

-  private HttpRequest get() {

-    return new BasicHttpRequest("GET", "https://manifoldcf.apache.org/");

-  }

-

-  private HttpContext contextWithoutBasicAuth(final Credentials credentials) {

-    final CredentialsProvider credentialsProvider = new FakeCredentialsProvider();

-    credentialsProvider.setCredentials(AuthScope.ANY, credentials);

-    final AuthState authState = new AuthState();

-    final HttpContext context = new FakeHttpContext();

-    context.setAttribute(HttpClientContext.CREDS_PROVIDER, credentialsProvider);

-    context.setAttribute(HttpClientContext.TARGET_AUTH_STATE, authState);

-    return context;

-  }

-

-  static class FakeHttpContext implements HttpContext {

-

-    private final Map<String, Object> context = new HashMap<>();

-

-    @Override

-    public Object getAttribute(final String id) {

-      return context.get(id);

-    }

-

-    @Override

-    public void setAttribute(final String id, final Object obj) {

-      context.put(id, obj);

-    }

-

-    @Override

-    public Object removeAttribute(final String id) {

-      return context.remove(id);

-    }

-  }

-

-  static class FakeCredentialsProvider implements CredentialsProvider {

-

-    private final Map<AuthScope, Credentials> credentialsByAuthScope = new HashMap<>();

-

-    @Override

-    public void setCredentials(final AuthScope authScope, final Credentials credentials) {

-      credentialsByAuthScope.put(authScope, credentials);

-    }

-

-    @Override

-    public Credentials getCredentials(final AuthScope authScope) {

-      return credentialsByAuthScope.get(authScope);

-    }

-

-    @Override

-    public void clear() {

-      credentialsByAuthScope.clear();

-    }

-  }

-}

diff --git a/connectors/solr/pom.xml b/connectors/solr/pom.xml
index fcc14a3..688d91a 100644
--- a/connectors/solr/pom.xml
+++ b/connectors/solr/pom.xml
@@ -244,6 +244,24 @@
       <version>${slf4j.version}</version>
     </dependency>
     
+    <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-client</artifactId>
+        <version>${jetty.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>org.eclipse.jetty.http2</groupId>
+        <artifactId>http2-http-client-transport</artifactId>
+        <version>${jetty.version}</version>
+    </dependency>
+
+
+
+
+
+
+
+    
     <!-- Testing dependencies -->
     
     <dependency>
@@ -325,7 +343,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
       <version>${jetty.version}</version>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
@@ -343,7 +360,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-http</artifactId>
       <version>${jetty.version}</version>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
diff --git a/framework/core/src/test/java/org/apache/manifoldcf/core/lockmanager/ZooKeeperInstance.java b/framework/core/src/test/java/org/apache/manifoldcf/core/lockmanager/ZooKeeperInstance.java
index 75f99d6..9dd7faf 100644
--- a/framework/core/src/test/java/org/apache/manifoldcf/core/lockmanager/ZooKeeperInstance.java
+++ b/framework/core/src/test/java/org/apache/manifoldcf/core/lockmanager/ZooKeeperInstance.java
@@ -18,31 +18,31 @@
 */
 package org.apache.manifoldcf.core.lockmanager;
 
-import java.util.*;
-import java.io.*;
-import org.apache.zookeeper.server.*;
-import org.apache.zookeeper.server.quorum.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
 
-public class ZooKeeperInstance
-{
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+
+public class ZooKeeperInstance {
   protected final int zkPort;
   protected final File tempDir;
-  
+
   protected ZooKeeperThread zookeeperThread = null;
-  
-  public ZooKeeperInstance(int zkPort, File tempDir)
-  {
+
+  public ZooKeeperInstance(final int zkPort, final File tempDir) {
     this.zkPort = zkPort;
     this.tempDir = tempDir;
   }
 
-  public void start()
-    throws Exception
-  {
-    Properties startupProperties = new Properties();
-    startupProperties.setProperty("tickTime","2000");
-    startupProperties.setProperty("dataDir",tempDir.toString());
-    startupProperties.setProperty("clientPort",Integer.toString(zkPort));
+  public void start() throws Exception {
+    final Properties startupProperties = new Properties();
+    startupProperties.setProperty("tickTime", "2000");
+    startupProperties.setProperty("dataDir", tempDir.toString());
+    startupProperties.setProperty("clientPort", Integer.toString(zkPort));
+    startupProperties.setProperty("admin.enableServer", "false");
 
     final QuorumPeerConfig quorumConfiguration = new QuorumPeerConfig();
     quorumConfiguration.parseProperties(startupProperties);
@@ -53,33 +53,26 @@
     zookeeperThread = new ZooKeeperThread(configuration);
     zookeeperThread.start();
     // We have no way of knowing whether zookeeper is alive or not, but the
-    // client is supposed to know about that.  But it doesn't, so wait for 5 seconds
+    // client is supposed to know about that. But it doesn't, so wait for 5 seconds
     Thread.sleep(5000L);
   }
-  
-  public void stop()
-    throws Exception
-  {
-    while (true)
-    {
+
+  public void stop() throws Exception {
+    while (true) {
       if (zookeeperThread == null)
         break;
-      else if (!zookeeperThread.isAlive())
-      {
-        Throwable e = zookeeperThread.finishUp();
-        if (e != null)
-        {
+      else if (!zookeeperThread.isAlive()) {
+        final Throwable e = zookeeperThread.finishUp();
+        if (e != null) {
           if (e instanceof RuntimeException)
-            throw (RuntimeException)e;
+            throw (RuntimeException) e;
           else if (e instanceof Exception)
-            throw (Exception)e;
+            throw (Exception) e;
           else if (e instanceof Error)
-            throw (Error)e;
+            throw (Error) e;
         }
         zookeeperThread = null;
-      }
-      else
-      {
+      } else {
         // This isn't the best way to kill zookeeper but it's the only way
         // we've got.
         zookeeperThread.interrupt();
@@ -87,42 +80,33 @@
       }
     }
   }
-  
-  protected static class ZooKeeperThread extends Thread
-  {
+
+  protected static class ZooKeeperThread extends Thread {
     protected final ServerConfig config;
-    
+
     protected Throwable exception = null;
-    
-    public ZooKeeperThread(ServerConfig config)
-    {
+
+    public ZooKeeperThread(final ServerConfig config) {
       this.config = config;
     }
-    
-    public void run()
-    {
-      try
-      {
-        ZooKeeperServerMain server = new ZooKeeperServerMain();
+
+    @Override
+    public void run() {
+      try {
+        final ZooKeeperServerMain server = new ZooKeeperServerMain();
         server.runFromConfig(config);
-      }
-      catch (IOException e)
-      {
+      } catch (final IOException e) {
         // Ignore IOExceptions, since that seems to be normal when shutting
         // down zookeeper via thread.interrupt()
-      }
-      catch (Throwable e)
-      {
+      } catch (final Throwable e) {
         exception = e;
       }
     }
-    
-    public Throwable finishUp()
-      throws InterruptedException
-    {
+
+    public Throwable finishUp() throws InterruptedException {
       join();
       return exception;
     }
   }
-  
+
 }